func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) []*Change {
	if len(a.Links) == 0 && len(b.Links) == 0 {
		ak, _ := a.Key()
		bk, _ := b.Key()
		return []*Change{
			&Change{
				Type:   Mod,
				Before: ak,
				After:  bk,
			},
		}
	}

	var out []*Change
	clean_a := a.Copy()
	clean_b := b.Copy()

	// strip out unchanged stuff
	for _, lnk := range a.Links {
		l, err := b.GetNodeLink(lnk.Name)
		if err == nil {
			if bytes.Equal(l.Hash, lnk.Hash) {
				// no change... ignore it
			} else {
				anode, _ := lnk.GetNode(ctx, ds)
				bnode, _ := l.GetNode(ctx, ds)
				sub := Diff(ctx, ds, anode, bnode)
				for _, subc := range sub {
					subc.Path = path.Join(lnk.Name, subc.Path)
					out = append(out, subc)
				}
			}
			clean_a.RemoveNodeLink(l.Name)
			clean_b.RemoveNodeLink(l.Name)
		}
	}

	for _, lnk := range clean_a.Links {
		out = append(out, &Change{
			Type:   Remove,
			Path:   lnk.Name,
			Before: key.Key(lnk.Hash),
		})
	}
	for _, lnk := range clean_b.Links {
		out = append(out, &Change{
			Type:  Add,
			Path:  lnk.Name,
			After: key.Key(lnk.Hash),
		})
	}

	return out
}
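// Usage sketch (not part of the original source): one way a caller might walk
// the []*Change returned by Diff. It relies only on the Change fields and the
// Add/Remove/Mod constants used above; the printChanges name is hypothetical.
func printChanges(changes []*Change) {
	for _, c := range changes {
		switch c.Type {
		case Add:
			fmt.Printf("+ %s (%s)\n", c.Path, c.After)
		case Remove:
			fmt.Printf("- %s (%s)\n", c.Path, c.Before)
		case Mod:
			fmt.Printf("~ %s (%s -> %s)\n", c.Path, c.Before, c.After)
		}
	}
}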
// resolveOnce implements resolver. Uses the IPFS routing system to
// resolve SFS-like names.
func (r *routingResolver) resolveOnce(ctx context.Context, name string) (path.Path, error) {
	log.Debugf("RoutingResolve: '%s'", name)

	// name should be a multihash. if it isn't, error out here.
	hash, err := mh.FromB58String(name)
	if err != nil {
		log.Warningf("RoutingResolve: bad input hash: [%s]\n", name)
		return "", err
	}

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	ipnsKey := key.Key(h)
	val, err := r.routing.GetValue(ctx, ipnsKey)
	if err != nil {
		log.Warning("RoutingResolve get failed.")
		return "", err
	}

	entry := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, entry)
	if err != nil {
		return "", err
	}

	// name should be a public key retrievable from ipfs
	pubkey, err := routing.GetPublicKey(r.routing, ctx, hash)
	if err != nil {
		return "", err
	}

	hsh, _ := pubkey.Hash()
	log.Debugf("pk hash = %s", key.Key(hsh))

	// check sig with pk
	if ok, err := pubkey.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pubkey)
	}

	// ok sig checks out. this is a valid name.

	// check for old style record:
	valh, err := mh.Cast(entry.GetValue())
	if err != nil {
		// Not a multihash, probably a new record
		return path.ParsePath(string(entry.GetValue()))
	} else {
		// It's an old style multihash record
		log.Warning("Detected old style multihash record")
		return path.FromKey(key.Key(valh)), nil
	}
}
func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	var count int
	for i, ng := range rw.DAG.GetDAG(rw.Ctx, n) {
		lk := key.Key(n.Links[i].Hash)
		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, n.Links[i].Name); err != nil {
			return count, err
		}

		nd, err := ng.Get(rw.Ctx)
		if err != nil {
			return count, err
		}

		c, err := rw.writeRefsRecursive(nd)
		count += c
		if err != nil {
			return count, err
		}
	}
	return count, nil
}
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // dont add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
// putProvider sends a message to peer 'p' saying that the local node
// can provide the value of 'key'
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, skey string) error {

	// add self as the provider
	pi := peer.PeerInfo{
		ID:    dht.self,
		Addrs: dht.host.Addrs(),
	}

	// // only share WAN-friendly addresses ??
	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)
	if len(pi.Addrs) < 1 {
		// log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, key.Key(key), pi.Addrs)
		return fmt.Errorf("no known addresses for self. cannot put provider.")
	}

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, skey, 0)
	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]peer.PeerInfo{pi})
	err := dht.sendMessage(ctx, p, pmes)
	if err != nil {
		return err
	}

	log.Debugf("%s putProvider: %s for %s (%s)", dht.self, p, key.Key(skey), pi.Addrs)
	return nil
}
func defaultRepo(dstore ds.ThreadSafeDatastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
// NewFilesystem instantiates an ipns filesystem using the given parameters and locally owned keys
func NewFilesystem(ctx context.Context, ds dag.DAGService, nsys namesys.NameSystem, pins pin.Pinner, keys ...ci.PrivKey) (*Filesystem, error) {
	roots := make(map[string]*KeyRoot)
	fs := &Filesystem{
		ctx:      ctx,
		roots:    roots,
		nsys:     nsys,
		dserv:    ds,
		pins:     pins,
		resolver: &path.Resolver{DAG: ds},
	}
	for _, k := range keys {
		pkh, err := k.GetPublic().Hash()
		if err != nil {
			return nil, err
		}

		root, err := fs.newKeyRoot(ctx, k)
		if err != nil {
			return nil, err
		}
		roots[key.Key(pkh).Pretty()] = root
	}

	return fs, nil
}
// VerifyRecord checks a record and ensures it is still valid.
// It runs the validator registered for the record's key prefix, if any.
func (v Validator) VerifyRecord(r *pb.Record) error {
	// Now, check the record against the registered validity func
	parts := strings.Split(r.GetKey(), "/")
	if len(parts) < 3 {
		log.Infof("Record key does not have validator: %s", key.Key(r.GetKey()))
		return nil
	}

	val, ok := v[parts[1]]
	if !ok {
		log.Infof("Unrecognized key prefix: %s", parts[1])
		return ErrInvalidRecordType
	}

	return val.Func(key.Key(r.GetKey()), r.GetValue())
}
// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine
// for it
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	name := "/ipns/" + key.Key(hash).String()

	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name

	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}

		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}

	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}

	root.node = mnode

	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)

	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}
	return root, nil
}
func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	if rw.skip(nkey) {
		return 0, nil
	}

	count := 0
	for _, l := range n.Links {
		lk := key.Key(l.Hash)

		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, l.Name); err != nil {
			return count, err
		}
		count++
	}
	return count, nil
}
func publish(ctx context.Context, n *core.IpfsNode, k crypto.PrivKey, ref path.Path, opts *publishOpts) (*IpnsEntry, error) {
	if opts.verifyExists {
		// verify the path exists
		_, err := core.Resolve(ctx, n, ref)
		if err != nil {
			return nil, err
		}
	}

	eol := time.Now().Add(opts.pubValidTime)
	err := n.Namesys.PublishWithEOL(ctx, k, ref, eol)
	if err != nil {
		return nil, err
	}

	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	return &IpnsEntry{
		Name:  key.Key(hash).String(),
		Value: ref.String(),
	}, nil
}
// GetDAG will fill out all of the links of the given Node.
// It returns a slice of NodeGetters, from which the caller can retrieve
// all the child nodes of 'root', in proper order.
func (ds *dagService) GetDAG(ctx context.Context, root *Node) []NodeGetter {
	var keys []key.Key
	for _, lnk := range root.Links {
		keys = append(keys, key.Key(lnk.Hash))
	}

	return ds.GetNodes(ctx, keys)
}
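// Usage sketch (not part of the original source): fetching every child of a
// node through the NodeGetters returned by GetDAG, mirroring the pattern used
// by writeRefsRecursive above. The collectChildren name is hypothetical.
func collectChildren(ctx context.Context, ds *dagService, root *Node) ([]*Node, error) {
	var children []*Node
	for _, ng := range ds.GetDAG(ctx, root) {
		nd, err := ng.Get(ctx)
		if err != nil {
			return children, err
		}
		children = append(children, nd)
	}
	return children, nil
}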
// Removes the child node at the given index
func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
	k := key.Key(n.node.Links[index].Hash)
	if dbh.mp != nil {
		dbh.mp.RemovePinWithMode(k, pin.Indirect)
	}

	n.ufmt.RemoveBlockSize(index)
	n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
}
// Loggable turns a Message into machine-readable log output
func (m *Message) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"message": map[string]string{
			"type": m.Type.String(),
			"key":  key.Key(m.GetKey()).Pretty(),
		},
	}
}
func newMessageFromProto(pbm pb.Message) BitSwapMessage {
	m := newMsg(pbm.GetWantlist().GetFull())
	for _, e := range pbm.GetWantlist().GetEntries() {
		m.addEntry(key.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel())
	}
	for _, d := range pbm.GetBlocks() {
		b := blocks.NewBlock(d)
		m.AddBlock(b)
	}
	return m
}
func escapeDhtKey(s string) (key.Key, error) {
	parts := strings.Split(s, "/")
	switch len(parts) {
	case 1:
		return key.B58KeyDecode(s), nil
	case 3:
		k := key.B58KeyDecode(parts[2])
		return key.Key(strings.Join(append(parts[:2], string(k)), "/")), nil
	default:
		return "", errors.New("invalid key")
	}
}
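// Usage sketch (not part of the original source): escapeDhtKey accepts either
// a bare base58-encoded key or a three-part path such as "/ipns/<b58>", and
// rejects anything else. The helper name and sample input are hypothetical.
func exampleEscapeDhtKey(b58name string) {
	if k, err := escapeDhtKey(b58name); err == nil {
		fmt.Printf("bare key -> %q\n", k) // single component, decoded directly
	}
	if k, err := escapeDhtKey("/ipns/" + b58name); err == nil {
		fmt.Printf("namespaced key -> %q\n", k) // "", "ipns", <b58>: three parts
	}
	// anything else (e.g. two or four components) returns "invalid key".
}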
// Store a value in this peer's local storage
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Warningf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	rec := pmes.GetRecord()

	// record the time we receive every record
	rec.TimeReceived = proto.String(u.FormatRFC3339(time.Now()))

	data, err := proto.Marshal(rec)
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}
// ResolvePathComponents fetches the nodes for each segment of the given path.
// It uses the first path component as a hash (key) of the first node, then
// resolves all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*merkledag.Node, error) {
	h, parts, err := SplitAbsPath(fpath)
	if err != nil {
		return nil, err
	}

	log.Debug("Resolve dag get.")
	nd, err := s.DAG.Get(ctx, key.Key(h))
	if err != nil {
		return nil, err
	}

	return s.ResolveLinks(ctx, nd, parts)
}
func GetPublicKey(r IpfsRouting, ctx context.Context, pkhash []byte) (ci.PubKey, error) {
	if dht, ok := r.(PubKeyFetcher); ok {
		// If we have a DHT as our routing system, use optimized fetcher
		return dht.GetPublicKey(ctx, peer.ID(pkhash))
	} else {
		key := key.Key("/pk/" + string(pkhash))
		pkval, err := r.GetValue(ctx, key)
		if err != nil {
			return nil, err
		}

		// get PublicKey from node.Data
		return ci.UnmarshalPublicKey(pkval)
	}
}
func ParseKeyToPath(txt string) (Path, error) {
	if txt == "" {
		return "", ErrNoComponents
	}

	chk := b58.Decode(txt)
	if len(chk) == 0 {
		return "", errors.New("not a key")
	}

	if _, err := mh.Cast(chk); err != nil {
		return "", err
	}
	return FromKey(key.Key(chk)), nil
}
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if providers != nil && len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the list of nodes forming the path, starting with ndd. This list is
// guaranteed never to be empty.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {

	result := make([]*merkledag.Node, 0, len(names)+1)
	result = append(result, ndd)
	nd := ndd // dup arg workaround

	// for each of the path components
	for _, name := range names {

		var next key.Key
		var nlink *merkledag.Link
		// for each of the links in nd, the current object
		for _, link := range nd.Links {
			if link.Name == name {
				next = key.Key(link.Hash)
				nlink = link
				break
			}
		}

		if next == "" {
			n, _ := nd.Multihash()
			return result, ErrNoLink{name: name, node: n}
		}

		if nlink.Node == nil {
			// fetch object for link and assign to nd
			ctx, cancel := context.WithTimeout(ctx, time.Minute)
			defer cancel()

			var err error
			nd, err = s.DAG.Get(ctx, next)
			if err != nil {
				return append(result, nd), err
			}
			nlink.Node = nd
		} else {
			nd = nlink.Node
		}

		result = append(result, nlink.Node)
	}
	return result, nil
}
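// Usage sketch (not part of the original source): resolving a few path
// components under a node and taking the final object, much like the gateway
// handlers below do. Since ResolveLinks never returns an empty list, indexing
// the last element is safe. The resolveLast name is hypothetical.
func resolveLast(ctx context.Context, s *Resolver, root *merkledag.Node, names []string) (*merkledag.Node, error) {
	nodes, err := s.ResolveLinks(ctx, root, names)
	if err != nil {
		return nil, err
	}
	return nodes[len(nodes)-1], nil
}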
func (t *Batch) Add(nd *Node) (key.Key, error) {
	d, err := nd.Encoded(false)
	if err != nil {
		return "", err
	}

	b := new(blocks.Block)
	b.Data = d
	b.Multihash, err = nd.Multihash()
	if err != nil {
		return "", err
	}

	k := key.Key(b.Multihash)

	t.blocks = append(t.blocks, b)
	t.size += len(b.Data)
	if t.size > t.MaxSize {
		return k, t.Commit()
	}
	return k, nil
}
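// Usage sketch (not part of the original source): adding several nodes through
// a Batch and flushing whatever is still buffered at the end. Add only commits
// automatically once MaxSize is exceeded, so a final Commit is assumed to be
// needed; the addAll helper name is hypothetical.
func addAll(t *Batch, nodes []*Node) ([]key.Key, error) {
	var keys []key.Key
	for _, nd := range nodes {
		k, err := t.Add(nd)
		if err != nil {
			return keys, err
		}
		keys = append(keys, k)
	}
	return keys, t.Commit()
}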
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := key.Key(pmes.GetKey())
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	rec, err := dht.checkLocalDatastore(k)
	if err != nil {
		return nil, err
	}
	resp.Record = rec

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent! [local:%s] [sending:%s] [remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}
func getBlockForKey(req cmds.Request, skey string) (*blocks.Block, error) {
	n, err := req.InvocContext().GetNode()
	if err != nil {
		return nil, err
	}

	if !u.IsValidHash(skey) {
		return nil, errors.New("Not a valid hash")
	}

	h, err := mh.FromB58String(skey)
	if err != nil {
		return nil, err
	}

	k := key.Key(h)
	b, err := n.Blocks.GetBlock(req.Context(), k)
	if err != nil {
		return nil, err
	}

	log.Debugf("ipfs block: got block with key: %q", b.Key())
	return b, nil
}
// KeyForPublicKey returns the key used to retrieve public keys
// from the dht.
func KeyForPublicKey(id peer.ID) key.Key {
	return key.Key("/pk/" + string(id))
}
func (s *Server) handleMessage(
	ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

	defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

	var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
	switch req.GetType() {

	case dhtpb.Message_GET_VALUE:
		rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.Record = rawRecord
		return p, response

	case dhtpb.Message_PUT_VALUE:
		// FIXME: verify complains that the peer's ID is not present in the
		// peerstore. Mocknet problem?
		// if err := verify(s.peerstore, req.GetRecord()); err != nil {
		// 	log.Event(ctx, "validationFailed", req, p)
		// 	return "", nil
		// }
		putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
		return p, req

	case dhtpb.Message_FIND_NODE:
		p := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
		pri := []dhtpb.PeerRoutingInfo{
			{
				PeerInfo: p,
				// Connectedness: TODO
			},
		}
		response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
		return p.ID, response

	case dhtpb.Message_ADD_PROVIDER:
		for _, provider := range req.GetProviderPeers() {
			providerID := peer.ID(provider.GetId())
			if providerID == p {
				store := []*dhtpb.Message_Peer{provider}
				storeProvidersToPeerstore(s.peerstore, p, store)
				if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
					return "", nil
				}
			} else {
				log.Event(ctx, "addProviderBadRequest", p, req)
			}
		}
		return "", nil

	case dhtpb.Message_GET_PROVIDERS:
		providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.ProviderPeers = providers
		return p, response

	case dhtpb.Message_PING:
		return p, req

	default:
	}
	return "", nil
}
// nearestPeersToQuery returns the routing table's closest peers.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
	key := key.Key(pmes.GetKey())
	closer := dht.routingTable.NearestPeers(kb.ConvertKey(key), count)
	return closer
}
func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	// TODO(cyrptix): assumes len(pathNodes) > 1 - not found is an error above?
	err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
	if err != nil {
		webError(w, "Could not delete link", err, http.StatusBadRequest)
		return
	}

	newnode := pathNodes[len(pathNodes)-1]
	for i := len(pathNodes) - 2; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components[:len(components)-1], "/"), http.StatusCreated)
}
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): either ask mildred about the flow of this or rewrite it
	webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError)
	return
	urlPath := r.URL.Path
	pathext := urlPath[5:]
	var err error
	if urlPath == ipfsPathPrefix+"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn/" {
		i.putEmptyDirHandler(w, r)
		return
	}

	var newnode *dag.Node
	if pathext[len(pathext)-1] == '/' {
		newnode = uio.NewEmptyDirectory()
	} else {
		newnode, err = i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
	}

	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	if len(components) < 1 {
		err = fmt.Errorf("Cannot override existing object")
		webError(w, "http gateway", err, http.StatusBadRequest)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	// TODO(cryptix): could this be core.Resolve() too?
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	// resolving path components into merkledag nodes. if a component does not
	// resolve, create empty directories (which will be linked and populated below.)
	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if _, ok := err.(path.ErrNoLink); ok {
		// Create empty directories, links will be made further down the code
		for len(pathNodes) < len(components) {
			pathNodes = append(pathNodes, uio.NewDirectory(i.node.DAG).GetNode())
		}
	} else if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	for i := len(pathNodes) - 1; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components, "/"), http.StatusCreated)
}