func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var out []key.Key
	for _, dagnode := range dagnodes {
		k, err := dagnode.Key()
		if err != nil {
			return nil, err
		}

		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()
		err = n.Pinning.Pin(ctx, dagnode, recursive)
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		out = append(out, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return out, nil
}
func Unpin(n *core.IpfsNode, paths []string, recursive bool) ([]key.Key, error) {
	// TODO(cryptix): do we want a ctx as first param for (Un)Pin() as well, just like core.Resolve?
	ctx := n.Context()

	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, err
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var unpinned []key.Key
	for _, dagnode := range dagnodes {
		k, _ := dagnode.Key()

		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()
		err := n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}
func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, err
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var unpinned []key.Key
	for _, dagnode := range dagnodes {
		k, _ := dagnode.Key()

		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()
		err := n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}
func Cat(ctx context.Context, n *core.IpfsNode, pstr string) (*uio.DagReader, error) {
	dagNode, err := core.Resolve(ctx, n, path.Path(pstr))
	if err != nil {
		return nil, err
	}
	return uio.NewDagReader(ctx, dagNode, n.DAG)
}
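// Illustrative sketch (not part of the original sources): how the Pin and Cat
// helpers above might be exercised together against an in-process node. It
// assumes it lives in the same package as those helpers, that coremock.NewMockNode
// (used in the test below) is importable here, and that the /ipfs/ path is a
// placeholder for content already added to the node.
func examplePinAndCat() error {
	n, err := coremock.NewMockNode()
	if err != nil {
		return err
	}
	ctx := context.Background()

	// Resolve and pin the object (recursively), collecting the pinned keys.
	keys, err := Pin(n, ctx, []string{"/ipfs/QmExampleHashGoesHere"}, true)
	if err != nil {
		return err
	}
	fmt.Println("pinned:", keys)

	// Stream the same object's bytes back out through a DagReader.
	dr, err := Cat(ctx, n, "/ipfs/QmExampleHashGoesHere")
	if err != nil {
		return err
	}
	defer dr.Close()
	_, err = io.Copy(os.Stdout, dr)
	return err
}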
func TestResolveNoComponents(t *testing.T) {
	n, err := coremock.NewMockNode()
	if n == nil || err != nil {
		t.Fatal("Should have constructed a mock node", err)
	}

	_, err = core.Resolve(n.Context(), n, path.Path("/ipns/"))
	if err != path.ErrNoComponents {
		t.Fatal("Should error with no components (/ipns/).", err)
	}

	_, err = core.Resolve(n.Context(), n, path.Path("/ipfs/"))
	if err != path.ErrNoComponents {
		t.Fatal("Should error with no components (/ipfs/).", err)
	}
}
func get(ctx context.Context, node *core.IpfsNode, p string, compression int) (io.Reader, error) {
	pathToResolve := path.Path(p)
	dagnode, err := core.Resolve(ctx, node, pathToResolve)
	if err != nil {
		return nil, err
	}
	return utar.NewReader(pathToResolve, node.DAG, dagnode, compression)
}
func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]*dag.Node, error) {
	objects := make([]*dag.Node, len(paths))
	for i, p := range paths {
		o, err := core.Resolve(ctx, n, path.Path(p))
		if err != nil {
			return nil, err
		}
		objects[i] = o
	}
	return objects, nil
}
func cat(ctx context.Context, node *core.IpfsNode, fpath string) (io.Reader, uint64, error) {
	dagnode, err := core.Resolve(ctx, node, path.Path(fpath))
	if err != nil {
		return nil, 0, err
	}

	reader, err := uio.NewDagReader(ctx, dagnode, node.DAG)
	if err != nil {
		return nil, 0, err
	}
	length := uint64(reader.Size())
	return reader, length, nil
}
// Lookup performs a lookup under this node.
func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
	log.Debugf("Root Lookup: '%s'", name)
	switch name {
	case "mach_kernel", ".hidden", "._.":
		// Just quiet some log noise on OS X.
		return nil, fuse.ENOENT
	}

	nd, err := s.Ipfs.Resolver.ResolvePath(ctx, path.Path(name))
	if err != nil {
		// todo: make this error more versatile.
		return nil, fuse.ENOENT
	}
	return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
}
func cat(ctx context.Context, node *core.IpfsNode, paths []string) ([]io.Reader, uint64, error) {
	readers := make([]io.Reader, 0, len(paths))
	length := uint64(0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, node, path.Path(fpath))
		if err != nil {
			return nil, 0, err
		}

		read, err := uio.NewDagReader(ctx, dagnode, node.DAG)
		if err != nil {
			return nil, 0, err
		}
		readers = append(readers, read)
		length += uint64(read.Size())
	}
	return readers, length, nil
}
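// Illustrative sketch (not from the original sources): one way the multi-path cat
// above might be consumed, concatenating the per-path readers into a single stream.
// It assumes the same package as cat and uses only standard-library helpers.
func catToWriter(ctx context.Context, node *core.IpfsNode, paths []string, out io.Writer) (uint64, error) {
	readers, length, err := cat(ctx, node, paths)
	if err != nil {
		return 0, err
	}
	// io.MultiReader preserves argument order, so the paths stream back in the
	// order they were requested.
	_, err = io.Copy(out, io.MultiReader(readers...))
	return length, err
}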
func pinLsKeys(args []string, typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) {
	keys := make(map[string]RefKeyObject)

	for _, p := range args {
		dagNode, err := core.Resolve(ctx, n, path.Path(p))
		if err != nil {
			return nil, err
		}
		k, err := dagNode.Key()
		if err != nil {
			return nil, err
		}

		mode, ok := pin.StringToPinMode(typeStr)
		if !ok {
			return nil, fmt.Errorf("Invalid pin mode '%s'", typeStr)
		}

		pinType, pinned, err := n.Pinning.IsPinnedWithType(k, mode)
		if err != nil {
			return nil, err
		}

		if !pinned {
			return nil, fmt.Errorf("Path '%s' is not pinned", p)
		}

		switch pinType {
		case "direct", "indirect", "recursive", "internal":
		default:
			pinType = "indirect through " + pinType
		}
		keys[k.B58String()] = RefKeyObject{
			Type: pinType,
		}
	}

	return keys, nil
}
// Lookup performs a lookup under this node.
func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
	log.Debugf("Root Lookup: '%s'", name)
	switch name {
	case "mach_kernel", ".hidden", "._.":
		// Just quiet some log noise on OS X.
		return nil, fuse.ENOENT
	}

	nd, err := s.Ipfs.Resolver.ResolvePath(ctx, path.Path(name))
	if err != nil {
		// todo: make this error more versatile.
		return nil, fuse.ENOENT
	}

	pbnd, ok := nd.(*mdag.ProtoNode)
	if !ok {
		log.Error("fuse node was not a protobuf node")
		return nil, fuse.ENOTSUP
	}

	return &Node{Ipfs: s.Ipfs, Nd: pbnd}, nil
}
func (rp *Republisher) getLastVal(k key.Key) (path.Path, uint64, error) {
	ival, err := rp.ds.Get(k.DsKey())
	if err != nil {
		// not found means we don't have a previously published entry
		return "", 0, errNoEntry
	}
	val := ival.([]byte)

	dhtrec := new(dhtpb.Record)
	err = proto.Unmarshal(val, dhtrec)
	if err != nil {
		return "", 0, err
	}

	// extract published data from record
	e := new(pb.IpnsEntry)
	err = proto.Unmarshal(dhtrec.GetValue(), e)
	if err != nil {
		return "", 0, err
	}
	return path.Path(e.Value), e.GetSequence(), nil
}
func TestOrdering(t *testing.T) {
	// select timestamp so selection is deterministic
	ts := time.Unix(1000000, 0)

	// generate a key for signing the records
	r := u.NewSeededRand(15)
	// generate deterministic keypair
	priv, _, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, r)
	if err != nil {
		t.Fatal(err)
	}

	e1, err := CreateRoutingEntryData(priv, path.Path("foo"), 1, ts.Add(time.Hour))
	if err != nil {
		t.Fatal(err)
	}

	e2, err := CreateRoutingEntryData(priv, path.Path("bar"), 2, ts.Add(time.Hour))
	if err != nil {
		t.Fatal(err)
	}

	e3, err := CreateRoutingEntryData(priv, path.Path("baz"), 3, ts.Add(time.Hour))
	if err != nil {
		t.Fatal(err)
	}

	e4, err := CreateRoutingEntryData(priv, path.Path("cat"), 3, ts.Add(time.Hour*2))
	if err != nil {
		t.Fatal(err)
	}

	e5, err := CreateRoutingEntryData(priv, path.Path("dog"), 4, ts.Add(time.Hour*3))
	if err != nil {
		t.Fatal(err)
	}

	e6, err := CreateRoutingEntryData(priv, path.Path("fish"), 4, ts.Add(time.Hour*3))
	if err != nil {
		t.Fatal(err)
	}

	// e1 is the only record, i hope it gets this right
	err = AssertSelected(e1, e1)
	if err != nil {
		t.Fatal(err)
	}

	// e2 has the highest sequence number
	err = AssertSelected(e2, e1, e2)
	if err != nil {
		t.Fatal(err)
	}

	// e3 has the highest sequence number
	err = AssertSelected(e3, e1, e2, e3)
	if err != nil {
		t.Fatal(err)
	}

	// e4 has a higher timeout
	err = AssertSelected(e4, e1, e2, e3, e4)
	if err != nil {
		t.Fatal(err)
	}

	// e5 has the highest sequence number
	err = AssertSelected(e5, e1, e2, e3, e4, e5)
	if err != nil {
		t.Fatal(err)
	}

	// e6 should be selected as its signature will win in the comparison
	err = AssertSelected(e6, e1, e2, e3, e4, e5, e6)
	if err != nil {
		t.Fatal(err)
	}

	_ = []interface{}{e1, e2, e3, e4, e5, e6}
}
func (x *Start) Execute(args []string) error {
	printSplashScreen()

	// set repo path
	var repoPath string
	if x.Testnet {
		repoPath = "~/.openbazaar2-testnet"
	} else {
		repoPath = "~/.openbazaar2"
	}
	expPath, _ := homedir.Expand(filepath.Clean(repoPath))

	// Database
	sqliteDB, err := db.Create(expPath, x.Password, x.Testnet)
	if err != nil {
		return err
	}

	// logging
	w := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ob.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, //days
	}
	backendStdout := logging.NewLogBackend(os.Stdout, "", 0)
	backendFile := logging.NewLogBackend(w, "", 0)
	backendStdoutFormatter := logging.NewBackendFormatter(backendStdout, stdoutLogFormat)
	backendFileFormatter := logging.NewBackendFormatter(backendFile, fileLogFormat)
	logging.SetBackend(backendFileFormatter, backendStdoutFormatter)

	ipfslogging.LdJSONFormatter()
	w2 := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ipfs.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, //days
	}
	ipfslogging.Output(w2)()

	// initialize the ipfs repo if it doesn't already exist
	err = repo.DoInit(os.Stdout, expPath, 4096, x.Testnet, x.Password, sqliteDB.Config().Init)
	if err != nil && err != repo.ErrRepoExists {
		log.Error(err)
		return err
	}

	// if the db can't be decrypted, exit
	if sqliteDB.Config().IsEncrypted() {
		return encryptedDatabaseError
	}

	// ipfs node setup
	r, err := fsrepo.Open(repoPath)
	if err != nil {
		log.Error(err)
		return err
	}
	cctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cfg, err := r.Config()
	if err != nil {
		log.Error(err)
		return err
	}

	identityKey, err := sqliteDB.Config().GetIdentityKey()
	if err != nil {
		log.Error(err)
		return err
	}
	identity, err := ipfs.IdentityFromKey(identityKey)
	if err != nil {
		return err
	}
	cfg.Identity = identity

	// Run stun and set uTP port
	if x.STUN {
		for i, addr := range cfg.Addresses.Swarm {
			m, _ := ma.NewMultiaddr(addr)
			p := m.Protocols()
			if p[0].Name == "ip4" && p[1].Name == "udp" && p[2].Name == "utp" {
				port, serr := net.Stun()
				if serr != nil {
					log.Error(serr)
					return err
				}
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm[:i], cfg.Addresses.Swarm[i+1:]...)
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm, "/ip4/0.0.0.0/udp/"+strconv.Itoa(port)+"/utp")
				break
			}
		}
	}

	ncfg := &ipfscore.BuildCfg{
		Repo:   r,
		Online: true,
	}
	nd, err := ipfscore.NewNode(cctx, ncfg)
	if err != nil {
		log.Error(err)
		return err
	}

	ctx := commands.Context{}
	ctx.Online = true
	ctx.ConfigRoot = expPath
	ctx.LoadConfig = func(path string) (*config.Config, error) {
		return fsrepo.ConfigAt(expPath)
	}
	ctx.ConstructNode = func() (*ipfscore.IpfsNode, error) {
		return nd, nil
	}

	log.Info("Peer ID: ", nd.Identity.Pretty())
	printSwarmAddrs(nd)

	// Get current directory root hash
	_, ipnskey := namesys.IpnsKeysForID(nd.Identity)
	ival, _ := nd.Repo.Datastore().Get(ipnskey.DsKey())
	val := ival.([]byte)
	dhtrec := new(dhtpb.Record)
	proto.Unmarshal(val, dhtrec)
	e := new(namepb.IpnsEntry)
	proto.Unmarshal(dhtrec.GetValue(), e)

	// Wallet
	mn, err := sqliteDB.Config().GetMnemonic()
	if err != nil {
		log.Error(err)
		return err
	}
	var params chaincfg.Params
	if !x.Testnet {
		params = chaincfg.MainNetParams
	} else {
		params = chaincfg.TestNet3Params
	}
	libbitcoinServers, err := repo.GetLibbitcoinServers(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	maxFee, err := repo.GetMaxFee(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	feeApi, err := repo.GetFeeAPI(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	low, medium, high, err := repo.GetDefaultFees(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	wallet := libbitcoin.NewLibbitcoinWallet(mn, &params, sqliteDB, libbitcoinServers, maxFee, low, medium, high, feeApi)

	// Offline messaging storage
	var storage sto.OfflineMessagingStorage
	if x.Storage == "self-hosted" || x.Storage == "" {
		storage = selfhosted.NewSelfHostedStorage(expPath, ctx)
	} else if x.Storage == "dropbox" {
		token, err := repo.GetDropboxApiToken(path.Join(expPath, "config"))
		if err != nil {
			log.Error(err)
			return err
		} else if token == "" {
			err = errors.New("Dropbox token not set in config file")
			log.Error(err)
			return err
		}
		storage, err = dropbox.NewDropBoxStorage(token)
		if err != nil {
			log.Error(err)
			return err
		}
	} else {
		err = errors.New("Invalid storage option")
		log.Error(err)
		return err
	}

	// OpenBazaar node setup
	core.Node = &core.OpenBazaarNode{
		Context:        ctx,
		IpfsNode:       nd,
		RootHash:       ipath.Path(e.Value).String(),
		RepoPath:       expPath,
		Datastore:      sqliteDB,
		Wallet:         wallet,
		MessageStorage: storage,
	}

	var gwErrc <-chan error
	var cb <-chan bool
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, cb, gwErrc = serveHTTPGateway(core.Node)
		if err != nil {
			log.Error(err)
			return err
		}
	}

	// Wait for gateway to start before starting the network service.
	// This way the websocket channel we pass into the service gets created first.
	// FIXME: There has to be a better way
	for b := range cb {
		if b == true {
			OBService := service.SetupOpenBazaarService(nd, core.Node.Broadcast, ctx, sqliteDB)
			core.Node.Service = OBService
			MR := net.NewMessageRetriever(sqliteDB, ctx, nd, OBService, 16, core.Node.SendOfflineAck)
			go MR.Run()
			core.Node.MessageRetriever = MR
			PR := net.NewPointerRepublisher(nd, sqliteDB)
			go PR.Run()
			core.Node.PointerRepublisher = PR
		}
		break
	}

	for err := range gwErrc {
		fmt.Println(err)
	}

	return nil
}
	paths := req.Arguments()

	output := LsOutput{
		Arguments: map[string]string{},
		Objects:   map[string]*LsObject{},
	}

	for _, fpath := range paths {
		ctx := req.Context()
		resolver := &path.Resolver{
			DAG:         node.DAG,
			ResolveOnce: uio.ResolveUnixfsOnce,
		}

		merkleNode, err := core.Resolve(ctx, node.Namesys, resolver, path.Path(fpath))
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		c := merkleNode.Cid()
		hash := c.String()

		output.Arguments[fpath] = hash

		if _, ok := output.Objects[hash]; ok {
			// duplicate argument for an already-listed node
			continue
		}
			popts.pubValidTime = d
		}

		ctx := req.Context()
		if ttl, found, _ := req.Option("ttl").String(); found {
			d, err := time.ParseDuration(ttl)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			ctx = context.WithValue(ctx, "ipns-publish-ttl", d)
		}

		output, err := publish(ctx, n, n.PrivateKey, path.Path(pstr), popts)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		res.SetOutput(output)
	},
	Marshalers: cmds.MarshalerMap{
		cmds.Text: func(res cmds.Response) (io.Reader, error) {
			v := res.Output().(*IpnsEntry)
			s := fmt.Sprintf("Published to %s: %s\n", v.Name, v.Value)
			return strings.NewReader(s), nil
		},
	},
	Type: IpnsEntry{},
}
func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	urlPath := r.URL.Path

	if i.config.BlockList != nil && i.config.BlockList.ShouldBlock(urlPath) {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("403 - Forbidden"))
		return
	}

	nd, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		webError(w, "Path Resolve error", err, http.StatusBadRequest)
		return
	}

	etag := gopath.Base(urlPath)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	w.Header().Set("X-IPFS-Path", urlPath)

	// Suborigin header, sandboxes apps from each other in the browser (even
	// though they are served from the same gateway domain). NOTE: This is not
	// yet widely supported by browsers.
	pathRoot := strings.SplitN(urlPath, "/", 4)[2]
	w.Header().Set("Suborigin", pathRoot)

	dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
	if err != nil && err != uio.ErrIsDir {
		// not a directory and still an error
		internalWebError(w, err)
		return
	}

	// set these headers _after_ the error, for we may just not have it
	// and don't want the client to cache a 500 response...
	// and only if it's /ipfs!
	// TODO: break this out when we split /ipfs /ipns routes.
	modtime := time.Now()
	if strings.HasPrefix(urlPath, ipfsPathPrefix) {
		w.Header().Set("Etag", etag)
		w.Header().Set("Cache-Control", "public, max-age=29030400")

		// set modtime to a really long time ago, since files are immutable and should stay cached
		modtime = time.Unix(1, 0)
	}

	if err == nil {
		defer dr.Close()
		_, name := gopath.Split(urlPath)
		http.ServeContent(w, r, name, modtime, dr)
		return
	}

	// storage for directory listing
	var dirListing []directoryItem
	// loop through files
	foundIndex := false
	for _, link := range nd.Links {
		if link.Name == "index.html" {
			if urlPath[len(urlPath)-1] != '/' {
				http.Redirect(w, r, urlPath+"/", 302)
				return
			}

			log.Debug("found index")
			foundIndex = true
			// return index page instead.
			nd, err := core.Resolve(ctx, i.node, path.Path(urlPath+"/index.html"))
			if err != nil {
				internalWebError(w, err)
				return
			}
			dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
			if err != nil {
				internalWebError(w, err)
				return
			}
			defer dr.Close()

			// write to request
			if r.Method != "HEAD" {
				io.Copy(w, dr)
			}
			break
		}

		di := directoryItem{link.Size, link.Name, gopath.Join(urlPath, link.Name)}
		dirListing = append(dirListing, di)
	}

	if !foundIndex {
		// template and return directory listing
		hndlr := webHandler{
			"listing": dirListing,
			"path":    urlPath,
		}

		if r.Method != "HEAD" {
			if err := i.dirList.Execute(w, hndlr); err != nil {
				internalWebError(w, err)
				return
			}
		}
	}
}
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			ctx = context.WithValue(ctx, "ipns-publish-ttl", d)
		}

		kname, _, _ := req.Option("key").String()
		k, err := n.GetKey(kname)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		output, err := publish(ctx, n, k, path.Path(pstr), popts)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		res.SetOutput(output)
	},
	Marshalers: cmds.MarshalerMap{
		cmds.Text: func(res cmds.Response) (io.Reader, error) {
			v := res.Output().(*IpnsEntry)
			s := fmt.Sprintf("Published to %s: %s\n", v.Name, v.Value)
			return strings.NewReader(s), nil
		},
	},
	Type: IpnsEntry{},
}
	node, err := req.Context().GetNode()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	paths := req.Arguments()

	output := LsOutput{
		Arguments: map[string]string{},
		Objects:   map[string]*LsObject{},
	}

	for _, fpath := range paths {
		ctx := req.Context().Context
		merkleNode, err := core.Resolve(ctx, node, path.Path(fpath))
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		key, err := merkleNode.Key()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		hash := key.B58String()
		output.Arguments[fpath] = hash

		if _, ok := output.Objects[hash]; ok {
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): either ask mildred about the flow of this or rewrite it
	webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError)
	return

	urlPath := r.URL.Path
	pathext := urlPath[5:]
	var err error
	if urlPath == ipfsPathPrefix+"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn/" {
		i.putEmptyDirHandler(w, r)
		return
	}

	var newnode *dag.Node
	if pathext[len(pathext)-1] == '/' {
		newnode = uio.NewEmptyDirectory()
	} else {
		newnode, err = i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
	}

	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	if len(components) < 1 {
		err = fmt.Errorf("Cannot override existing object")
		webError(w, "http gateway", err, http.StatusBadRequest)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	// TODO(cryptix): could this be core.Resolve() too?
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	// resolving path components into merkledag nodes. if a component does not
	// resolve, create empty directories (which will be linked and populated below.)
	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if _, ok := err.(path.ErrNoLink); ok {
		// Create empty directories, links will be made further down the code
		for len(pathNodes) < len(components) {
			pathNodes = append(pathNodes, uio.NewDirectory(i.node.DAG).GetNode())
		}
	} else if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	for i := len(pathNodes) - 1; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	err = i.node.DAG.AddRecursive(newnode)
	if err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components, "/"), http.StatusCreated)
}
func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	// TODO(cryptix): assumes len(pathNodes) > 1 - not found is an error above?
	err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
	if err != nil {
		webError(w, "Could not delete link", err, http.StatusBadRequest)
		return
	}

	newnode := pathNodes[len(pathNodes)-1]
	for i := len(pathNodes) - 2; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, gopath.Join(ipfsPathPrefix+key.String(), path.Join(components[:len(components)-1])), http.StatusCreated)
}
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// get options early -> exit early in case of error
	if _, _, err := req.Option("headers").Bool(); err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	paths := req.Arguments()

	var dagnodes []*merkledag.Node
	for _, fpath := range paths {
		dagnode, err := core.Resolve(req.Context(), node, path.Path(fpath))
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
		dagnodes = append(dagnodes, dagnode)
	}

	output := make([]LsObject, len(req.Arguments()))
	for i, dagnode := range dagnodes {
		output[i] = LsObject{
			Hash:  paths[i],
			Links: make([]LsLink, len(dagnode.Links)),
		}
		for j, link := range dagnode.Links {
			link.Node, err = link.GetNode(req.Context(), node.DAG)
func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	urlPath := r.URL.Path

	if i.config.BlockList != nil && i.config.BlockList.ShouldBlock(urlPath) {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("403 - Forbidden"))
		return
	}

	nd, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		webError(w, "Path Resolve error", err, http.StatusBadRequest)
		return
	}

	etag := gopath.Base(urlPath)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("X-IPFS-Path", urlPath)

	// Suborigin header, sandboxes apps from each other in the browser (even
	// though they are served from the same gateway domain). NOTE: This is not
	// yet widely supported by browsers.
	pathRoot := strings.SplitN(urlPath, "/", 4)[2]
	w.Header().Set("Suborigin", pathRoot)

	dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
	if err != nil && err != uio.ErrIsDir {
		// not a directory and still an error
		internalWebError(w, err)
		return
	}

	// set these headers _after_ the error, for we may just not have it
	// and don't want the client to cache a 500 response...
	// and only if it's /ipfs!
	// TODO: break this out when we split /ipfs /ipns routes.
	modtime := time.Now()
	if strings.HasPrefix(urlPath, ipfsPathPrefix) {
		w.Header().Set("Etag", etag)
		w.Header().Set("Cache-Control", "public, max-age=29030400")

		// set modtime to a really long time ago, since files are immutable and should stay cached
		modtime = time.Unix(1, 0)
	}

	if err == nil {
		defer dr.Close()
		_, name := gopath.Split(urlPath)
		http.ServeContent(w, r, name, modtime, dr)
		return
	}

	// storage for directory listing
	var dirListing []directoryItem
	// loop through files
	foundIndex := false
	for _, link := range nd.Links {
		if link.Name == "index.html" {
			if urlPath[len(urlPath)-1] != '/' {
				http.Redirect(w, r, urlPath+"/", 302)
				return
			}

			log.Debug("found index")
			foundIndex = true
			// return index page instead.
			nd, err := core.Resolve(ctx, i.node, path.Path(urlPath+"/index.html"))
			if err != nil {
				internalWebError(w, err)
				return
			}
			dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
			if err != nil {
				internalWebError(w, err)
				return
			}
			defer dr.Close()

			// write to request
			if r.Method != "HEAD" {
				io.Copy(w, dr)
			}
			break
		}

		di := directoryItem{link.Size, link.Name, gopath.Join(urlPath, link.Name)}
		dirListing = append(dirListing, di)
	}

	if !foundIndex {
		if r.Method != "HEAD" {
			// construct the correct back link
			// https://github.com/ipfs/go-ipfs/issues/1365
			var backLink string = r.URL.Path

			// don't go further up than /ipfs/$hash/
			pathSplit := strings.Split(backLink, "/")
			switch {
			// keep backlink
			case len(pathSplit) == 3: // url: /ipfs/$hash

			// keep backlink
			case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/

			// add the correct link depending on whether the path ends with a slash
			default:
				if strings.HasSuffix(backLink, "/") {
					backLink += "./.."
				} else {
					backLink += "/.."
				}
			}

			tplData := listingTemplateData{
				Listing:  dirListing,
				Path:     urlPath,
				BackLink: backLink,
			}
			err := listingTemplate.Execute(w, tplData)
			if err != nil {
				internalWebError(w, err)
				return
			}
		}
	}
}
func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(i.node.Context(), time.Hour) // the hour is a hard fallback, we don't expect it to happen, but just in case
	defer cancel()

	if cn, ok := w.(http.CloseNotifier); ok {
		clientGone := cn.CloseNotify()
		go func() {
			select {
			case <-clientGone:
			case <-ctx.Done():
			}
			cancel()
		}()
	}

	urlPath := r.URL.Path

	// If the gateway is behind a reverse proxy and mounted at a sub-path,
	// the prefix header can be set to signal this sub-path.
	// It will be prepended to links in directory listings and the index.html redirect.
	prefix := ""
	if prefixHdr := r.Header["X-Ipfs-Gateway-Prefix"]; len(prefixHdr) > 0 {
		log.Debugf("X-Ipfs-Gateway-Prefix: %s", prefixHdr[0])
		prefix = prefixHdr[0]
	}

	// IPNSHostnameOption might have constructed an IPNS path using the Host header.
	// In this case, we need the original path for constructing redirects
	// and links that match the requested URL.
	// For example, http://example.net would become /ipns/example.net, and
	// the redirects and links would end up as http://example.net/ipns/example.net
	originalUrlPath := prefix + urlPath
	ipnsHostname := false
	if hdr := r.Header["X-Ipns-Original-Path"]; len(hdr) > 0 {
		originalUrlPath = prefix + hdr[0]
		ipnsHostname = true
	}

	if i.config.BlockList != nil && i.config.BlockList.ShouldBlock(urlPath) {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("403 - Forbidden"))
		return
	}

	nd, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		webError(w, "Path Resolve error", err, http.StatusBadRequest)
		return
	}

	etag := gopath.Base(urlPath)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("X-IPFS-Path", urlPath)

	// set 'allowed' headers
	w.Header().Set("Access-Control-Allow-Headers", "X-Stream-Output, X-Chunked-Output")
	// expose those headers
	w.Header().Set("Access-Control-Expose-Headers", "X-Stream-Output, X-Chunked-Output")

	// Suborigin header, sandboxes apps from each other in the browser (even
	// though they are served from the same gateway domain).
	//
	// Omitted if the path was treated by IPNSHostnameOption(), for example
	// a request for http://example.net/ would be changed to /ipns/example.net/,
	// which would turn into an incorrect Suborigin: example.net header.
	//
	// NOTE: This is not yet widely supported by browsers.
	if !ipnsHostname {
		pathRoot := strings.SplitN(urlPath, "/", 4)[2]
		w.Header().Set("Suborigin", pathRoot)
	}

	dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
	if err != nil && err != uio.ErrIsDir {
		// not a directory and still an error
		internalWebError(w, err)
		return
	}

	// set these headers _after_ the error, for we may just not have it
	// and don't want the client to cache a 500 response...
	// and only if it's /ipfs!
	// TODO: break this out when we split /ipfs /ipns routes.
	modtime := time.Now()
	if strings.HasPrefix(urlPath, ipfsPathPrefix) {
		w.Header().Set("Etag", etag)
		w.Header().Set("Cache-Control", "public, max-age=29030400")

		// set modtime to a really long time ago, since files are immutable and should stay cached
		modtime = time.Unix(1, 0)
	}

	if err == nil {
		defer dr.Close()
		name := gopath.Base(urlPath)
		http.ServeContent(w, r, name, modtime, dr)
		return
	}

	// storage for directory listing
	var dirListing []directoryItem
	// loop through files
	foundIndex := false
	for _, link := range nd.Links {
		if link.Name == "index.html" {
			log.Debugf("found index.html link for %s", urlPath)
			foundIndex = true

			if urlPath[len(urlPath)-1] != '/' {
				// See comment above where originalUrlPath is declared.
				http.Redirect(w, r, originalUrlPath+"/", 302)
				log.Debugf("redirect to %s", originalUrlPath+"/")
				return
			}

			// return index page instead.
			nd, err := core.Resolve(ctx, i.node, path.Path(urlPath+"/index.html"))
			if err != nil {
				internalWebError(w, err)
				return
			}
			dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
			if err != nil {
				internalWebError(w, err)
				return
			}
			defer dr.Close()

			// write to request
			http.ServeContent(w, r, "index.html", modtime, dr)
			break
		}

		// See comment above where originalUrlPath is declared.
		di := directoryItem{humanize.Bytes(link.Size), link.Name, gopath.Join(originalUrlPath, link.Name)}
		dirListing = append(dirListing, di)
	}

	if !foundIndex {
		if r.Method != "HEAD" {
			// construct the correct back link
			// https://github.com/ipfs/go-ipfs/issues/1365
			var backLink string = prefix + urlPath

			// don't go further up than /ipfs/$hash/
			pathSplit := path.SplitList(backLink)
			switch {
			// keep backlink
			case len(pathSplit) == 3: // url: /ipfs/$hash

			// keep backlink
			case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/

			// add the correct link depending on whether the path ends with a slash
			default:
				if strings.HasSuffix(backLink, "/") {
					backLink += "./.."
				} else {
					backLink += "/.."
				}
			}

			// strip /ipfs/$hash from backlink if IPNSHostnameOption touched the path.
			if ipnsHostname {
				backLink = prefix + "/"
				if len(pathSplit) > 5 {
					// also strip the trailing segment, because it's a backlink
					backLinkParts := pathSplit[3 : len(pathSplit)-2]
					backLink += path.Join(backLinkParts) + "/"
				}
			}

			// See comment above where originalUrlPath is declared.
			tplData := listingTemplateData{
				Listing:  dirListing,
				Path:     originalUrlPath,
				BackLink: backLink,
			}
			err := listingTemplate.Execute(w, tplData)
			if err != nil {
				internalWebError(w, err)
				return
			}
		}
	}
}
		switch len(args) {
		case 2:
			name = args[0]
			pstr = args[1]
			if name != n.Identity.Pretty() {
				res.SetError(errors.New("keychains not yet implemented"), cmds.ErrNormal)
				return
			}
		case 1: // name = n.Identity.Pretty()
			pstr = args[0]
		}

		// TODO n.Keychain.Get(name).PrivKey
		// TODO(cryptix): is req.Context().Context a child of n.Context()?
		output, err := publish(req.Context().Context, n, n.PrivateKey, path.Path(pstr))
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		res.SetOutput(output)
	},
	Marshalers: cmds.MarshalerMap{
		cmds.Text: func(res cmds.Response) (io.Reader, error) {
			v := res.Output().(*IpnsEntry)
			s := fmt.Sprintf("Published to %s: %s\n", v.Name, v.Value)
			return strings.NewReader(s), nil
		},
	},
	Type: IpnsEntry{},
}
Note that the "--encoding" option does not affect the output, since the output
is the raw data of the object.
`,
	},

	Arguments: []cmds.Argument{
		cmds.StringArg("key", true, false, "Key of the object to retrieve, in base58-encoded multihash format.").EnableStdin(),
	},
	Run: func(req cmds.Request, res cmds.Response) {
		n, err := req.InvocContext().GetNode()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		fpath := path.Path(req.Arguments()[0])
		node, err := core.Resolve(req.Context(), n, fpath)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		res.SetOutput(bytes.NewReader(node.Data()))
	},
}

var ObjectLinksCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Outputs the links pointed to by the specified object.",
		ShortDescription: `
'ipfs object links' is a plumbing command for retrieving the links from
a DAG node. It outputs to stdout, and <key> is a base58 encoded
// NamePublish publishes the data whose key is this node's hash under the node's own IPNS name.
func (m *Self) NamePublish() error {
	p := path.Path("/ipfs/" + m.myIpns.Pretty())
	log.Println("publishing", p)
	return m.ipfsNode.Namesys.Publish(m.ctx, m.ipfsNode.PrivateKey, p)
}
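// Illustrative sketch (not from the original sources): the inverse of NamePublish,
// resolving our own IPNS name back to the path that was just published. It assumes
// the same Self struct and fields as above; Resolve is the usual counterpart to
// Publish on the go-ipfs name system of this era, but treat the exact signature
// as an assumption.
func (m *Self) NameResolve() (path.Path, error) {
	name := "/ipns/" + m.ipfsNode.Identity.Pretty()
	log.Println("resolving", name)
	return m.ipfsNode.Namesys.Resolve(m.ctx, name)
}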