// deserializeNode converts the Node object into a real dag.ProtoNode.
func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
	dagnode := new(dag.ProtoNode)
	switch dataFieldEncoding {
	case "text":
		dagnode.SetData([]byte(nd.Data))
	case "base64":
		data, err := base64.StdEncoding.DecodeString(nd.Data)
		if err != nil {
			return nil, err
		}
		dagnode.SetData(data)
	default:
		return nil, fmt.Errorf("unknown data field encoding")
	}

	dagnode.SetLinks(make([]*node.Link, len(nd.Links)))
	for i, link := range nd.Links {
		c, err := cid.Decode(link.Hash)
		if err != nil {
			return nil, err
		}

		dagnode.Links()[i] = &node.Link{
			Name: link.Name,
			Size: link.Size,
			Cid:  c,
		}
	}

	return dagnode, nil
}
// ResolveToCid resolves a path to a cid.
//
// It first checks if the path is already in the form of just a cid (<cid> or
// /ipfs/<cid>) and returns immediately if so. Otherwise, it falls back onto
// Resolve to perform resolution of the dagnode being referenced.
func ResolveToCid(ctx context.Context, n *IpfsNode, p path.Path) (*cid.Cid, error) {

	// If the path is simply a cid, parse and return it. Parsed paths are already
	// normalized (read: prepended with /ipfs/ if needed), so Segments()[1] should
	// always be the cid.
	if p.IsJustAKey() {
		return cid.Decode(p.Segments()[1])
	}

	// Fall back onto regular dagnode resolution. Retrieve the second-to-last
	// segment of the path and resolve its link to the last segment.
	head, tail, err := p.PopLastSegment()
	if err != nil {
		return nil, err
	}
	dagnode, err := Resolve(ctx, n.Namesys, n.Resolver, head)
	if err != nil {
		return nil, err
	}

	// Extract and return the cid of the link to the target dag node.
	link, _, err := dagnode.ResolveLink([]string{tail})
	if err != nil {
		return nil, err
	}

	return link.Cid, nil
}
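// Illustrative usage sketch (not part of the original source): how a caller
// might invoke ResolveToCid on a parsed path. It assumes it sits in the same
// package as ResolveToCid with fmt and path imported; the CID string is reused
// from elsewhere in this file and the trailing "some-link" segment is
// hypothetical.
func exampleResolveToCid(ctx context.Context, n *IpfsNode) error {
	p, err := path.ParsePath("/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/some-link")
	if err != nil {
		return err
	}

	c, err := ResolveToCid(ctx, n, p)
	if err != nil {
		return err
	}

	fmt.Println("resolved to:", c.String())
	return nil
}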
func init() {
	e, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	if err != nil {
		log.Error("failed to decode empty key constant")
		os.Exit(1)
	}
	emptyKey = e
}
func ParseCidToPath(txt string) (Path, error) {
	if txt == "" {
		return "", ErrNoComponents
	}

	c, err := cid.Decode(txt)
	if err != nil {
		return "", err
	}

	return FromCid(c), nil
}
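// Illustrative usage sketch (assumption, not from the original source):
// ParseCidToPath validates a bare CID string and wraps it into a Path via
// FromCid. Assumes the same package as ParseCidToPath with fmt imported; the
// CID string is reused from elsewhere in this file.
func exampleParseCidToPath() {
	p, err := ParseCidToPath("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	if err != nil {
		fmt.Println("not a valid cid:", err)
		return
	}
	// Expected to yield an /ipfs/-prefixed path for the given cid.
	fmt.Println(p)
}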
func TestDifferentKeyObjectsWork(t *testing.T) {
	arc, bs, cd := createStores(t)

	bs.Put(exampleBlock)
	arc.Get(exampleBlock.Cid())

	trap("has hit datastore", cd, t)
	cidstr := exampleBlock.Cid().String()

	ncid, err := cid.Decode(cidstr)
	if err != nil {
		t.Fatal(err)
	}

	arc.Has(ncid)
}
func getBlockForKey(req cmds.Request, skey string) (blocks.Block, error) {
	if len(skey) == 0 {
		return nil, fmt.Errorf("zero length cid invalid")
	}

	n, err := req.InvocContext().GetNode()
	if err != nil {
		return nil, err
	}

	c, err := cid.Decode(skey)
	if err != nil {
		return nil, err
	}

	b, err := n.Blocks.GetBlock(req.Context(), c)
	if err != nil {
		return nil, err
	}

	log.Debugf("ipfs block: got block with key: %s", b.Cid())
	return b, nil
}
// SplitAbsPath cleans up and splits fpath. It extracts the first component
// (which must be a Multihash) and returns it separately.
func SplitAbsPath(fpath Path) (*cid.Cid, []string, error) {
	log.Debugf("Resolve: '%s'", fpath)

	parts := fpath.Segments()
	if parts[0] == "ipfs" {
		parts = parts[1:]
	}

	// if nothing, bail.
	if len(parts) == 0 {
		return nil, nil, ErrNoComponents
	}

	// first element in the path is a cid
	c, err := cid.Decode(parts[0])
	if err != nil {
		log.Debug("given path element is not a cid.\n")
		return nil, nil, err
	}

	return c, parts[1:], nil
}
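// Illustrative usage sketch (assumption, not from the original source):
// SplitAbsPath separating the leading cid from the remaining segments of an
// /ipfs/ path. Assumes the same package as SplitAbsPath with fmt imported;
// the "a/b" tail is purely hypothetical.
func exampleSplitAbsPath() {
	c, rest, err := SplitAbsPath(Path("/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b"))
	if err != nil {
		fmt.Println("split failed:", err)
		return
	}
	// rest should hold the trailing segments after the cid, e.g. ["a" "b"].
	fmt.Println(c.String(), rest)
}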
func TestMetadata(t *testing.T) {
	ctx := context.Background()
	// Make some random node
	ds := getDagserv(t)
	data := make([]byte, 1000)
	u.NewTimeSeededRand().Read(data)
	r := bytes.NewReader(data)

	nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r))
	if err != nil {
		t.Fatal(err)
	}

	c := nd.Cid()

	m := new(ft.Metadata)
	m.MimeType = "THIS IS A TEST"

	// Such effort, many compromise
	ipfsnode := &core.IpfsNode{DAG: ds}

	mdk, err := AddMetadataTo(ipfsnode, c.String(), m)
	if err != nil {
		t.Fatal(err)
	}

	rec, err := Metadata(ipfsnode, mdk)
	if err != nil {
		t.Fatal(err)
	}
	if rec.MimeType != m.MimeType {
		t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType)
	}

	cdk, err := cid.Decode(mdk)
	if err != nil {
		t.Fatal(err)
	}

	retnode, err := ds.Get(ctx, cdk)
	if err != nil {
		t.Fatal(err)
	}

	rtnpb, ok := retnode.(*merkledag.ProtoNode)
	if !ok {
		t.Fatal("expected protobuf node")
	}

	ndr, err := uio.NewDagReader(ctx, rtnpb, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(ndr)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(out, data) {
		t.Fatal("read incorrect data")
	}
}
	Options: []cmds.Option{
		cmds.BoolOption("force", "f", "Ignore nonexistent blocks.").Default(false),
		cmds.BoolOption("quiet", "q", "Write minimal output.").Default(false),
	},
	Run: func(req cmds.Request, res cmds.Response) {
		n, err := req.InvocContext().GetNode()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		hashes := req.Arguments()
		force, _, _ := req.Option("force").Bool()
		quiet, _, _ := req.Option("quiet").Bool()

		cids := make([]*cid.Cid, 0, len(hashes))
		for _, hash := range hashes {
			c, err := cid.Decode(hash)
			if err != nil {
				res.SetError(fmt.Errorf("invalid content id: %s (%s)", hash, err), cmds.ErrNormal)
				return
			}

			cids = append(cids, c)
		}

		outChan := make(chan interface{})
		err = util.RmBlocks(n.Blockstore, n.Pinning, outChan, cids, util.RmBlocksOpts{
			Quiet: quiet,
			Force: force,
		})
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		dht, ok := n.Routing.(*ipdht.IpfsDHT)
		if !ok {
			res.SetError(ErrNotDHT, cmds.ErrNormal)
			return
		}

		numProviders := 20

		outChan := make(chan interface{})
		res.SetOutput((<-chan interface{})(outChan))

		events := make(chan *notif.QueryEvent)
		ctx := notif.RegisterForQueryEvents(req.Context(), events)

		c, err := cid.Decode(req.Arguments()[0])
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		pchan := dht.FindProvidersAsync(ctx, c, numProviders)
		go func() {
			defer close(outChan)
			for e := range events {
				outChan <- e
			}
		}()

		go func() {
			defer close(events)
		}

		if !nd.OnlineMode() {
			res.SetError(errNotOnline, cmds.ErrClient)
			return
		}

		bs, ok := nd.Exchange.(*bitswap.Bitswap)
		if !ok {
			res.SetError(u.ErrCast(), cmds.ErrNormal)
			return
		}

		var ks []*cid.Cid
		for _, arg := range req.Arguments() {
			c, err := cid.Decode(arg)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			ks = append(ks, c)
		}

		bs.CancelWants(ks)
	},
}

var showWantlistCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Show blocks currently on the wantlist.",
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): move me to ServeHTTP and pass into all handlers
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	rootPath, err := path.ParsePath(r.URL.Path)
	if err != nil {
		webError(w, "putHandler: IPFS path not valid", err, http.StatusBadRequest)
		return
	}

	rsegs := rootPath.Segments()
	if rsegs[0] == ipnsPathPrefix {
		webError(w, "putHandler: updating named entries not supported", errors.New("WritableGateway: ipns put not supported"), http.StatusBadRequest)
		return
	}

	var newnode node.Node
	if rsegs[len(rsegs)-1] == "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
		newnode = ft.EmptyDirNode()
	} else {
		putNode, err := i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
		newnode = putNode
	}

	var newPath string
	if len(rsegs) > 1 {
		newPath = path.Join(rsegs[2:])
	}

	var newcid *cid.Cid
	rnode, err := core.Resolve(ctx, i.node.Namesys, i.node.Resolver, rootPath)
	switch ev := err.(type) {
	case path.ErrNoLink:
		// ev.Node < node where resolve failed
		// ev.Name < new link
		// but we need to patch from the root
		c, err := cid.Decode(rsegs[1])
		if err != nil {
			webError(w, "putHandler: bad input path", err, http.StatusBadRequest)
			return
		}

		rnode, err := i.node.DAG.Get(ctx, c)
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}

		pbnd, ok := rnode.(*dag.ProtoNode)
		if !ok {
			webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
			return
		}

		e := dagutils.NewDagEditor(pbnd, i.node.DAG)
		err = e.InsertNodeAtPath(ctx, newPath, newnode, ft.EmptyDirNode)
		if err != nil {
			webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError)
			return
		}

		nnode, err := e.Finalize(i.node.DAG)
		if err != nil {
			webError(w, "putHandler: could not get node", err, http.StatusInternalServerError)
			return
		}

		newcid = nnode.Cid()

	case nil:
		pbnd, ok := rnode.(*dag.ProtoNode)
		if !ok {
			webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
			return
		}

		pbnewnode, ok := newnode.(*dag.ProtoNode)
		if !ok {
			webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
			return
		}

		// object set-data case
		pbnd.SetData(pbnewnode.Data())

		newcid, err = i.node.DAG.Add(pbnd)
		if err != nil {
			nnk := newnode.Cid()
			rk := pbnd.Cid()
			webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.String(), rk.String()), err, http.StatusInternalServerError)
			return
		}
	default:
		log.Warningf("putHandler: unhandled resolve error %T", ev)
		webError(w, "could not resolve root DAG", ev, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", newcid.String())
	http.Redirect(w, r, gopath.Join(ipfsPathPrefix, newcid.String(), newPath), http.StatusCreated)
}
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

	errs := make(chan error)
	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
	if err != nil {
		t.Fatal(err)
	}
	adder.Out = out

	dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA"))
	rfa := files.NewReaderFile("a", "a", dataa, nil)

	// make two files with pipes so we can 'pause' the add for timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile("b", "b", piper, nil)

	datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
	rfd := files.NewReaderFile("d", "d", datad, nil)

	slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd})

	addDone := make(chan struct{})
	go func() {
		defer close(addDone)
		defer close(out)
		err := adder.AddFile(slf)
		if err != nil {
			t.Fatal(err)
		}
	}()

	addedHashes := make(map[string]struct{})
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldnt complete yet")
	}

	var gcout <-chan *cid.Cid
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcchan, err := gc.GC(context.Background(), node.Blockstore, node.DAG, node.Pinning, nil)
		if err != nil {
			log.Error("GC ERROR:", err)
			errs <- err
			return
		}

		gcout = gcchan
	}()

	// gc shouldnt start until we let the add finish its current file.
	pipew.Write([]byte("some data for file b"))

	select {
	case <-gcstarted:
		t.Fatal("gc shouldnt have started yet")
	case err := <-errs:
		t.Fatal(err)
	default:
	}

	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock

	// finish write and unblock gc
	pipew.Close()

	// receive next object from adder
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case err := <-errs:
		t.Fatal(err)
	}

	select {
	case <-gcstarted:
	case err := <-errs:
		t.Fatal(err)
	}

	for k := range gcout {
		if _, ok := addedHashes[k.String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}

	var last *cid.Cid
	for a := range out {
		// wait for it to finish
		c, err := cid.Decode(a.(*AddedObject).Hash)
		if err != nil {
			t.Fatal(err)
		}
		last = c
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()

	set := cid.NewSet()
	err = dag.EnumerateChildren(ctx, node.DAG, last, set.Visit, false)
	if err != nil {
		t.Fatal(err)
	}
}