func addressBytesToString(p Protocol, b []byte) (string, error) {
	switch p.Code {

	// ipv4,6
	case P_IP4, P_IP6:
		return net.IP(b).String(), nil

	// tcp udp dccp sctp
	case P_TCP, P_UDP, P_DCCP, P_SCTP:
		i := binary.BigEndian.Uint16(b)
		return strconv.Itoa(int(i)), nil

	case P_IPFS: // ipfs
		// the address is a varint-prefixed multihash string representation
		size, n := ReadVarintCode(b)
		b = b[n:]
		if len(b) != size {
			panic("inconsistent lengths")
		}
		m, err := mh.Cast(b)
		if err != nil {
			return "", err
		}
		return m.B58String(), nil
	}

	return "", fmt.Errorf("unknown protocol")
}
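// Hypothetical sketch (not from the original source) of the byte layout the
// P_IPFS branch above expects: a uvarint length prefix followed by the raw
// multihash. binary.Uvarint stands in for ReadVarintCode here. Names like
// peerIDToAddressBytes are illustrative only; assumes "encoding/binary", "fmt",
// and the mh import used above.
func peerIDToAddressBytes(id mh.Multihash) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(id)))
	return append(buf[:n], id...)
}

// Round-trip: strip the uvarint prefix, then Cast, mirroring the P_IPFS case.
func peerIDFromAddressBytes(b []byte) (mh.Multihash, error) {
	size, n := binary.Uvarint(b)
	b = b[n:]
	if uint64(len(b)) != size {
		return nil, fmt.Errorf("inconsistent lengths")
	}
	return mh.Cast(b)
}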
// IsValidHash checks whether a given hash is valid (b58 decodable, len > 0)
func IsValidHash(s string) bool {
	out := b58.Decode(s)
	if len(out) == 0 {
		return false
	}
	_, err := mh.Cast(out)
	return err == nil
}
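// Hypothetical usage sketch for IsValidHash (not from the original source):
// it hashes some bytes with mh.Sum, base58-encodes the result, and checks the
// round trip. Assumes "fmt" plus the mh import above.
func ExampleIsValidHash() {
	m, err := mh.Sum([]byte("hello"), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	fmt.Println(IsValidHash(m.B58String())) // true
	fmt.Println(IsValidHash("not-a-hash"))  // false
}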
func Decode(encoding, digest string) (mh.Multihash, error) {
	switch encoding {
	case "raw":
		return mh.Cast([]byte(digest))
	case "hex":
		b, err := hex.DecodeString(digest)
		if err != nil {
			return nil, err
		}
		return mh.Cast(b)
	case "base58":
		return mh.Cast(base58.Decode(digest))
	case "base64":
		b, err := base64.StdEncoding.DecodeString(digest)
		if err != nil {
			return nil, err
		}
		return mh.Cast(b)
	default:
		return nil, fmt.Errorf("unknown encoding: %s", encoding)
	}
}
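// Hypothetical usage sketch for Decode (not from the original source): it
// builds a sha2-256 multihash and feeds its hex form back through Decode.
// Assumes "fmt" plus the hex and mh imports above.
func ExampleDecode() {
	m, _ := mh.Sum([]byte("hello"), mh.SHA2_256, -1)

	decoded, err := Decode("hex", hex.EncodeToString(m))
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.B58String() == m.B58String()) // true
}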
func ParseKeyToPath(txt string) (Path, error) {
	if txt == "" {
		return "", ErrNoComponents
	}

	chk := b58.Decode(txt)
	if len(chk) == 0 {
		return "", errors.New("not a key")
	}

	if _, err := mh.Cast(chk); err != nil {
		return "", err
	}
	return FromKey(key.Key(chk)), nil
}
// Unmarshal decodes raw data into a *Node instance.
// The conversion uses an intermediate PBNode.
func (n *Node) Unmarshal(encoded []byte) error {
	var pbn pb.PBNode
	if err := pbn.Unmarshal(encoded); err != nil {
		return fmt.Errorf("Unmarshal failed. %v", err)
	}

	pbnl := pbn.GetLinks()
	n.Links = make([]*Link, len(pbnl))
	for i, l := range pbnl {
		n.Links[i] = &Link{Name: l.GetName(), Size: l.GetTsize()}
		h, err := mh.Cast(l.GetHash())
		if err != nil {
			return fmt.Errorf("Link hash is not valid multihash. %v", err)
		}
		n.Links[i].Hash = h
	}
	sort.Stable(LinkSlice(n.Links)) // keep links sorted

	n.Data = pbn.GetData()
	return nil
}
// ValidatePublicKeyRecord implements ValidatorFunc and
// verifies that the passed in record value is the PublicKey
// that matches the passed in key.
func ValidatePublicKeyRecord(k key.Key, val []byte) error {
	if len(k) < 5 {
		return errors.New("invalid public key record key")
	}

	prefix := string(k[:4])
	if prefix != "/pk/" {
		return errors.New("key was not prefixed with /pk/")
	}

	keyhash := []byte(k[4:])
	if _, err := mh.Cast(keyhash); err != nil {
		return fmt.Errorf("key did not contain valid multihash: %s", err)
	}

	pkh := u.Hash(val)
	if !bytes.Equal(keyhash, pkh) {
		return errors.New("public key does not match storage key")
	}
	return nil
}
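// Hypothetical sketch (not from the original source) of how a /pk/ record key
// is shaped so that the validator above accepts it: "/pk/" followed by the raw
// bytes of the multihash of the serialized public key. It assumes u.Hash
// returns a multihash of its input and that pkBytes already holds the
// serialized key; makePublicKeyRecordKey is an illustrative name only.
func makePublicKeyRecordKey(pkBytes []byte) key.Key {
	pkh := u.Hash(pkBytes) // multihash of the public key bytes
	return key.Key("/pk/" + string(pkh))
}

// With that shape, ValidatePublicKeyRecord(makePublicKeyRecordKey(pkBytes), pkBytes)
// is expected to return nil.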
func (s *DropBoxStorage) Store(peerID peer.ID, ciphertext []byte) (ma.Multiaddr, error) {
	api := dropbox.Client(s.apiToken, dropbox.Options{Verbose: true})

	hash := sha256.Sum256(ciphertext)
	hexHash := hex.EncodeToString(hash[:]) // renamed to avoid shadowing the hex package

	// Upload ciphertext
	uploadArg := files.NewCommitInfo("/" + hexHash)
	r := bytes.NewReader(ciphertext)
	_, err := api.Upload(uploadArg, r)
	if err != nil {
		return nil, err
	}

	// Set public sharing
	sharingArg := sharing.NewCreateSharedLinkArg("/" + hexHash)
	res, err := api.CreateSharedLink(sharingArg)
	if err != nil {
		return nil, err
	}

	// Create encoded multiaddr: swap the trailing "0" of the shared link
	// ("...?dl=0") for "1" to get a direct-download URL.
	url := res.Url[:len(res.Url)-1] + "1"
	b, err := mh.Encode([]byte(url), mh.SHA1)
	if err != nil {
		return nil, err
	}
	m, err := mh.Cast(b)
	if err != nil {
		return nil, err
	}
	addr, err := ma.NewMultiaddr("/ipfs/" + m.B58String() + "/https/")
	if err != nil {
		return nil, err
	}
	return addr, nil
}
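// Hypothetical sketch (not from the original source) of the inverse step a
// reader of one of these addresses would perform: the /ipfs/ component is a
// base58 multihash whose digest is the raw URL bytes, because Store wraps the
// URL with mh.Encode(..., mh.SHA1) rather than actually hashing it.
// urlFromEncodedHash is an illustrative name; assumes the mh import above.
func urlFromEncodedHash(b58hash string) (string, error) {
	m, err := mh.FromB58String(b58hash)
	if err != nil {
		return "", err
	}
	dec, err := mh.Decode(m)
	if err != nil {
		return "", err
	}
	return string(dec.Digest), nil
}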
// IDFromBytes casts a byte slice to the ID type, and validates
// the id to make sure it is a multihash.
func IDFromBytes(b []byte) (ID, error) {
	if _, err := mh.Cast(b); err != nil {
		return ID(""), err
	}
	return ID(b), nil
}
// IDFromString casts a string to the ID type, and validates
// the id to make sure it is a multihash.
func IDFromString(s string) (ID, error) {
	if _, err := mh.Cast([]byte(s)); err != nil {
		return ID(""), err
	}
	return ID(s), nil
}
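// Hypothetical usage sketch (not from the original source): an ID is the raw
// bytes of a multihash held in a string, so a valid multihash converts cleanly
// while arbitrary text does not. Assumes "fmt" plus the mh import above.
func ExampleIDFromString() {
	m, _ := mh.Sum([]byte("some public key bytes"), mh.SHA2_256, -1)

	id, err := IDFromString(string(m))
	fmt.Println(err == nil, len(id) > 0) // true true

	_, err = IDFromString("definitely not a multihash")
	fmt.Println(err != nil) // true
}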
func TestMultisetRoundtrip(t *testing.T) {
	dstore := dssync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := blockservice.New(bstore, offline.Exchange(bstore))
	dag := merkledag.NewDAGService(bserv)

	fn := func(m map[key.Key]uint16) bool {
		// Convert any invalid multihashes in the input into valid ones
		for k, v := range m {
			if _, err := mh.Cast([]byte(k)); err != nil {
				delete(m, k)
				m[key.Key(u.Hash([]byte(k)))] = v
			}
		}

		// Generate a smaller range for refcounts than full uint64, as
		// otherwise this just becomes overly cpu heavy, splitting it
		// out into too many items. That means we need to convert to
		// the right kind of map. As storeMultiset mutates the map as
		// part of its bookkeeping, this is actually good.
		refcounts := copyMap(m)

		ctx := context.Background()
		n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys)
		if err != nil {
			t.Fatalf("storing multiset: %v", err)
		}

		// Check that the node n is in the DAG
		k, err := n.Key()
		if err != nil {
			t.Fatalf("Could not get key: %v", err)
		}
		_, err = dag.Get(ctx, k)
		if err != nil {
			t.Fatalf("Could not get node: %v", err)
		}

		root := &merkledag.Node{}
		const linkName = "dummylink"
		if err := root.AddNodeLink(linkName, n); err != nil {
			t.Fatalf("adding link to root node: %v", err)
		}

		roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys)
		if err != nil {
			t.Fatalf("loading multiset: %v", err)
		}

		orig := copyMap(m)
		success := true
		for k, want := range orig {
			if got, ok := roundtrip[k]; ok {
				if got != want {
					success = false
					t.Logf("refcount changed: %v -> %v for %q", want, got, k)
				}
				delete(orig, k)
				delete(roundtrip, k)
			}
		}
		for k, v := range orig {
			success = false
			t.Logf("refcount missing: %v for %q", v, k)
		}
		for k, v := range roundtrip {
			success = false
			t.Logf("refcount extra: %v for %q", v, k)
		}
		return success
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Fatal(err)
	}
}
// AllKeysChan runs a query for keys from the blockstore.
// this is very simplistic, in the future, take dsq.Query as a param?
//
// AllKeysChan respects context
func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
	// KeysOnly, because that would be _a lot_ of data.
	q := dsq.Query{KeysOnly: true}
	// datastore/namespace does *NOT* fix up Query.Prefix
	q.Prefix = BlockPrefix.String()
	res, err := bs.datastore.Query(q)
	if err != nil {
		return nil, err
	}

	// this function is here to compartmentalize
	get := func() (key.Key, bool) {
		select {
		case <-ctx.Done():
			return "", false
		case e, more := <-res.Next():
			if !more {
				return "", false
			}
			if e.Error != nil {
				log.Debug("blockstore.AllKeysChan got err:", e.Error)
				return "", false
			}

			// need to convert to key.Key using key.KeyFromDsKey.
			k, err := key.KeyFromDsKey(ds.NewKey(e.Key))
			if err != nil {
				log.Warningf("error parsing key from DsKey: %s", err)
				return "", true
			}
			log.Debug("blockstore: query got key", k)

			// key must be a multihash. else ignore it.
			_, err = mh.Cast([]byte(k))
			if err != nil {
				log.Warningf("key from datastore was not a multihash: %s", err)
				return "", true
			}

			return k, true
		}
	}

	output := make(chan key.Key, dsq.KeysOnlyBufSize)
	go func() {
		defer func() {
			res.Process().Close() // ensure exit (signals early exit, too)
			close(output)
		}()
		for {
			k, ok := get()
			if !ok {
				return
			}
			if k == "" {
				continue
			}
			select {
			case <-ctx.Done():
				return
			case output <- k:
			}
		}
	}()

	return output, nil
}
// resolveOnce implements resolver. Uses the IPFS routing system to
// resolve SFS-like names.
func (r *routingResolver) resolveOnce(ctx context.Context, name string) (path.Path, error) {
	log.Debugf("RoutingResolve: '%s'", name)
	cached, ok := r.cacheGet(name)
	if ok {
		return cached, nil
	}

	hash, err := mh.FromB58String(name)
	if err != nil {
		// name should be a multihash. if it isn't, error out here.
		log.Warningf("RoutingResolve: bad input hash: [%s]\n", name)
		return "", err
	}

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	var entry *pb.IpnsEntry
	var pubkey ci.PubKey

	resp := make(chan error, 2)
	go func() {
		ipnsKey := key.Key(h)
		val, err := r.routing.GetValue(ctx, ipnsKey)
		if err != nil {
			log.Warning("RoutingResolve get failed.")
			resp <- err
			return
		}

		entry = new(pb.IpnsEntry)
		err = proto.Unmarshal(val, entry)
		if err != nil {
			resp <- err
			return
		}
		resp <- nil
	}()

	go func() {
		// name should be a public key retrievable from ipfs
		pubk, err := routing.GetPublicKey(r.routing, ctx, hash)
		if err != nil {
			resp <- err
			return
		}
		pubkey = pubk
		resp <- nil
	}()

	for i := 0; i < 2; i++ {
		err = <-resp
		if err != nil {
			return "", err
		}
	}

	hsh, _ := pubkey.Hash()
	log.Debugf("pk hash = %s", key.Key(hsh))

	// check sig with pk
	if ok, err := pubkey.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pubkey)
	}

	// ok sig checks out. this is a valid name.
	// check for old style record:
	valh, err := mh.Cast(entry.GetValue())
	if err != nil {
		// Not a multihash, probably a new style record
		p, err := path.ParsePath(string(entry.GetValue()))
		if err != nil {
			return "", err
		}

		r.cacheSet(name, p, entry)
		return p, nil
	} else {
		// It's an old style multihash record
		log.Warning("Detected old style multihash record")
		p := path.FromKey(key.Key(valh))
		r.cacheSet(name, p, entry)
		return p, nil
	}
}
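// Hypothetical sketch (not from the original source) of the old-vs-new record
// check at the end of resolveOnce: an old style record value is the raw bytes
// of a multihash, so mh.Cast succeeds, while a new style record value is a
// path string like "/ipfs/<hash>", which fails mh.Cast and is parsed as a
// path instead. Assumes "fmt" plus the mh import above; names are
// illustrative only.
func classifyIpnsValue(value []byte) string {
	if _, err := mh.Cast(value); err == nil {
		return "old style (raw multihash)"
	}
	return "new style (path string)"
}

func ExampleClassifyIpnsValue() {
	m, _ := mh.Sum([]byte("some block"), mh.SHA2_256, -1)
	fmt.Println(classifyIpnsValue(m))                                // old style (raw multihash)
	fmt.Println(classifyIpnsValue([]byte("/ipfs/" + m.B58String()))) // new style (path string)
}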