// getLocal attempts to retrieve the value from the datastore
func (dht *IpfsDHT) getLocal(key key.Key) ([]byte, error) {
	log.Debugf("getLocal %s", key)
	v, err := dht.datastore.Get(key.DsKey())
	if err != nil {
		return nil, err
	}
	log.Debug("found in db")

	byt, ok := v.([]byte)
	if !ok {
		return nil, errors.New("value stored in datastore not []byte")
	}
	rec := new(pb.Record)
	err = proto.Unmarshal(byt, rec)
	if err != nil {
		return nil, err
	}

	// TODO: 'if paranoid'
	if u.Debug {
		err = dht.verifyRecordLocally(rec)
		if err != nil {
			log.Debugf("local record verify failed: %s (discarded)", err)
			return nil, err
		}
	}

	return rec.GetValue(), nil
}
// FIXME(brian): is this method meant to simulate putting a value into the network?
func (c *client) PutValue(ctx context.Context, key key.Key, val []byte) error {
	log.Debugf("PutValue: %s", key)
	rec := new(dhtpb.Record)
	rec.Value = val
	rec.Key = proto.String(string(key))
	rec.TimeReceived = proto.String(u.FormatRFC3339(time.Now()))
	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}
	return c.datastore.Put(key.DsKey(), data)
}
// verify checks the record's signature using the author's public key from the
// peerstore, then runs the registered validators against the record.
func verify(ps peer.Peerstore, r *dhtpb.Record) error {
	v := make(record.Validator)
	v["pk"] = record.PublicKeyValidator
	p := peer.ID(r.GetAuthor())
	pk := ps.PubKey(p)
	if pk == nil {
		return fmt.Errorf("do not have public key for %s", p)
	}
	if err := record.CheckRecordSig(r, pk); err != nil {
		return err
	}
	if err := v.VerifyRecord(r); err != nil {
		return err
	}
	return nil
}
// verifyRecordLocally attempts to verify a record. if we do not have the public
// key, we fail. we do not search the dht.
func (dht *IpfsDHT) verifyRecordLocally(r *pb.Record) error {
	if len(r.Signature) > 0 {
		// First, validate the signature
		p := peer.ID(r.GetAuthor())
		pk := dht.peerstore.PubKey(p)
		if pk == nil {
			return fmt.Errorf("do not have public key for %s", p)
		}

		if err := record.CheckRecordSig(r, pk); err != nil {
			return err
		}
	}

	return dht.Validator.VerifyRecord(r)
}
// GetValue looks the key up in the local datastore only; the wrapped record's
// value is returned and nothing is fetched from the network.
func (c *offlineRouting) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
	v, err := c.datastore.Get(key.DsKey())
	if err != nil {
		return nil, err
	}
	byt, ok := v.([]byte)
	if !ok {
		return nil, errors.New("value stored in datastore not []byte")
	}
	rec := new(pb.Record)
	err = proto.Unmarshal(byt, rec)
	if err != nil {
		return nil, err
	}

	return rec.GetValue(), nil
}
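// The write-side counterpart to the GetValue above, shown here only as a
// minimal sketch (not lifted from the original source): the value is wrapped
// in a dht record before being written to the same datastore, so the GetValue
// path can unmarshal it again. The private-key field name "sk" is an
// assumption for illustration.
func (c *offlineRouting) PutValue(ctx context.Context, key key.Key, val []byte) error {
	rec, err := record.MakePutRecord(c.sk, key, val, true)
	if err != nil {
		return err
	}
	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}
	return c.datastore.Put(key.DsKey(), data)
}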
// verifyRecordOnline verifies a record, searching the DHT for the public key
// if necessary. The reason there is a distinction in the functions is that
// retrieving arbitrary public keys from the DHT as a result of passively
// receiving records (e.g. through a PUT_VALUE or ADD_PROVIDER) can cause a
// massive amplification attack on the dht. Use with care.
func (dht *IpfsDHT) verifyRecordOnline(ctx context.Context, r *pb.Record) error {
	if len(r.Signature) > 0 {
		// get the public key, search for it if necessary.
		p := peer.ID(r.GetAuthor())
		pk, err := dht.GetPublicKey(ctx, p)
		if err != nil {
			return err
		}

		err = record.CheckRecordSig(r, pk)
		if err != nil {
			return err
		}
	}

	return dht.Validator.VerifyRecord(r)
}
// FIXME(brian): is this method meant to simulate getting a value from the network?
func (c *client) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
	log.Debugf("GetValue: %s", key)
	v, err := c.datastore.Get(key.DsKey())
	if err != nil {
		return nil, err
	}
	data, ok := v.([]byte)
	if !ok {
		return nil, errors.New("could not cast value from datastore")
	}
	rec := new(dhtpb.Record)
	err = proto.Unmarshal(data, rec)
	if err != nil {
		return nil, err
	}
	return rec.GetValue(), nil
}
func (p *ipnsPublisher) getPreviousSeqNo(ctx context.Context, ipnskey key.Key) (uint64, error) {
	prevrec, err := p.ds.Get(ipnskey.DsKey())
	if err != nil && err != ds.ErrNotFound {
		// datastore error other than not-found: propagate it
		return 0, err
	}
	var val []byte
	if err == nil {
		prbytes, ok := prevrec.([]byte)
		if !ok {
			return 0, fmt.Errorf("unexpected type returned from datastore: %#v", prevrec)
		}
		dhtrec := new(dhtpb.Record)
		err := proto.Unmarshal(prbytes, dhtrec)
		if err != nil {
			return 0, err
		}

		val = dhtrec.GetValue()
	} else {
		// try and check the dht for a record
		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
		defer cancel()

		rv, err := p.routing.GetValue(ctx, ipnskey)
		if err != nil {
			// no such record found, start at zero!
			return 0, nil
		}

		val = rv
	}

	e := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, e)
	if err != nil {
		return 0, err
	}

	return e.GetSequence(), nil
}
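// A hedged sketch of how the sequence number above is typically consumed when
// publishing: the new IpnsEntry is written with seq+1 so that resolvers prefer
// it over the previous record. Field usage follows the IpnsEntry protobuf seen
// elsewhere in this section; the surrounding publish plumbing is omitted and
// this helper is hypothetical.
func nextIpnsEntry(prevSeq uint64, value path.Path) *pb.IpnsEntry {
	e := new(pb.IpnsEntry)
	e.Value = []byte(value)
	e.Sequence = proto.Uint64(prevSeq + 1)
	return e
}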
// getLastVal returns the path and sequence number from the locally stored
// IPNS record for k, or errNoEntry if nothing has been published yet.
func (rp *Republisher) getLastVal(k key.Key) (path.Path, uint64, error) {
	ival, err := rp.ds.Get(k.DsKey())
	if err != nil {
		// not found means we don't have a previously published entry
		return "", 0, errNoEntry
	}
	val := ival.([]byte)

	dhtrec := new(dhtpb.Record)
	err = proto.Unmarshal(val, dhtrec)
	if err != nil {
		return "", 0, err
	}

	// extract published data from record
	e := new(pb.IpnsEntry)
	err = proto.Unmarshal(dhtrec.GetValue(), e)
	if err != nil {
		return "", 0, err
	}
	return path.Path(e.Value), e.GetSequence(), nil
}
// VerifyRecord checks a record and ensures it is still valid.
// It runs needed validators
func (v Validator) VerifyRecord(r *pb.Record) error {
	// Now, run the validity func registered for this key's prefix
	parts := path.SplitList(r.GetKey())
	if len(parts) < 3 {
		log.Infof("Record key does not have validator: %s", key.Key(r.GetKey()))
		return nil
	}

	val, ok := v[parts[1]]
	if !ok {
		log.Infof("Unrecognized key prefix: %s", parts[1])
		return ErrInvalidRecordType
	}

	return val.Func(key.Key(r.GetKey()), r.GetValue())
}
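// A minimal usage sketch for the Validator map: record keys are namespaced as
// "/<prefix>/<rest>", and the prefix selects which registered checker's Func
// runs. Only the "pk" registration mirrors code in this section; the commented
// "ipns" entry is a hypothetical illustration of adding a second namespace.
func newValidatorSketch() record.Validator {
	v := make(record.Validator)
	v["pk"] = record.PublicKeyValidator
	// hypothetical second namespace:
	// v["ipns"] = &record.ValidChecker{Func: validateIpnsRecord, Sign: true}
	return v
}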
// MakePutRecord creates and signs a dht record for the given key/value pair
func MakePutRecord(sk ci.PrivKey, key key.Key, value []byte, sign bool) (*pb.Record, error) {
	record := new(pb.Record)
	record.Key = proto.String(string(key))
	record.Value = value

	pkh, err := sk.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	record.Author = proto.String(string(pkh))
	if sign {
		blob := RecordBlobForSig(record)

		sig, err := sk.Sign(blob)
		if err != nil {
			return nil, err
		}

		record.Signature = sig
	}
	return record, nil
}
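// A small sketch of the typical call sequence around MakePutRecord, assuming a
// private key "sk", a datastore "dstore", and a key/value pair supplied by the
// caller: the signed record is marshalled and stored under the key's datastore
// form, matching the unmarshal path in the getLocal/GetValue functions above.
func putSignedRecord(sk ci.PrivKey, dstore ds.Datastore, k key.Key, value []byte) error {
	rec, err := MakePutRecord(sk, k, value, true)
	if err != nil {
		return err
	}
	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}
	return dstore.Put(k.DsKey(), data)
}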
// RecordBlobForSig returns the blob protected by the record signature
func RecordBlobForSig(r *pb.Record) []byte {
	k := []byte(r.GetKey())
	v := []byte(r.GetValue())
	a := []byte(r.GetAuthor())
	return bytes.Join([][]byte{k, v, a}, []byte{})
}
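// Hedged sketch only: CheckRecordSig, called in the verify functions above, is
// expected to verify the record's signature over exactly this blob. This is an
// illustrative reimplementation under that assumption, not the canonical one.
func checkRecordSigSketch(r *pb.Record, pk ci.PubKey) error {
	ok, err := pk.Verify(RecordBlobForSig(r), r.GetSignature())
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("invalid record signature")
	}
	return nil
}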
func (dht *IpfsDHT) checkLocalDatastore(k key.Key) (*pb.Record, error) {
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := k.DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	if err == ds.ErrNotFound {
		return nil, nil
	}

	// if we got an unexpected error, bail.
	if err != nil {
		return nil, err
	}

	// if we have the value, send it back
	log.Debugf("%s handleGetValue success!", dht.self)

	byts, ok := iVal.([]byte)
	if !ok {
		return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
	}

	rec := new(pb.Record)
	err = proto.Unmarshal(byts, rec)
	if err != nil {
		log.Debug("Failed to unmarshal dht record from datastore")
		return nil, err
	}

	// if it's our record, don't bother checking the times on it
	if peer.ID(rec.GetAuthor()) == dht.self {
		return rec, nil
	}

	var recordIsBad bool
	recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
	if err != nil {
		log.Info("either no receive time set on record, or it was invalid: ", err)
		recordIsBad = true
	}

	if time.Now().Sub(recvtime) > MaxRecordAge {
		log.Debug("old record found, tossing.")
		recordIsBad = true
	}

	// NOTE: we do not verify the record here beyond checking these timestamps.
	// we put the burden of checking the records on the requester as checking a record
	// may be computationally expensive
	if recordIsBad {
		err := dht.datastore.Delete(dskey)
		if err != nil {
			log.Error("Failed to delete bad record from datastore: ", err)
		}
		return nil, nil // can treat this as not having the record at all
	}

	return rec, nil
}
func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(i.node.Context(), time.Hour)
	// the hour is a hard fallback, we don't expect it to happen, but just in case
	defer cancel()

	if cn, ok := w.(http.CloseNotifier); ok {
		clientGone := cn.CloseNotify()
		go func() {
			select {
			case <-clientGone:
			case <-ctx.Done():
			}
			cancel()
		}()
	}

	// If this is an ipns query let's check to see if it's using our own peer ID.
	// If so let's resolve it locally instead of going out to the network.
	var paths []string = strings.Split(r.URL.Path, "/")
	if paths[1] == "ipns" && paths[2] == i.node.Identity.Pretty() {
		id := i.node.Identity
		_, ipnskey := namesys.IpnsKeysForID(id)
		ival, _ := i.node.Repo.Datastore().Get(ipnskey.DsKey())
		val := ival.([]byte)
		dhtrec := new(dhtpb.Record)
		proto.Unmarshal(val, dhtrec)
		e := new(pb.IpnsEntry)
		proto.Unmarshal(dhtrec.GetValue(), e)
		pth := path.Path(e.Value).String()
		pth = "/ipfs/" + pth
		for _, p := range paths[3:] {
			pth += "/" + p
		}
		r.URL.Path = pth
	}
	urlPath := r.URL.Path

	// If the gateway is behind a reverse proxy and mounted at a sub-path,
	// the prefix header can be set to signal this sub-path.
	// It will be prepended to links in directory listings and the index.html redirect.
	prefix := ""
	if prefixHdr := r.Header["X-Ipfs-Gateway-Prefix"]; len(prefixHdr) > 0 {
		prfx := prefixHdr[0]
		for _, p := range i.config.PathPrefixes {
			if prfx == p || strings.HasPrefix(prfx, p+"/") {
				prefix = prfx
				break
			}
		}
	}

	// IPNSHostnameOption might have constructed an IPNS path using the Host header.
	// In this case, we need the original path for constructing redirects
	// and links that match the requested URL.
	// For example, http://example.net would become /ipns/example.net, and
	// the redirects and links would end up as http://example.net/ipns/example.net
	originalUrlPath := prefix + urlPath
	ipnsHostname := false
	if hdr := r.Header["X-Ipns-Original-Path"]; len(hdr) > 0 {
		originalUrlPath = prefix + hdr[0]
		ipnsHostname = true
	}

	if i.config.BlockList != nil && i.config.BlockList.ShouldBlock(urlPath) {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("403 - Forbidden"))
		return
	}

	nd, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		webError(w, "Path Resolve error", err, http.StatusBadRequest)
		return
	}

	etag := gopath.Base(urlPath)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("X-IPFS-Path", urlPath)

	// set 'allowed' headers
	w.Header().Set("Access-Control-Allow-Headers", "X-Stream-Output, X-Chunked-Output")
	// expose those headers
	w.Header().Set("Access-Control-Expose-Headers", "X-Stream-Output, X-Chunked-Output")

	// Suborigin header, sandboxes apps from each other in the browser (even
	// though they are served from the same gateway domain).
	//
	// Omitted if the path was treated by IPNSHostnameOption(), for example
	// a request for http://example.net/ would be changed to /ipns/example.net/,
	// which would turn into an incorrect Suborigin: example.net header.
	//
	// NOTE: This is not yet widely supported by browsers.
	if !ipnsHostname {
		pathRoot := strings.SplitN(urlPath, "/", 4)[2]
		w.Header().Set("Suborigin", pathRoot)
	}

	dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
	if err != nil && err != uio.ErrIsDir {
		// not a directory and still an error
		internalWebError(w, err)
		return
	}

	// set these headers _after_ the error, for we may just not have it
	// and don't want the client to cache a 500 response...
	// and only if it's /ipfs!
	// TODO: break this out when we split /ipfs /ipns routes.
	modtime := time.Now()
	if strings.HasPrefix(urlPath, ipfsPathPrefix) {
		w.Header().Set("Etag", etag)
		w.Header().Set("Cache-Control", "public, max-age=29030400, immutable")

		// set modtime to a really long time ago, since files are immutable and should stay cached
		modtime = time.Unix(1, 0)
	}

	if err == nil {
		defer dr.Close()
		name := gopath.Base(urlPath)
		http.ServeContent(w, r, name, modtime, dr)
		return
	}

	// storage for directory listing
	var dirListing []directoryItem
	// loop through files
	foundIndex := false
	for _, link := range nd.Links {
		if link.Name == "index.html" {
			log.Debugf("found index.html link for %s", urlPath)
			foundIndex = true

			if urlPath[len(urlPath)-1] != '/' {
				// See comment above where originalUrlPath is declared.
				http.Redirect(w, r, originalUrlPath+"/", 302)
				log.Debugf("redirect to %s", originalUrlPath+"/")
				return
			}

			// return index page instead.
			nd, err := core.Resolve(ctx, i.node, path.Path(urlPath+"/index.html"))
			if err != nil {
				internalWebError(w, err)
				return
			}
			dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
			if err != nil {
				internalWebError(w, err)
				return
			}
			defer dr.Close()

			// write to request
			http.ServeContent(w, r, "index.html", modtime, dr)
			break
		}

		// See comment above where originalUrlPath is declared.
		di := directoryItem{humanize.Bytes(link.Size), link.Name, gopath.Join(originalUrlPath, link.Name)}
		dirListing = append(dirListing, di)
	}

	if !foundIndex {
		if r.Method != "HEAD" {
			// construct the correct back link
			// https://github.com/ipfs/go-ipfs/issues/1365
			var backLink string = prefix + urlPath

			// don't go further up than /ipfs/$hash/
			pathSplit := path.SplitList(backLink)
			switch {
			// keep backlink
			case len(pathSplit) == 3: // url: /ipfs/$hash

			// keep backlink
			case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/

			// add the correct link depending on whether the path ends with a slash
			default:
				if strings.HasSuffix(backLink, "/") {
					backLink += "./.."
				} else {
					backLink += "/.."
				}
			}

			// strip /ipfs/$hash from backlink if IPNSHostnameOption touched the path.
			if ipnsHostname {
				backLink = prefix + "/"
				if len(pathSplit) > 5 {
					// also strip the trailing segment, because it's a backlink
					backLinkParts := pathSplit[3 : len(pathSplit)-2]
					backLink += path.Join(backLinkParts) + "/"
				}
			}

			// See comment above where originalUrlPath is declared.
			tplData := listingTemplateData{
				Listing:  dirListing,
				Path:     originalUrlPath,
				BackLink: backLink,
			}
			err := listingTemplate.Execute(w, tplData)
			if err != nil {
				internalWebError(w, err)
				return
			}
		}
	}
}
func (x *Start) Execute(args []string) error {
	printSplashScreen()

	// set repo path
	var repoPath string
	if x.Testnet {
		repoPath = "~/.openbazaar2-testnet"
	} else {
		repoPath = "~/.openbazaar2"
	}
	expPath, _ := homedir.Expand(filepath.Clean(repoPath))

	// Database
	sqliteDB, err := db.Create(expPath, x.Password, x.Testnet)
	if err != nil {
		return err
	}

	// logging
	w := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ob.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, // days
	}
	backendStdout := logging.NewLogBackend(os.Stdout, "", 0)
	backendFile := logging.NewLogBackend(w, "", 0)
	backendStdoutFormatter := logging.NewBackendFormatter(backendStdout, stdoutLogFormat)
	backendFileFormatter := logging.NewBackendFormatter(backendFile, fileLogFormat)
	logging.SetBackend(backendFileFormatter, backendStdoutFormatter)
	ipfslogging.LdJSONFormatter()
	w2 := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ipfs.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, // days
	}
	ipfslogging.Output(w2)()

	// initialize the ipfs repo if it doesn't already exist
	err = repo.DoInit(os.Stdout, expPath, 4096, x.Testnet, x.Password, sqliteDB.Config().Init)
	if err != nil && err != repo.ErrRepoExists {
		log.Error(err)
		return err
	}

	// if the db can't be decrypted, exit
	if sqliteDB.Config().IsEncrypted() {
		return encryptedDatabaseError
	}

	// ipfs node setup
	r, err := fsrepo.Open(repoPath)
	if err != nil {
		log.Error(err)
		return err
	}
	cctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cfg, err := r.Config()
	if err != nil {
		log.Error(err)
		return err
	}

	identityKey, err := sqliteDB.Config().GetIdentityKey()
	if err != nil {
		log.Error(err)
		return err
	}
	identity, err := ipfs.IdentityFromKey(identityKey)
	if err != nil {
		return err
	}
	cfg.Identity = identity

	// Run stun and set uTP port
	if x.STUN {
		for i, addr := range cfg.Addresses.Swarm {
			m, _ := ma.NewMultiaddr(addr)
			p := m.Protocols()
			if p[0].Name == "ip4" && p[1].Name == "udp" && p[2].Name == "utp" {
				port, serr := net.Stun()
				if serr != nil {
					log.Error(serr)
					return serr
				}
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm[:i], cfg.Addresses.Swarm[i+1:]...)
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm, "/ip4/0.0.0.0/udp/"+strconv.Itoa(port)+"/utp")
				break
			}
		}
	}

	ncfg := &ipfscore.BuildCfg{
		Repo:   r,
		Online: true,
	}
	nd, err := ipfscore.NewNode(cctx, ncfg)
	if err != nil {
		log.Error(err)
		return err
	}

	ctx := commands.Context{}
	ctx.Online = true
	ctx.ConfigRoot = expPath
	ctx.LoadConfig = func(path string) (*config.Config, error) {
		return fsrepo.ConfigAt(expPath)
	}
	ctx.ConstructNode = func() (*ipfscore.IpfsNode, error) {
		return nd, nil
	}

	log.Info("Peer ID: ", nd.Identity.Pretty())
	printSwarmAddrs(nd)

	// Get current directory root hash
	_, ipnskey := namesys.IpnsKeysForID(nd.Identity)
	ival, _ := nd.Repo.Datastore().Get(ipnskey.DsKey())
	val := ival.([]byte)
	dhtrec := new(dhtpb.Record)
	proto.Unmarshal(val, dhtrec)
	e := new(namepb.IpnsEntry)
	proto.Unmarshal(dhtrec.GetValue(), e)

	// Wallet
	mn, err := sqliteDB.Config().GetMnemonic()
	if err != nil {
		log.Error(err)
		return err
	}
	var params chaincfg.Params
	if !x.Testnet {
		params = chaincfg.MainNetParams
	} else {
		params = chaincfg.TestNet3Params
	}
	libbitcoinServers, err := repo.GetLibbitcoinServers(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	maxFee, err := repo.GetMaxFee(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	feeApi, err := repo.GetFeeAPI(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	low, medium, high, err := repo.GetDefaultFees(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	wallet := libbitcoin.NewLibbitcoinWallet(mn, &params, sqliteDB, libbitcoinServers, maxFee, low, medium, high, feeApi)

	// Offline messaging storage
	var storage sto.OfflineMessagingStorage
	if x.Storage == "self-hosted" || x.Storage == "" {
		storage = selfhosted.NewSelfHostedStorage(expPath, ctx)
	} else if x.Storage == "dropbox" {
		token, err := repo.GetDropboxApiToken(path.Join(expPath, "config"))
		if err != nil {
			log.Error(err)
			return err
		} else if token == "" {
			err = errors.New("Dropbox token not set in config file")
			log.Error(err)
			return err
		}
		storage, err = dropbox.NewDropBoxStorage(token)
		if err != nil {
			log.Error(err)
			return err
		}
	} else {
		err = errors.New("Invalid storage option")
		log.Error(err)
		return err
	}

	// OpenBazaar node setup
	core.Node = &core.OpenBazaarNode{
		Context:        ctx,
		IpfsNode:       nd,
		RootHash:       ipath.Path(e.Value).String(),
		RepoPath:       expPath,
		Datastore:      sqliteDB,
		Wallet:         wallet,
		MessageStorage: storage,
	}

	var gwErrc <-chan error
	var cb <-chan bool
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, cb, gwErrc = serveHTTPGateway(core.Node)
		if err != nil {
			log.Error(err)
			return err
		}
	}

	// Wait for gateway to start before starting the network service.
	// This way the websocket channel we pass into the service gets created first.
	// FIXME: There has to be a better way
	for b := range cb {
		if b {
			OBService := service.SetupOpenBazaarService(nd, core.Node.Broadcast, ctx, sqliteDB)
			core.Node.Service = OBService
			MR := net.NewMessageRetriever(sqliteDB, ctx, nd, OBService, 16, core.Node.SendOfflineAck)
			go MR.Run()
			core.Node.MessageRetriever = MR
			PR := net.NewPointerRepublisher(nd, sqliteDB)
			go PR.Run()
			core.Node.PointerRepublisher = PR
		}
		break
	}

	for err := range gwErrc {
		fmt.Println(err)
	}

	return nil
}