Example #1
func startupIPFS(dspath string, ctx *context.Context) (*core.IpfsNode, error) {
	r, err := fsrepo.Open(dspath)
	if err != nil {
		cfg, err := config.Init(os.Stdout, 2048)
		if err != nil {
			return nil, err
		}

		if err := fsrepo.Init(dspath, cfg); err != nil {
			return nil, err
		}

		r, err = fsrepo.Open(dspath)
		if err != nil {
			return nil, err
		}
	}

	node, err := core.NewNode(*ctx, &core.BuildCfg{Online: true, Repo: r})
	if err != nil {
		return nil, err
	}

	return node, nil

}
Example #2
func createNode(nd *Node, online bool, ctx context.Context) (*core.IpfsNode, error) {
	// `nd` only contains the prepopulated fields as in New().
	rp, err := fsrepo.Open(nd.Path)
	if err != nil {
		log.Errorf("Unable to open repo `%s`: %v", nd.Path, err)
		return nil, err
	}

	swarmAddrs := []string{
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", nd.SwarmPort),
		fmt.Sprintf("/ip6/::/tcp/%d", nd.SwarmPort),
	}

	if err := rp.SetConfigKey("Addresses.Swarm", swarmAddrs); err != nil {
		return nil, err
	}

	cfg := &core.BuildCfg{
		Repo:   rp,
		Online: online,
	}

	ipfsNode, err := core.NewNode(ctx, cfg)
	if err != nil {
		return nil, err
	}

	return ipfsNode, nil
}
Example #3
func NewTmpDirNode(ctx context.Context) (*core.IpfsNode, error) {
	dir, err := ioutil.TempDir("", "ipfs-shell")
	if err != nil {
		return nil, fmt.Errorf("failed to get temp dir: %s", err)
	}

	cfg, err := config.Init(ioutil.Discard, 1024)
	if err != nil {
		return nil, err
	}

	err = fsrepo.Init(dir, cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to init ephemeral node: %s", err)
	}

	repo, err := fsrepo.Open(dir)
	if err != nil {
		return nil, err
	}

	return core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Repo:   repo,
	})
}
Example #4
func BuildNode(ctx context.Context, dcConfig *DCConfig) (*core.IpfsNode, error) {
	// Get an IPFS repo/config directory and open it
	ipfsPath, err := getIpfsPath(dcConfig.PoolIndex)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	repo, err := fsrepo.Open(ipfsPath)
	if err != nil {
		return nil, err
	}

	// Create the node with a config
	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Repo:   repo,
	})

	if err != nil {
		return nil, err
	}

	return node, nil
}
Example #5
func (i *cmdInvocation) constructNodeFunc(ctx context.Context) func() (*core.IpfsNode, error) {
	return func() (*core.IpfsNode, error) {
		if i.req == nil {
			return nil, errors.New("constructing node without a request")
		}

		cmdctx := i.req.InvocContext()
		if cmdctx == nil {
			return nil, errors.New("constructing node without a request context")
		}

		r, err := fsrepo.Open(i.req.InvocContext().ConfigRoot)
		if err != nil { // repo is owned by the node
			return nil, err
		}

		// ok everything is good. set it on the invocation (for ownership)
		// and return it.
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online: cmdctx.Online,
			Repo:   r,
		})
		if err != nil {
			return nil, err
		}
		i.node = n
		return i.node, nil
	}
}
Example #6
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	dkey, err := assets.SeedInitDocs(nd)
	if err != nil {
		return fmt.Errorf("init: seeding init docs failed: %s", err)
	}
	log.Debugf("init: seeded init docs %s", dkey)

	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}

	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
Example #7
func initializeIpnsKeyspace(repoRoot string, privKeyBytes []byte) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}
	cfg, err := r.Config()
	if err != nil {
		log.Error(err)
		return err
	}
	identity, err := ipfs.IdentityFromKey(privKeyBytes)
	if err != nil {
		return err
	}

	cfg.Identity = identity
	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	err = nd.SetupOfflineRouting()
	if err != nil {
		return err
	}

	return namesys.InitializeKeyspace(ctx, nd.DAG, nd.Namesys, nd.Pinning, nd.PrivateKey)
}
Example #8
// TODO share func
func writeConfig(path string, cfg *config.Config) error {
	// NB: This needs to run on the daemon.
	r, err := fsrepo.Open(path)
	if err != nil {
		return err
	}
	defer r.Close()
	return r.SetConfig(cfg)
}
Example #9
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewIPFSNode(ctx, core.Offline(r))
	if err != nil {
		return err
	}
	defer nd.Close()

	dirb := uio.NewDirectory(nd.DAG)

	// add every file in the assets pkg
	for fname, file := range assets.Init_dir {
		buf := bytes.NewBufferString(file)
		s, err := coreunix.Add(nd, buf)
		if err != nil {
			return err
		}

		k := key.B58KeyDecode(s)
		if err := dirb.AddChild(fname, k); err != nil {
			return err
		}
	}

	dir := dirb.GetNode()
	dkey, err := nd.DAG.Add(dir)
	if err != nil {
		return err
	}

	if err := nd.Pinning.Pin(ctx, dir, true); err != nil {
		return err
	}

	if err := nd.Pinning.Flush(); err != nil {
		return err
	}

	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}

	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
Example #10
File: self.go Project: utamaro/core
// NewSelf makes the repo if needed, starts the daemon, and returns a Self object.
func NewSelf(cfg *config.Config, rootPath string) *Self {
	InitRepo(cfg.IpfsRepo)
	n := &merkledag.Node{}
	k, err := n.Key()
	log.IfFatal(err)
	// workaround for "ERROR    bitswap: failed to find any peer in table"
	i := 0
	var node *core.IpfsNode
	var ctx context.Context
	var cancel context.CancelFunc
	for i = 0; i < 10; i++ {
		log.Println("setting up node...")
		r, err := fsrepo.Open(cfg.IpfsRepo)
		log.IfFatal(err)
		ctx, cancel = context.WithCancel(context.Background())
		node, err = core.NewNode(ctx, &core.BuildCfg{
			Repo:   r,
			Online: true,
		})
		log.IfFatal(err)
		if err := node.Routing.Provide(ctx, k); log.If(err) {
			cancel()
			log.If(node.Close())
			log.Println("retrying...")
			continue
		}
		break
	}
	if i == 10 {
		log.Fatal("cannot provide a key to network")
	}
	self := &Self{
		RootPath: rootPath,
		ipfsNode: node,
		ctx:      ctx,
		cancel:   cancel,
		cfg:      cfg,
	}
	self.follow = FromStringSlice(cfg.FollowPeers, self)
	parent, err := self.ToPeer().GetDAGNode("")
	if log.If(err) {
		parent = &merkledag.Node{}
	}
	self.myIpns, err = parent.Key()
	log.IfFatal(err)
	if _, err = parent.GetNodeLink(rootPath); err != nil {
		log.Println("initializing DAGs for saving status")
		self.makeInitNodes(parent)
	}
	return self
}
Example #11
func addConfigExtensions(repoRoot string, testnet bool) error {
	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}
	type Server struct {
		Url       string
		PublicKey []byte
	}
	var ls []Server
	if !testnet {
		ls = []Server{
			{Url: "tcp://libbitcoin1.openbazaar.org:9091", PublicKey: []byte{}},
			{Url: "tcp://libbitcoin3.openbazaar.org:9091", PublicKey: []byte{}},
		}
	} else {
		ls = []Server{
			{Url: "tcp://libbitcoin2.openbazaar.org:9091", PublicKey: []byte(zmq4.Z85decode("baihZB[vT(dcVCwkhYLAzah<t2gJ>{3@k?+>T&^3"))},
			{Url: "tcp://libbitcoin4.openbazaar.org:9091", PublicKey: []byte(zmq4.Z85decode("<Z&{.=LJSPySefIKgCu99w.L%b^6VvuVp0+pbnOM"))},
		}
	}
	type Wallet struct {
		LibbitcoinServers []Server
		MaxFee            int
		FeeAPI            string
		HighFeeDefault    int
		MediumFeeDefault  int
		LowFeeDefault     int
	}
	var w Wallet = Wallet{
		LibbitcoinServers: ls,
		MaxFee:            1500000,
		FeeAPI:            "https://bitcoinfees.21.co/api/v1/fees/recommended",
		HighFeeDefault:    60,
		MediumFeeDefault:  40,
		LowFeeDefault:     20,
	}
	if err := extendConfigFile(r, "Wallet", w); err != nil {
		return err
	}
	if err := extendConfigFile(r, "Resolver", "https://resolver.onename.com/"); err != nil {
		return err
	}
	if err := extendConfigFile(r, "Dropbox-api-token", ""); err != nil {
		return err
	}
	if err := r.Close(); err != nil {
		return err
	}
	return nil
}
Example #12
File: demo.go Project: f3r/examples
func SetupIpfs() (*core.IpfsNode, error) {
	// Assume the user has run 'ipfs init'
	r, err := fsrepo.Open("~/.ipfs")
	if err != nil {
		return nil, err
	}

	cfg := &core.BuildCfg{
		Repo:   r,
		Online: true,
	}

	return core.NewNode(context.Background(), cfg)
}
Example #13
func main() {
	bsize := kingpin.Flag("blocksize", "blocksize to test with").Default("262144").Int64()
	kingpin.Parse()

	ipfsdir := getIpfsDir()
	r, err := fsrepo.Open(ipfsdir)
	if err != nil {
		fmt.Printf("Failed to open ipfs repo at: %s: %s\n", ipfsdir, err)
		fmt.Println("Please ensure ipfs has been initialized and that no daemon is running")
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nd, err := core.NewNode(ctx, &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		fmt.Printf("failed to create node: %s\n", err)
		return
	}

	cfg := &BenchCfg{
		Blocksize: *bsize,
	}

	fmt.Println(cfg)
	err = BenchmarkBlockRewrites(nd, cfg)
	if err != nil {
		panic(err)
	}

	err = BenchmarkRandomBlockWrites(nd, cfg)
	if err != nil {
		panic(err)
	}

	err = BenchmarkAdd(nd, cfg)
	if err != nil {
		panic(err)
	}

	err = BenchmarkDiskWrites(ipfsdir)
	if err != nil {
		panic(err)
	}
}
Example #14
// create a new Service
func NewService(name string, location string) (Service, error) {
	// Basic ipfsnode setup
	repo, err := fsrepo.Open(location)
	if err != nil {
		return Service{}, err
	}
	nodeBuilder := core.NewNodeBuilder().Online()
	nodeBuilder.SetRepo(repo)

	ctx, cancel := context.WithCancel(context.Background())

	node, err := nodeBuilder.Build(ctx)
	if err != nil {
		return Service{}, err
	}
	return Service{name, node, cancel, ctx, nil, make(chan bool), false}, nil
}
Example #15
func NewDefaultNodeWithFSRepo(ctx context.Context, repoPath string) (*core.IpfsNode, error) {
	r, err := fsrepo.Open(repoPath)
	if err != nil {
		return nil, errgo.Notef(err, "opening fsrepo failed")
	}
	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Repo:   r,
	})
	if err != nil {
		return nil, errgo.Notef(err, "ipfs NewNode() failed.")
	}
	// TODO: can we bootstrap locally/mdns first and fall back to default?
	err = node.Bootstrap(core.DefaultBootstrapConfig)
	if err != nil {
		return nil, errgo.Notef(err, "ipfs Bootstrap() failed.")
	}
	return node, nil
}
Example #16
func main() {
	bkp, err := fsrepo.BestKnownPath()
	if err != nil {
		panic(err)
	}

	r, err := fsrepo.Open(bkp)
	if err != nil {
		panic(err)
	}

	k := ds.NewKey(os.Args[1])

	val, err := r.Datastore().Get(k)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println(string(val.([]byte)))
}
Example #17
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Please give a peer ID as an argument")
		return
	}
	target, err := peer.IDB58Decode(os.Args[1])
	if err != nil {
		panic(err)
	}

	// Basic ipfsnode setup
	r, err := fsrepo.Open("~/.ipfs")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg := &core.BuildCfg{
		Repo:   r,
		Online: true,
	}

	nd, err := core.NewNode(ctx, cfg)

	if err != nil {
		panic(err)
	}

	fmt.Printf("I am peer %s dialing %s\n", nd.Identity, target)

	con, err := corenet.Dial(nd, target, "/app/whyrusleeping")
	if err != nil {
		fmt.Println(err)
		return
	}

	io.Copy(os.Stdout, con)
}
Example #18
func initializeIpnsKeyspace(repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	err = nd.SetupOfflineRouting()
	if err != nil {
		return err
	}

	return namesys.InitializeKeyspace(ctx, nd.DAG, nd.Namesys, nd.Pinning, nd.PrivateKey)
}
Example #19
File: host.go Project: f3r/examples
func main() {
	// Basic ipfsnode setup
	r, err := fsrepo.Open("~/.ipfs")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg := &core.BuildCfg{
		Repo:   r,
		Online: true,
	}

	nd, err := core.NewNode(ctx, cfg)

	if err != nil {
		panic(err)
	}

	list, err := corenet.Listen(nd, "/app/whyrusleeping")
	if err != nil {
		panic(err)
	}
	fmt.Printf("I am peer: %s\n", nd.Identity.Pretty())

	for {
		con, err := list.Accept()
		if err != nil {
			fmt.Println(err)
			return
		}
		defer con.Close()

		fmt.Fprintln(con, "Hello! This is whyrusleepings awesome ipfs service")
		fmt.Printf("Connection from: %s\n", con.Conn().RemotePeer())
	}
}
Example #20
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer func() {
		log.If(nd.Close())
	}()
	dkey, err := assets.SeedInitDocs(nd)
	if err != nil {
		return fmt.Errorf("init: seeding init docs failed: %s", err)
	}
	log.Println("init: seeded init docs", dkey)

	return nil
}
Example #21
func run(ipfsPath, watchPath string) error {

	proc := process.WithParent(process.Background())
	log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)

	ipfsPath, err := homedir.Expand(ipfsPath)
	if err != nil {
		return err
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	if err := addTree(watcher, watchPath); err != nil {
		return err
	}

	r, err := fsrepo.Open(ipfsPath)
	if err != nil {
		// TODO handle case: daemon running
		// TODO handle case: repo doesn't exist or isn't initialized
		return err
	}
	node, err := core.NewIPFSNode(context.Background(), core.Online(r))
	if err != nil {
		return err
	}
	defer node.Close()

	if *http {
		addr := "/ip4/127.0.0.1/tcp/5001"
		var opts = []corehttp.ServeOption{
			corehttp.GatewayOption(true),
			corehttp.WebUIOption,
			corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
		}
		proc.Go(func(p process.Process) {
			if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
				return
			}
		})
	}

	interrupts := make(chan os.Signal)
	signal.Notify(interrupts, os.Interrupt, os.Kill)

	for {
		select {
		case <-interrupts:
			return nil
		case e := <-watcher.Events:
			log.Printf("received event: %s", e)
			isDir, err := IsDirectory(e.Name)
			if err != nil {
				continue
			}
			switch e.Op {
			case fsnotify.Remove:
				if isDir {
					if err := watcher.Remove(e.Name); err != nil {
						return err
					}
				}
			default:
				// all events except for Remove result in an IPFS.Add, but only
				// directory creation triggers a new watch
				switch e.Op {
				case fsnotify.Create:
					if isDir {
						addTree(watcher, e.Name)
					}
				}
				proc.Go(func(p process.Process) {
					file, err := os.Open(e.Name)
					if err != nil {
						log.Println(err)
					}
					defer file.Close()
					k, err := coreunix.Add(node, file)
					if err != nil {
						log.Println(err)
					}
					log.Printf("added %s... key: %s", e.Name, k)
				})
			}
		case err := <-watcher.Errors:
			log.Println(err)
		}
	}
	return nil
}
Example #22
func (x *Start) Execute(args []string) error {
	printSplashScreen()

	// set repo path
	var repoPath string
	if x.Testnet {
		repoPath = "~/.openbazaar2-testnet"
	} else {
		repoPath = "~/.openbazaar2"
	}
	expPath, _ := homedir.Expand(filepath.Clean(repoPath))

	// Database
	sqliteDB, err := db.Create(expPath, x.Password, x.Testnet)
	if err != nil {
		return err
	}

	// logging
	w := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ob.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, //days
	}
	backendStdout := logging.NewLogBackend(os.Stdout, "", 0)
	backendFile := logging.NewLogBackend(w, "", 0)
	backendStdoutFormatter := logging.NewBackendFormatter(backendStdout, stdoutLogFormat)
	backendFileFormatter := logging.NewBackendFormatter(backendFile, fileLogFormat)
	logging.SetBackend(backendFileFormatter, backendStdoutFormatter)

	ipfslogging.LdJSONFormatter()
	w2 := &lumberjack.Logger{
		Filename:   path.Join(expPath, "logs", "ipfs.log"),
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     30, //days
	}
	ipfslogging.Output(w2)()

	// initialize the ipfs repo if it doesn't already exist
	err = repo.DoInit(os.Stdout, expPath, 4096, x.Testnet, x.Password, sqliteDB.Config().Init)
	if err != nil && err != repo.ErrRepoExists {
		log.Error(err)
		return err
	}

	// if the db can't be decrypted, exit
	if sqliteDB.Config().IsEncrypted() {
		return encryptedDatabaseError
	}

	// ipfs node setup
	r, err := fsrepo.Open(repoPath)
	if err != nil {
		log.Error(err)
		return err
	}
	cctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg, err := r.Config()
	if err != nil {
		log.Error(err)
		return err
	}

	identityKey, err := sqliteDB.Config().GetIdentityKey()
	if err != nil {
		log.Error(err)
		return err
	}
	identity, err := ipfs.IdentityFromKey(identityKey)
	if err != nil {
		return err
	}
	cfg.Identity = identity

	// Run stun and set uTP port
	if x.STUN {
		for i, addr := range cfg.Addresses.Swarm {
			m, _ := ma.NewMultiaddr(addr)
			p := m.Protocols()
			if p[0].Name == "ip4" && p[1].Name == "udp" && p[2].Name == "utp" {
				port, serr := net.Stun()
				if serr != nil {
					log.Error(serr)
					return err
				}
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm[:i], cfg.Addresses.Swarm[i+1:]...)
				cfg.Addresses.Swarm = append(cfg.Addresses.Swarm, "/ip4/0.0.0.0/udp/"+strconv.Itoa(port)+"/utp")
				break
			}
		}
	}

	ncfg := &ipfscore.BuildCfg{
		Repo:   r,
		Online: true,
	}
	nd, err := ipfscore.NewNode(cctx, ncfg)
	if err != nil {
		log.Error(err)
		return err
	}
	ctx := commands.Context{}
	ctx.Online = true
	ctx.ConfigRoot = expPath
	ctx.LoadConfig = func(path string) (*config.Config, error) {
		return fsrepo.ConfigAt(expPath)
	}
	ctx.ConstructNode = func() (*ipfscore.IpfsNode, error) {
		return nd, nil
	}

	log.Info("Peer ID: ", nd.Identity.Pretty())
	printSwarmAddrs(nd)

	// Get current directory root hash
	_, ipnskey := namesys.IpnsKeysForID(nd.Identity)
	ival, _ := nd.Repo.Datastore().Get(ipnskey.DsKey())
	val := ival.([]byte)
	dhtrec := new(dhtpb.Record)
	proto.Unmarshal(val, dhtrec)
	e := new(namepb.IpnsEntry)
	proto.Unmarshal(dhtrec.GetValue(), e)

	// Wallet
	mn, err := sqliteDB.Config().GetMnemonic()
	if err != nil {
		log.Error(err)
		return err
	}
	var params chaincfg.Params
	if !x.Testnet {
		params = chaincfg.MainNetParams
	} else {
		params = chaincfg.TestNet3Params
	}
	libbitcoinServers, err := repo.GetLibbitcoinServers(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	maxFee, err := repo.GetMaxFee(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	feeApi, err := repo.GetFeeAPI(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	low, medium, high, err := repo.GetDefaultFees(path.Join(expPath, "config"))
	if err != nil {
		log.Error(err)
		return err
	}
	wallet := libbitcoin.NewLibbitcoinWallet(mn, &params, sqliteDB, libbitcoinServers, maxFee, low, medium, high, feeApi)

	// Offline messaging storage
	var storage sto.OfflineMessagingStorage
	if x.Storage == "self-hosted" || x.Storage == "" {
		storage = selfhosted.NewSelfHostedStorage(expPath, ctx)
	} else if x.Storage == "dropbox" {
		token, err := repo.GetDropboxApiToken(path.Join(expPath, "config"))
		if err != nil {
			log.Error(err)
			return err
		} else if token == "" {
			err = errors.New("Dropbox token not set in config file")
			log.Error(err)
			return err
		}
		storage, err = dropbox.NewDropBoxStorage(token)
		if err != nil {
			log.Error(err)
			return err
		}
	} else {
		err = errors.New("Invalid storage option")
		log.Error(err)
		return err
	}

	// OpenBazaar node setup
	core.Node = &core.OpenBazaarNode{
		Context:        ctx,
		IpfsNode:       nd,
		RootHash:       ipath.Path(e.Value).String(),
		RepoPath:       expPath,
		Datastore:      sqliteDB,
		Wallet:         wallet,
		MessageStorage: storage,
	}

	var gwErrc <-chan error
	var cb <-chan bool
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, cb, gwErrc = serveHTTPGateway(core.Node)
		if err != nil {
			log.Error(err)
			return err
		}
	}

	// Wait for gateway to start before starting the network service.
	// This way the websocket channel we pass into the service gets created first.
	// FIXME: There has to be a better way
	for b := range cb {
		if b == true {
			OBService := service.SetupOpenBazaarService(nd, core.Node.Broadcast, ctx, sqliteDB)
			core.Node.Service = OBService
			MR := net.NewMessageRetriever(sqliteDB, ctx, nd, OBService, 16, core.Node.SendOfflineAck)
			go MR.Run()
			core.Node.MessageRetriever = MR
			PR := net.NewPointerRepublisher(nd, sqliteDB)
			go PR.Run()
			core.Node.PointerRepublisher = PR
		}
		break
	}

	for err := range gwErrc {
		fmt.Println(err)
	}

	return nil
}
Example #23
func run() error {
	servers := config.DefaultSNRServers
	fmt.Println("using gcr remotes:")
	for _, p := range servers {
		fmt.Println("\t", p)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	repoPath := gopath.Join(cwd, config.DefaultPathName)
	if err := ensureRepoInitialized(repoPath); err != nil {
		return err
	}
	repo, err := fsrepo.Open(repoPath)
	if err != nil { // owned by node
		return err
	}
	cfg, err := repo.Config()
	if err != nil {
		return err
	}

	cfg.Bootstrap = servers
	if err := repo.SetConfig(cfg); err != nil {
		return err
	}

	var addrs []ipfsaddr.IPFSAddr
	for _, info := range servers {
		addr, err := ipfsaddr.ParseString(info)
		if err != nil {
			return err
		}
		addrs = append(addrs, addr)
	}

	var infos []peer.PeerInfo
	for _, addr := range addrs {
		infos = append(infos, peer.PeerInfo{
			ID:    addr.ID(),
			Addrs: []ma.Multiaddr{addr.Transport()},
		})
	}

	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online:  true,
		Repo:    repo,
		Routing: corerouting.SupernodeClient(infos...),
	})
	if err != nil {
		return err
	}
	defer node.Close()

	opts := []corehttp.ServeOption{
		corehttp.CommandsOption(cmdCtx(node, repoPath)),
		corehttp.GatewayOption(false),
	}

	if *cat {
		if err := runFileCattingWorker(ctx, node); err != nil {
			return err
		}
	} else {
		if err := runFileAddingWorker(node); err != nil {
			return err
		}
	}
	return corehttp.ListenAndServe(node, cfg.Addresses.API, opts...)
}
Example #24
// CliCheckForUpdates is the automatic update check from the commandline.
func CliCheckForUpdates(cfg *config.Config, repoPath string) error {

	// if config says not to, don't check for updates
	if !cfg.Version.ShouldCheckForUpdate() {
		log.Info("update check skipped.")
		return nil
	}

	log.Info("checking for update")
	u, err := CheckForUpdate()
	// if there is no update available, record it, and exit. NB:  only record
	// if we checked successfully.
	if err == ErrNoUpdateAvailable {
		log.Infof("No update available, checked on %s", time.Now())
		r, err := fsrepo.Open(repoPath)
		if err != nil {
			return err
		}
		if err := recordUpdateCheck(cfg); err != nil {
			return err
		}
		// NB: r's Config may be newer than cfg. This overwrites regardless.
		r.SetConfig(cfg)
		if err := r.Close(); err != nil {
			return err
		}
		return nil
	}

	// if another, unexpected error occurred, note it.
	if err != nil {
		log.Debugf("Error while checking for update: %v", err)
		return nil
	}

	// there is an update available

	// if we autoupdate
	if cfg.Version.AutoUpdate != config.AutoUpdateNever {
		// and we should auto update
		if ShouldAutoUpdate(cfg.Version.AutoUpdate, u.Version) {
			log.Infof("Applying update %s", u.Version)

			if err = Apply(u); err != nil {
				log.Debug(err)
				return nil
			}

			// BUG(cryptix): no good way to restart yet. - tracking https://github.com/inconshreveable/go-update/issues/5
			fmt.Printf("update %v applied. please restart.\n", u.Version)
			os.Exit(0)
		}
	}

	// autoupdate did not exit, so regular notices.
	switch cfg.Version.Check {
	case config.CheckError:
		return fmt.Errorf(errShouldUpdate, Version, u.Version)
	case config.CheckWarn:
		// print the warning
		fmt.Printf("New version available: %s\n", u.Version)
	default: // ignore
	}
	return nil
}
Example #25
`,
	},

	Arguments: []cmds.Argument{
		cmds.StringArg("key", true, false, "The key of the config entry (e.g. \"Addresses.API\")."),
		cmds.StringArg("value", false, false, "The value to set the config entry to."),
	},
	Options: []cmds.Option{
		cmds.BoolOption("bool", "Set a boolean value."),
		cmds.BoolOption("json", "Parse stringified JSON."),
	},
	Run: func(req cmds.Request, res cmds.Response) {
		args := req.Arguments()
		key := args[0]

		r, err := fsrepo.Open(req.InvocContext().ConfigRoot)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
		defer r.Close()

		var output *ConfigField
		if len(args) == 2 {
			value := args[1]

			if parseJson, _, _ := req.Option("json").Bool(); parseJson {
				var jsonVal interface{}
				if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
					err = fmt.Errorf("failed to unmarshal json. %s", err)
					res.SetError(err, cmds.ErrNormal)
Example #26
func daemonFunc(req cmds.Request, res cmds.Response) {
	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	ctx := req.InvocContext()

	go func() {
		select {
		case <-req.Context().Done():
			fmt.Println("Received interrupt signal, shutting down...")
		}
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED.
		You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		conn.EncryptConnections = false
	}

	// first, check whether the user has provided the initialization flag. we may be
	// running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	if initialize {

		// now, FileExists is our best method of detecting whether IPFS is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Online: true,
		Repo:   repo,
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if routingOption == routingOptionSupernodeKwd {
		servers, err := repo.Config().SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []peer.PeerInfo
		for _, addr := range servers {
			infos = append(infos, peer.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}

		ncfg.Routing = corerouting.SupernodeClient(infos...)
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	fmt.Printf("Daemon is ready\n")
	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc) {
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}
}
Example #27
func daemonFunc(req cmds.Request, res cmds.Response) {
	// Inject metrics before we do anything

	err := mprome.Inject()
	if err != nil {
		log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
	}

	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	managefd, _, _ := req.Option(adjustFDLimitKwd).Bool()
	if managefd {
		if err := fileDescriptorCheck(); err != nil {
			log.Errorf("setting file descriptor limit: %s", err)
		}
	}

	ctx := req.InvocContext()

	go func() {
		select {
		case <-req.Context().Done():
			fmt.Println("Received interrupt signal, shutting down...")
			fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
		}
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED.
		You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		iconn.EncryptConnections = false
	}

	// first, check whether the user has provided the initialization flag. we may be
	// running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	if initialize {

		// now, FileExists is our best method of detecting whether ipfs is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	switch err {
	default:
		res.SetError(err, cmds.ErrNormal)
		return
	case fsrepo.ErrNeedMigration:
		domigrate, found, _ := req.Option(migrateKwd).Bool()
		fmt.Println("Found outdated fs-repo, migrations need to be run.")

		if !found {
			domigrate = YesNoPrompt("Run migrations now? [y/N]")
		}

		if !domigrate {
			fmt.Println("Not running migrations of fs-repo now.")
			fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io")
			res.SetError(fmt.Errorf("fs-repo requires migration"), cmds.ErrNormal)
			return
		}

		err = migrate.RunMigration(fsrepo.RepoVersion)
		if err != nil {
			fmt.Println("The migrations of fs-repo failed:")
			fmt.Printf("  %s\n", err)
			fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
			fmt.Println("  https://github.com/ipfs/fs-repo-migrations")
			res.SetError(err, cmds.ErrNormal)
			return
		}

		repo, err = fsrepo.Open(req.InvocContext().ConfigRoot)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	case nil:
		break
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	offline, _, _ := req.Option(offlineKwd).Bool()
	pubsub, _, _ := req.Option(enableFloodSubKwd).Bool()

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Repo:      repo,
		Permament: true, // a temporary way to signify that the node is permanent ("Permament" is the upstream field's actual spelling)
		Online:    !offline,
		ExtraOpts: map[string]bool{
			"pubsub": pubsub,
		},
		//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	switch routingOption {
	case routingOptionSupernodeKwd:
		servers, err := cfg.SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []pstore.PeerInfo
		for _, addr := range servers {
			infos = append(infos, pstore.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}

		ncfg.Routing = corerouting.SupernodeClient(infos...)
	case routingOptionDHTClientKwd:
		ncfg.Routing = core.DHTClientOption
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}
	node.SetLocal(false)

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount && offline {
		res.SetError(errors.New("mount is not currently supported in offline mode"),
			cmds.ErrClient)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// repo blockstore GC - if --enable-gc flag is present
	err, gcErrc := maybeRunGC(req, node)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// initialize metrics collector
	prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})

	fmt.Printf("Daemon is ready\n")
	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc, gcErrc) {
		if err != nil {
			log.Error(err)
			res.SetError(err, cmds.ErrNormal)
		}
	}
	return
}
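Taken together, these examples share one pattern: open the fsrepo (initializing it first if needed), hand the repo to core.NewNode via core.BuildCfg (the repo is then owned by the node), and close only the node when finished. Below is a minimal distilled sketch of that pattern, not taken from any single project above; it assumes go-ipfs-era import paths and the fsrepo.IsInitialized helper, both of which may differ between go-ipfs versions, and a hypothetical repo path.

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/repo/config"
	"github.com/ipfs/go-ipfs/repo/fsrepo"
)

// openOrInitNode opens the repo at path, initializing it on first use,
// and builds an online IPFS node. The repo is owned by the node, so the
// caller only needs to close the node.
func openOrInitNode(ctx context.Context, path string) (*core.IpfsNode, error) {
	if !fsrepo.IsInitialized(path) {
		cfg, err := config.Init(ioutil.Discard, 2048)
		if err != nil {
			return nil, err
		}
		if err := fsrepo.Init(path, cfg); err != nil {
			return nil, err
		}
	}
	r, err := fsrepo.Open(path)
	if err != nil {
		return nil, err
	}
	return core.NewNode(ctx, &core.BuildCfg{Online: true, Repo: r})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// "~/.ipfs" mirrors the examples above; in real use expand it first
	// (as Example #21 does with homedir.Expand).
	nd, err := openOrInitNode(ctx, "~/.ipfs")
	if err != nil {
		log.Fatal(err)
	}
	defer nd.Close()
	fmt.Println("peer:", nd.Identity.Pretty())
}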