func (i *cmdInvocation) constructNodeFunc(ctx context.Context) func() (*core.IpfsNode, error) {
	return func() (*core.IpfsNode, error) {
		if i.req == nil {
			return nil, errors.New("constructing node without a request")
		}

		cmdctx := i.req.InvocContext()
		if cmdctx == nil {
			return nil, errors.New("constructing node without a request context")
		}

		r, err := fsrepo.Open(i.req.InvocContext().ConfigRoot)
		if err != nil { // repo is owned by the node
			return nil, err
		}

		// ok everything is good. set it on the invocation (for ownership)
		// and return it.
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online: cmdctx.Online,
			Repo:   r,
		})
		if err != nil {
			return nil, err
		}
		i.node = n
		return i.node, nil
	}
}
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	dkey, err := assets.SeedInitDocs(nd)
	if err != nil {
		return fmt.Errorf("init: seeding init docs failed: %s", err)
	}
	log.Debugf("init: seeded init docs %s", dkey)

	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}

	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
// TODO share func
func writeConfig(path string, cfg *config.Config) error {
	// NB: This needs to run on the daemon.
	r, err := fsrepo.Open(path)
	if err != nil {
		return err
	}
	defer r.Close()
	return r.SetConfig(cfg)
}
func initializeIpnsKeyspace(repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	err = nd.SetupOfflineRouting()
	if err != nil {
		return err
	}

	return namesys.InitializeKeyspace(ctx, nd.DAG, nd.Namesys, nd.Pinning, nd.PrivateKey)
}
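// The helpers above share one pattern: open the fsrepo, hand it to
// core.NewNode (which takes ownership of it and closes it with the node),
// use the node, then close the node. The sketch below distills that pattern
// into a reusable helper. It is only an illustration: withNode is a
// hypothetical name, and the imports are assumed to match those of the
// surrounding files; it is not part of the snippets above.
func withNode(repoRoot string, fn func(*core.IpfsNode) error) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil {
		// e.g. repo not initialized, or locked by a running daemon
		return err
	}

	// NewNode takes ownership of the repo; closing the node closes the repo,
	// so no separate r.Close() is needed on this path.
	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	return fn(nd)
}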
Arguments: []cmds.Argument{
	cmds.StringArg("peer", false, true, peerOptionDesc).EnableStdin(),
},
Options: []cmds.Option{
	cmds.BoolOption("default", "add default bootstrap nodes"),
},
Run: func(req cmds.Request, res cmds.Response) {
	inputPeers, err := config.ParseBootstrapPeers(req.Arguments())
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	r, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	defer r.Close()

	cfg, err := r.Config()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	deflt, _, err := req.Option("default").Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
func run() error {
	servers := config.DefaultSNRServers
	fmt.Println("using gcr remotes:")
	for _, p := range servers {
		fmt.Println("\t", p)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	repoPath := gopath.Join(cwd, config.DefaultPathName)
	if err := ensureRepoInitialized(repoPath); err != nil {
		// error ignored: fsrepo.Open below will surface any real problem
	}
	repo, err := fsrepo.Open(repoPath)
	if err != nil { // owned by node
		return err
	}
	cfg, err := repo.Config()
	if err != nil {
		return err
	}

	cfg.Bootstrap = servers
	if err := repo.SetConfig(cfg); err != nil {
		return err
	}

	var addrs []ipfsaddr.IPFSAddr
	for _, info := range servers {
		addr, err := ipfsaddr.ParseString(info)
		if err != nil {
			return err
		}
		addrs = append(addrs, addr)
	}

	var infos []peer.PeerInfo
	for _, addr := range addrs {
		infos = append(infos, peer.PeerInfo{
			ID:    addr.ID(),
			Addrs: []ma.Multiaddr{addr.Transport()},
		})
	}

	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online:  true,
		Repo:    repo,
		Routing: corerouting.SupernodeClient(infos...),
	})
	if err != nil {
		return err
	}
	defer node.Close()

	opts := []corehttp.ServeOption{
		corehttp.CommandsOption(cmdCtx(node, repoPath)),
		corehttp.GatewayOption(false),
	}

	if *cat {
		if err := runFileCattingWorker(ctx, node); err != nil {
			return err
		}
	} else {
		if err := runFileAddingWorker(node); err != nil {
			return err
		}
	}
	return corehttp.ListenAndServe(node, cfg.Addresses.API, opts...)
}
func daemonFunc(req cmds.Request, res cmds.Response) {
	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	ctx := req.InvocContext()

	go func() {
		select {
		case <-req.Context().Done():
			fmt.Println("Received interrupt signal, shutting down...")
		}
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED. You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		conn.EncryptConnections = false
	}

	// first, whether user has provided the initialization flag. we may be
	// running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if initialize {
		// now, FileExists is our best method of detecting whether IPFS is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Online: true,
		Repo:   repo,
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if routingOption == routingOptionSupernodeKwd {
		servers, err := cfg.SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []peer.PeerInfo
		for _, addr := range servers {
			infos = append(infos, peer.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}

		ncfg.Routing = corerouting.SupernodeClient(infos...)
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	fmt.Printf("Daemon is ready\n")

	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesnt follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc) {
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}
}
// CliCheckForUpdates is the automatic update check from the commandline.
func CliCheckForUpdates(cfg *config.Config, repoPath string) error {
	// if config says not to, don't check for updates
	if !cfg.Version.ShouldCheckForUpdate() {
		log.Info("update check skipped.")
		return nil
	}

	log.Info("checking for update")
	u, err := CheckForUpdate()
	// if there is no update available, record it, and exit. NB: only record
	// if we checked successfully.
	if err == ErrNoUpdateAvailable {
		log.Infof("No update available, checked on %s", time.Now())
		r, err := fsrepo.Open(repoPath)
		if err != nil {
			return err
		}
		if err := recordUpdateCheck(cfg); err != nil {
			return err
		}
		// NB: r's Config may be newer than cfg. This overwrites regardless.
		r.SetConfig(cfg)
		if err := r.Close(); err != nil {
			return err
		}
		return nil
	}

	// if another, unexpected error occurred, note it.
	if err != nil {
		log.Debugf("Error while checking for update: %v", err)
		return nil
	}

	// there is an update available
	// if we autoupdate
	if cfg.Version.AutoUpdate != config.AutoUpdateNever {
		// and we should auto update
		if ShouldAutoUpdate(cfg.Version.AutoUpdate, u.Version) {
			log.Infof("Applying update %s", u.Version)

			if err = Apply(u); err != nil {
				log.Debug(err)
				return nil
			}

			// BUG(cryptix): no good way to restart yet. - tracking https://github.com/inconshreveable/go-update/issues/5
			fmt.Printf("update %v applied. please restart.\n", u.Version)
			os.Exit(0)
		}
	}

	// autoupdate did not exit, so regular notices.
	switch cfg.Version.Check {
	case config.CheckError:
		return fmt.Errorf(errShouldUpdate, Version, u.Version)
	case config.CheckWarn:
		// print the warning
		fmt.Printf("New version available: %s\n", u.Version)
	default:
		// ignore
	}
	return nil
}
func run(ipfsPath, watchPath string) error {
	proc := process.WithParent(process.Background())
	log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)

	ipfsPath, err := homedir.Expand(ipfsPath)
	if err != nil {
		return err
	}

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	if err := addTree(watcher, watchPath); err != nil {
		return err
	}

	r, err := fsrepo.Open(ipfsPath)
	if err != nil {
		// TODO handle case: daemon running
		// TODO handle case: repo doesn't exist or isn't initialized
		return err
	}

	node, err := core.NewNode(context.Background(), &core.BuildCfg{
		Online: true,
		Repo:   r,
	})
	if err != nil {
		return err
	}
	defer node.Close()

	if *http {
		addr := "/ip4/127.0.0.1/tcp/5001"
		var opts = []corehttp.ServeOption{
			corehttp.GatewayOption(true),
			corehttp.WebUIOption,
			corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
		}
		proc.Go(func(p process.Process) {
			if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
				return
			}
		})
	}

	// buffered channel, as required by signal.Notify
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, os.Interrupt, os.Kill)

	for {
		select {
		case <-interrupts:
			return nil
		case e := <-watcher.Events:
			log.Printf("received event: %s", e)
			isDir, err := IsDirectory(e.Name)
			if err != nil {
				continue
			}
			switch e.Op {
			case fsnotify.Remove:
				if isDir {
					if err := watcher.Remove(e.Name); err != nil {
						return err
					}
				}
			default:
				// all events except for Remove result in an IPFS.Add, but only
				// directory creation triggers a new watch
				switch e.Op {
				case fsnotify.Create:
					if isDir {
						addTree(watcher, e.Name)
					}
				}
				proc.Go(func(p process.Process) {
					file, err := os.Open(e.Name)
					if err != nil {
						// skip paths we cannot open
						log.Println(err)
						return
					}
					defer file.Close()

					k, err := coreunix.Add(node, file)
					if err != nil {
						log.Println(err)
						return
					}
					log.Printf("added %s... key: %s", e.Name, k)
				})
			}
		case err := <-watcher.Errors:
			log.Println(err)
		}
	}
	return nil
}