Example #1
File: tour.go Project: rht/ipget
func tourRunFunc(req cmds.Request, res cmds.Response) {

	cfg, err := req.InvocContext().GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	id := tour.TopicID(cfg.Tour.Last)
	if len(req.Arguments()) > 0 {
		id = tour.TopicID(req.Arguments()[0])
	}

	w := new(bytes.Buffer)
	t, err := tourGet(id)
	if err != nil {

		// If no topic exists for this id, we handle the error right here.
		// To help the user recover, we construct a response consisting of:
		// 1) a simple error message
		// 2) the full list of topics

		fmt.Fprintln(w, "ERROR")
		fmt.Fprintln(w, err)
		fmt.Fprintln(w, "")
		fprintTourList(w, tour.TopicID(cfg.Tour.Last))
		res.SetOutput(w)

		return
	}

	fprintTourShow(w, t)
	res.SetOutput(w)
}
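
A pattern worth noting here: an unknown topic id is reported as ordinary command output rather than via res.SetError, so the user gets the error message together with the full topic list in one response body. A minimal, self-contained sketch of that buffer-as-output idea, stdlib only (renderTopicError and its inputs are illustrative, not go-ipfs APIs):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
)

// renderTopicError writes a short error banner followed by a fallback
// listing into a buffer and returns it as the response body.
func renderTopicError(err error, topics []string) io.Reader {
	w := new(bytes.Buffer)
	fmt.Fprintln(w, "ERROR")
	fmt.Fprintln(w, err)
	fmt.Fprintln(w, "")
	for _, t := range topics {
		fmt.Fprintln(w, "  -", t)
	}
	return w
}

func main() {
	r := renderTopicError(errors.New("no such topic"), []string{"0.0 Introduction", "0.1 Basics"})
	io.Copy(os.Stdout, r) // tourRunFunc hands the same buffer to res.SetOutput
}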
Example #2
// read json objects off the given stream and write them out to the
// 'out' channel
func readStreamedJson(req cmds.Request, rr io.Reader, out chan<- interface{}, resp cmds.Response) {
	defer close(out)
	dec := json.NewDecoder(rr)
	outputType := reflect.TypeOf(req.Command().Type)

	ctx := req.Context()

	for {
		v, err := decodeTypedVal(outputType, dec)
		if err != nil {
			if err != io.EOF {
				log.Error(err)
				resp.SetError(err, cmds.ErrNormal)
			}
			return
		}

		select {
		case <-ctx.Done():
			return
		case out <- v:
		}
	}
}
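
Two things carry this loop: the elided decodeTypedVal helper, which presumably allocates a fresh value of the command's declared output type and decodes into it, and the select that stops forwarding as soon as the request context is cancelled. A self-contained sketch under those assumptions (event, decodeVal, and readStream are illustrative names, not go-ipfs APIs):

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"reflect"
	"strings"
)

type event struct {
	Name string `json:"name"`
}

// decodeVal allocates a fresh value of type t and decodes the next JSON
// object into it, roughly what the elided decodeTypedVal is assumed to do.
func decodeVal(t reflect.Type, dec *json.Decoder) (interface{}, error) {
	v := reflect.New(t) // pointer to a zero value of t
	if err := dec.Decode(v.Interface()); err != nil {
		return nil, err
	}
	return v.Elem().Interface(), nil
}

// readStream forwards decoded values on out until EOF, a decode error, or
// context cancellation, mirroring the loop in readStreamedJson above.
func readStream(ctx context.Context, r io.Reader, out chan<- interface{}) {
	defer close(out)
	dec := json.NewDecoder(r)
	t := reflect.TypeOf(event{})
	for {
		v, err := decodeVal(t, dec)
		if err != nil {
			return // io.EOF ends the stream; real code reports other errors
		}
		select {
		case <-ctx.Done():
			return
		case out <- v:
		}
	}
}

func main() {
	out := make(chan interface{})
	go readStream(context.Background(), strings.NewReader(`{"name":"a"} {"name":"b"}`), out)
	for v := range out {
		fmt.Println(v.(event).Name)
	}
}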
Example #3
func daemonFunc(req cmds.Request, res cmds.Response) {
	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	ctx := req.InvocContext()

	go func() {
		<-req.Context().Done()
		fmt.Println("Received interrupt signal, shutting down...")
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED.
		You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		conn.EncryptConnections = false
	}

	// first, check whether the user has provided the initialization flag. we
	// may be running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	if initialize {

		// now, FileExists is our best method of detecting whether IPFS is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Online: true,
		Repo:   repo,
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if routingOption == routingOptionSupernodeKwd {
		servers, err := repo.Config().SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []peer.PeerInfo
		for _, addr := range servers {
			infos = append(infos, peer.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}

		ncfg.Routing = corerouting.SupernodeClient(infos...)
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	fmt.Printf("Daemon is ready\n")
	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc) {
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}
}
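
Note the shape of serveHTTPApi and serveHTTPGateway as used here: setup failures come back synchronously as err, while errors from the long-running listener arrive later on the returned channel (apiErrc, gwErrc) that the final merge loop drains. A rough stdlib sketch of that split (serveHello is illustrative, not the go-ipfs implementation):

package main

import (
	"fmt"
	"net"
	"net/http"
)

// serveHello returns setup errors synchronously and runtime errors on a
// channel, mirroring the (err, errc) shape of serveHTTPApi above.
func serveHello(addr string) (error, <-chan error) {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err, nil // setup failure: report before declaring readiness
	}
	errc := make(chan error, 1)
	go func() {
		defer close(errc)
		// runtime failure: delivered asynchronously, after "ready"
		errc <- http.Serve(lis, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "hello")
		}))
	}()
	return nil, errc
}

func main() {
	err, errc := serveHello("127.0.0.1:0")
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	fmt.Println("listener running; a runtime error would arrive on errc")
	_ = errc
}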
Example #4
func daemonFunc(req cmds.Request, res cmds.Response) {
	// Inject metrics before we do anything

	err := mprome.Inject()
	if err != nil {
		log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
	}

	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	managefd, _, _ := req.Option(adjustFDLimitKwd).Bool()
	if managefd {
		if err := fileDescriptorCheck(); err != nil {
			log.Errorf("setting file descriptor limit: %s", err)
		}
	}

	ctx := req.InvocContext()

	go func() {
		<-req.Context().Done()
		fmt.Println("Received interrupt signal, shutting down...")
		fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED.
		You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		iconn.EncryptConnections = false
	}

	// first, check whether the user has provided the initialization flag. we
	// may be running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	if initialize {

		// now, FileExists is our best method of detecting whether ipfs is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	switch err {
	default:
		res.SetError(err, cmds.ErrNormal)
		return
	case fsrepo.ErrNeedMigration:
		domigrate, found, _ := req.Option(migrateKwd).Bool()
		fmt.Println("Found outdated fs-repo, migrations need to be run.")

		if !found {
			domigrate = YesNoPrompt("Run migrations now? [y/N]")
		}

		if !domigrate {
			fmt.Println("Not running migrations of fs-repo now.")
			fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io")
			res.SetError(fmt.Errorf("fs-repo requires migration"), cmds.ErrNormal)
			return
		}

		err = migrate.RunMigration(fsrepo.RepoVersion)
		if err != nil {
			fmt.Println("The migrations of fs-repo failed:")
			fmt.Printf("  %s\n", err)
			fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
			fmt.Println("  https://github.com/ipfs/fs-repo-migrations")
			res.SetError(err, cmds.ErrNormal)
			return
		}

		repo, err = fsrepo.Open(req.InvocContext().ConfigRoot)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	case nil:
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	offline, _, _ := req.Option(offlineKwd).Bool()
	pubsub, _, _ := req.Option(enableFloodSubKwd).Bool()

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Repo:      repo,
		Permament: true, // temporary way to signify that the node is permanent ("Permament" is the field's upstream spelling)
		Online:    !offline,
		ExtraOpts: map[string]bool{
			"pubsub": pubsub,
		},
		//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	switch routingOption {
	case routingOptionSupernodeKwd:
		servers, err := cfg.SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []pstore.PeerInfo
		for _, addr := range servers {
			infos = append(infos, pstore.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}

		ncfg.Routing = corerouting.SupernodeClient(infos...)
	case routingOptionDHTClientKwd:
		ncfg.Routing = core.DHTClientOption
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}
	node.SetLocal(false)

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount && offline {
		res.SetError(errors.New("mount is not currently supported in offline mode"),
			cmds.ErrClient)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// repo blockstore GC - if --enable-gc flag is present
	err, gcErrc := maybeRunGC(req, node)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// initialize metrics collector
	prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})

	fmt.Printf("Daemon is ready\n")
	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc, gcErrc) {
		if err != nil {
			log.Error(err)
			res.SetError(err, cmds.ErrNormal)
		}
	}
}
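
Both daemon variants finish by draining merge(...), which fans several error channels into one. merge itself is not shown in these excerpts; the sketch below is the standard sync.WaitGroup fan-in and, as an assumption, includes the nil-channel skip the call sites rely on when a subsystem is disabled (e.g. gwErrc stays nil if no gateway address is configured):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// merge fans any number of error channels into one, closing the output once
// every input is drained; nil inputs (disabled subsystems) are skipped.
func merge(chans ...<-chan error) <-chan error {
	out := make(chan error)
	var wg sync.WaitGroup
	for _, c := range chans {
		if c == nil {
			continue // e.g. gwErrc when no gateway is configured
		}
		wg.Add(1)
		go func(c <-chan error) {
			defer wg.Done()
			for err := range c {
				out <- err
			}
		}(c)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}

func main() {
	api := make(chan error, 1)
	api <- errors.New("api: listener closed")
	close(api)
	var gw <-chan error // nil: gateway disabled
	for err := range merge(api, gw) {
		fmt.Println(err)
	}
}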