// KeyListTextMarshaler outputs a KeyList as plaintext, one key per line func KeyListTextMarshaler(res cmds.Response) (io.Reader, error) { output := res.Output().(*KeyList) buf := new(bytes.Buffer) for _, key := range output.Keys { buf.WriteString(key.B58String() + "\n") } return buf, nil }
func objectMarshaler(res cmds.Response) (io.Reader, error) { o, ok := res.Output().(*Object) if !ok { return nil, u.ErrCast() } return strings.NewReader(o.Hash + "\n"), nil }
func bootstrapMarshaler(res cmds.Response) (io.Reader, error) { v, ok := res.Output().(*BootstrapOutput) if !ok { return nil, u.ErrCast() } buf := new(bytes.Buffer) err := bootstrapWritePeers(buf, "", v.Peers) return buf, err }
func guessMimeType(res cmds.Response) (string, error) { // Try to guess mimeType from the encoding option enc, found, err := res.Request().Option(cmds.EncShort).String() if err != nil { return "", err } if !found { return "", errors.New("no encoding option set") } return mimeTypes[enc], nil }
func stringListMarshaler(res cmds.Response) (io.Reader, error) { list, ok := res.Output().(*stringList) if !ok { return nil, errors.New("failed to cast []string") } buf := new(bytes.Buffer) for _, s := range list.Strings { buf.WriteString(s) buf.WriteString("\n") } return buf, nil }
func tourRunFunc(req cmds.Request, res cmds.Response) { cfg, err := req.InvocContext().GetConfig() if err != nil { res.SetError(err, cmds.ErrNormal) return } id := tour.TopicID(cfg.Tour.Last) if len(req.Arguments()) > 0 { id = tour.TopicID(req.Arguments()[0]) } w := new(bytes.Buffer) t, err := tourGet(id) if err != nil { // If no topic exists for this id, we handle this error right here. // To help the user achieve the task, we construct a response // comprised of... // 1) a simple error message // 2) the full list of topics fmt.Fprintln(w, "ERROR") fmt.Fprintln(w, err) fmt.Fprintln(w, "") fprintTourList(w, tour.TopicID(cfg.Tour.Last)) res.SetOutput(w) return } fprintTourShow(w, t) res.SetOutput(w) }
// read json objects off of the given stream, and write the objects out to // the 'out' channel func readStreamedJson(req cmds.Request, rr io.Reader, out chan<- interface{}, resp cmds.Response) { defer close(out) dec := json.NewDecoder(rr) outputType := reflect.TypeOf(req.Command().Type) ctx := req.Context() for { v, err := decodeTypedVal(outputType, dec) if err != nil { if err != io.EOF { log.Error(err) resp.SetError(err, cmds.ErrNormal) } return } select { case <-ctx.Done(): return case out <- v: } } }
// MessageTextMarshaler outputs a MessageOutput's message as plaintext.
//
// It returns u.ErrCast() instead of panicking when the response output is
// not a *MessageOutput, matching the file's other marshalers.
func MessageTextMarshaler(res cmds.Response) (io.Reader, error) {
	out, ok := res.Output().(*MessageOutput)
	if !ok {
		return nil, u.ErrCast()
	}
	return strings.NewReader(out.Message), nil
}
// sendResponse writes a command response out over HTTP. It maps command
// errors to HTTP status codes, sets stream/channel/content-type headers,
// declares an error trailer, and then streams the response body.
func sendResponse(w http.ResponseWriter, r *http.Request, res cmds.Response, req cmds.Request) {
	mime, err := guessMimeType(res)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	status := http.StatusOK
	// if response contains an error, write an HTTP error status code
	if e := res.Error(); e != nil {
		if e.Code == cmds.ErrClient {
			status = http.StatusBadRequest
		} else {
			status = http.StatusInternalServerError
		}
		// NOTE: The error will actually be written out by the reader below
	}

	out, err := res.Reader()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	h := w.Header()
	// Set up our potential trailer: errors hit while copying the body are
	// reported in this trailer header after the body has been sent.
	h.Set("Trailer", StreamErrHeader)

	if res.Length() > 0 {
		h.Set(contentLengthHeader, strconv.FormatUint(res.Length(), 10))
	}

	if _, ok := res.Output().(io.Reader); ok {
		// we don't set the Content-Type for streams, so that browsers can MIME-sniff the type themselves
		// we set this header so clients have a way to know this is an output stream
		// (not marshalled command output)
		mime = ""
		h.Set(streamHeader, "1")
	}

	// if output is a channel and user requested streaming channels,
	// use chunk copier for the output
	_, isChan := res.Output().(chan interface{})
	if !isChan {
		_, isChan = res.Output().(<-chan interface{})
	}
	if isChan {
		h.Set(channelHeader, "1")
	}

	if mime != "" {
		h.Set(contentTypeHeader, mime)
	}
	h.Set(transferEncodingHeader, "chunked")

	// HEAD requests get headers only, no body.
	if r.Method == "HEAD" { // after all the headers.
		return
	}

	w.WriteHeader(status)
	_, err = io.Copy(w, out)
	if err != nil {
		// The status line is already sent; report the failure via the
		// trailer declared above.
		log.Error("err: ", err)
		w.Header().Set(StreamErrHeader, sanitizedErrStr(err))
	}
}
// sendResponse writes a command response out over HTTP. Compared to other
// variants in this file, it forces text/plain for streamed and untyped
// output, uses flushCopy for the body, and sets CORS allow/expose headers
// for the custom stream/chunked headers.
func sendResponse(w http.ResponseWriter, r *http.Request, res cmds.Response, req cmds.Request) {
	mime, err := guessMimeType(res)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	status := http.StatusOK
	// if response contains an error, write an HTTP error status code
	if e := res.Error(); e != nil {
		if e.Code == cmds.ErrClient {
			status = http.StatusBadRequest
		} else {
			status = http.StatusInternalServerError
		}
		// NOTE: The error will actually be written out by the reader below
	}

	out, err := res.Reader()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	h := w.Header()
	// Set up our potential trailer: errors hit while copying the body are
	// reported in this trailer header after the body has been sent.
	h.Set("Trailer", StreamErrHeader)

	if res.Length() > 0 {
		h.Set(contentLengthHeader, strconv.FormatUint(res.Length(), 10))
	}

	if _, ok := res.Output().(io.Reader); ok {
		// set streams output type to text to avoid issues with browsers rendering
		// html pages on priveleged api ports
		mime = "text/plain"
		h.Set(streamHeader, "1")
	}

	// if output is a channel and user requested streaming channels,
	// use chunk copier for the output
	_, isChan := res.Output().(chan interface{})
	if !isChan {
		_, isChan = res.Output().(<-chan interface{})
	}
	if isChan {
		h.Set(channelHeader, "1")
	}

	// catch-all, set to text as default
	if mime == "" {
		mime = "text/plain"
	}

	h.Set(contentTypeHeader, mime)
	h.Set(transferEncodingHeader, "chunked")

	// set 'allowed' headers
	h.Set("Access-Control-Allow-Headers", "X-Stream-Output, X-Chunked-Output")
	// expose those headers
	h.Set("Access-Control-Expose-Headers", "X-Stream-Output, X-Chunked-Output")

	// HEAD requests get headers only, no body.
	if r.Method == "HEAD" { // after all the headers.
		return
	}

	w.WriteHeader(status)
	err = flushCopy(w, out)
	if err != nil {
		// The status line is already sent; report the failure via the
		// trailer declared above.
		log.Error("err: ", err)
		w.Header().Set(StreamErrHeader, sanitizedErrStr(err))
	}
}
// daemonFunc brings up a long-running IPFS daemon: it optionally initializes
// the repo, opens the fs-repo, constructs an online node (with optional
// supernode routing), starts the HTTP API and gateway, optionally mounts
// FUSE, and then blocks collecting errors from the servers until shutdown.
func daemonFunc(req cmds.Request, res cmds.Response) {
	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	ctx := req.InvocContext()

	// Print a notice when the request context is cancelled (e.g. on an
	// interrupt signal); actual teardown happens via the defer below.
	go func() {
		select {
		case <-req.Context().Done():
			fmt.Println("Received interrupt signal, shutting down...")
		}
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED. You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		conn.EncryptConnections = false
	}

	// first, whether user has provided the initialization flag. we may be
	// running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if initialize {
		// now, FileExists is our best method of detecting whether IPFS is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Online: true,
		Repo:   repo,
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if routingOption == routingOptionSupernodeKwd {
		servers, err := repo.Config().SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []peer.PeerInfo
		for _, addr := range servers {
			infos = append(infos, peer.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}
		ncfg.Routing = corerouting.SupernodeClient(infos...)
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	// Subsequent commands in this process reuse this already-built node.
	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	fmt.Printf("Daemon is ready\n")

	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesnt follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc) {
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}
}
// daemonFunc brings up a long-running IPFS daemon. Compared to older
// variants, it also injects prometheus metrics, optionally adjusts the file
// descriptor limit, handles fs-repo migrations, supports offline mode and
// floodsub, and runs an optional repo GC loop.
func daemonFunc(req cmds.Request, res cmds.Response) {
	// Inject metrics before we do anything
	err := mprome.Inject()
	if err != nil {
		log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
	}

	// let the user know we're going.
	fmt.Printf("Initializing daemon...\n")

	managefd, _, _ := req.Option(adjustFDLimitKwd).Bool()
	if managefd {
		// Best-effort: a failed fd-limit adjustment is logged, not fatal.
		if err := fileDescriptorCheck(); err != nil {
			log.Errorf("setting file descriptor limit: %s", err)
		}
	}

	ctx := req.InvocContext()

	// Print a notice when the request context is cancelled (e.g. on an
	// interrupt signal); actual teardown happens via the defer below.
	go func() {
		select {
		case <-req.Context().Done():
			fmt.Println("Received interrupt signal, shutting down...")
			fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
		}
	}()

	// check transport encryption flag.
	unencrypted, _, _ := req.Option(unencryptTransportKwd).Bool()
	if unencrypted {
		log.Warningf(`Running with --%s: All connections are UNENCRYPTED. You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
		iconn.EncryptConnections = false
	}

	// first, whether user has provided the initialization flag. we may be
	// running in an uninitialized state.
	initialize, _, err := req.Option(initOptionKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if initialize {
		// now, FileExists is our best method of detecting whether ipfs is
		// configured. Consider moving this into a config helper method
		// `IsInitialized` where the quality of the signal can be improved over
		// time, and many call-sites can benefit.
		if !util.FileExists(req.InvocContext().ConfigRoot) {
			err := initWithDefaults(os.Stdout, req.InvocContext().ConfigRoot)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}
	}

	// acquire the repo lock _before_ constructing a node. we need to make
	// sure we are permitted to access the resources (datastore, etc.)
	repo, err := fsrepo.Open(req.InvocContext().ConfigRoot)
	switch err {
	default:
		res.SetError(err, cmds.ErrNormal)
		return
	case fsrepo.ErrNeedMigration:
		// The on-disk repo is from an older version; offer to migrate it
		// (prompting interactively unless --migrate was given).
		domigrate, found, _ := req.Option(migrateKwd).Bool()
		fmt.Println("Found outdated fs-repo, migrations need to be run.")

		if !found {
			domigrate = YesNoPrompt("Run migrations now? [y/N]")
		}

		if !domigrate {
			fmt.Println("Not running migrations of fs-repo now.")
			fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io")
			res.SetError(fmt.Errorf("fs-repo requires migration"), cmds.ErrNormal)
			return
		}

		err = migrate.RunMigration(fsrepo.RepoVersion)
		if err != nil {
			fmt.Println("The migrations of fs-repo failed:")
			fmt.Printf(" %s\n", err)
			fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
			fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
			res.SetError(err, cmds.ErrNormal)
			return
		}

		// Re-open now that the repo is migrated.
		repo, err = fsrepo.Open(req.InvocContext().ConfigRoot)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	case nil:
		break
	}

	cfg, err := ctx.GetConfig()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	offline, _, _ := req.Option(offlineKwd).Bool()
	pubsub, _, _ := req.Option(enableFloodSubKwd).Bool()

	// Start assembling node config
	ncfg := &core.BuildCfg{
		Repo:      repo,
		Permament: true, // NOTE: field name is misspelled upstream; temporary way to signify that the node is permanent
		Online:    !offline,
		ExtraOpts: map[string]bool{
			"pubsub": pubsub,
		},
		//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
	}

	routingOption, _, err := req.Option(routingOptionKwd).String()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	switch routingOption {
	case routingOptionSupernodeKwd:
		servers, err := cfg.SupernodeRouting.ServerIPFSAddrs()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			repo.Close() // because ownership hasn't been transferred to the node
			return
		}
		var infos []pstore.PeerInfo
		for _, addr := range servers {
			infos = append(infos, pstore.PeerInfo{
				ID:    addr.ID(),
				Addrs: []ma.Multiaddr{addr.Transport()},
			})
		}
		ncfg.Routing = corerouting.SupernodeClient(infos...)
	case routingOptionDHTClientKwd:
		ncfg.Routing = core.DHTClientOption
	}

	node, err := core.NewNode(req.Context(), ncfg)
	if err != nil {
		log.Error("error from node construction: ", err)
		res.SetError(err, cmds.ErrNormal)
		return
	}
	node.SetLocal(false)

	printSwarmAddrs(node)

	defer func() {
		// We wait for the node to close first, as the node has children
		// that it will wait for before closing, such as the API server.
		node.Close()

		select {
		case <-req.Context().Done():
			log.Info("Gracefully shut down daemon")
		default:
		}
	}()

	// Subsequent commands in this process reuse this already-built node.
	req.InvocContext().ConstructNode = func() (*core.IpfsNode, error) {
		return node, nil
	}

	// construct api endpoint - every time
	err, apiErrc := serveHTTPApi(req)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// construct http gateway - if it is set in the config
	var gwErrc <-chan error
	if len(cfg.Addresses.Gateway) > 0 {
		var err error
		err, gwErrc = serveHTTPGateway(req)
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// construct fuse mountpoints - if the user provided the --mount flag
	mount, _, err := req.Option(mountKwd).Bool()
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}
	if mount && offline {
		res.SetError(errors.New("mount is not currently supported in offline mode"), cmds.ErrClient)
		return
	}
	if mount {
		if err := mountFuse(req); err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}
	}

	// repo blockstore GC - if --enable-gc flag is present
	err, gcErrc := maybeRunGC(req, node)
	if err != nil {
		res.SetError(err, cmds.ErrNormal)
		return
	}

	// initialize metrics collector
	prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})

	fmt.Printf("Daemon is ready\n")

	// collect long-running errors and block for shutdown
	// TODO(cryptix): our fuse currently doesnt follow this pattern for graceful shutdown
	for err := range merge(apiErrc, gwErrc, gcErrc) {
		if err != nil {
			log.Error(err)
			res.SetError(err, cmds.ErrNormal)
		}
	}
	return
}
// sendResponse writes a command response out over HTTP. This variant has no
// trailer support: it sets length/stream/channel/content-type headers, forces
// JSON for streamed channels when the client requested stream-channels, and
// delegates the actual body write to writeResponse.
func sendResponse(w http.ResponseWriter, req cmds.Request, res cmds.Response) {
	mime, err := guessMimeType(res)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	status := http.StatusOK
	// if response contains an error, write an HTTP error status code
	if e := res.Error(); e != nil {
		if e.Code == cmds.ErrClient {
			status = http.StatusBadRequest
		} else {
			status = http.StatusInternalServerError
		}
		// NOTE: The error will actually be written out by the reader below
	}

	out, err := res.Reader()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	h := w.Header()
	if res.Length() > 0 {
		h.Set(contentLengthHeader, strconv.FormatUint(res.Length(), 10))
	}

	if _, ok := res.Output().(io.Reader); ok {
		// we don't set the Content-Type for streams, so that browsers can MIME-sniff the type themselves
		// we set this header so clients have a way to know this is an output stream
		// (not marshalled command output)
		mime = ""
		h.Set(streamHeader, "1")
	}

	// if output is a channel and user requested streaming channels,
	// use chunk copier for the output
	_, isChan := res.Output().(chan interface{})
	if !isChan {
		_, isChan = res.Output().(<-chan interface{})
	}

	streamChans, _, _ := req.Option("stream-channels").Bool()
	if isChan {
		h.Set(channelHeader, "1")
		if streamChans {
			// streaming output from a channel will always be json objects
			mime = applicationJson
		}
	}

	if mime != "" {
		h.Set(contentTypeHeader, mime)
	}
	h.Set(transferEncodingHeader, "chunked")

	if err := writeResponse(status, w, out); err != nil {
		log.Error("error while writing stream", err)
	}
}