func (h *jobAPI) PullImages(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	tufDB, err := extractTufDB(r)
	if err != nil {
		httphelper.Error(w, err)
		return
	}
	defer os.Remove(tufDB)

	info := make(chan layer.PullInfo)
	stream := sse.NewStream(w, info, nil)
	go stream.Serve()

	if err := pinkerton.PullImages(
		tufDB,
		r.URL.Query().Get("repository"),
		r.URL.Query().Get("driver"),
		r.URL.Query().Get("root"),
		info,
	); err != nil {
		stream.CloseWithError(err)
		return
	}

	stream.Wait()
}
func (h *jobAPI) PullImages(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	log := h.host.log.New("fn", "PullImages")

	log.Info("extracting TUF database")
	tufDB, err := extractTufDB(r)
	if err != nil {
		log.Error("error extracting TUF database", "err", err)
		httphelper.Error(w, err)
		return
	}
	defer os.Remove(tufDB)

	info := make(chan layer.PullInfo)
	stream := sse.NewStream(w, info, nil)
	go stream.Serve()

	log.Info("pulling images")
	if err := pinkerton.PullImages(
		tufDB,
		r.URL.Query().Get("repository"),
		r.URL.Query().Get("driver"),
		r.URL.Query().Get("root"),
		r.URL.Query().Get("version"),
		info,
	); err != nil {
		log.Error("error pulling images", "err", err)
		stream.CloseWithError(err)
		return
	}

	stream.Wait()
}
func (api *httpAPI) EventsHandler(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
	api.InstallerStackMtx.Lock()
	s := api.InstallerStacks[params.ByName("id")]
	api.InstallerStackMtx.Unlock()
	if s == nil {
		httphelper.ObjectNotFoundError(w, "install instance not found")
		return
	}

	eventChan := make(chan *httpEvent)
	doneChan, errChan := s.Subscribe(eventChan)

	stream := sse.NewStream(w, eventChan, s.logger)
	stream.Serve()

	s.logger.Info(fmt.Sprintf("streaming events for %s", s.ID))

	go func() {
		for {
			select {
			case err := <-errChan:
				s.logger.Info(err.Error())
				stream.Error(err)
			case <-doneChan:
				stream.Close()
				return
			}
		}
	}()

	stream.Wait()
}
func (h *httpAPI) handleStream(w http.ResponseWriter, params httprouter.Params, kind discoverd.EventKind) {
	ch := make(chan *discoverd.Event, 64) // TODO: figure out how big this buffer should be
	stream := h.Store.Subscribe(params.ByName("service"), true, kind, ch)

	s := sse.NewStream(w, ch, nil)
	s.Serve()
	s.Wait()
	stream.Close()

	if err := stream.Err(); err != nil {
		s.CloseWithError(err)
	}
}
// serveStream creates a subscription and streams out events in SSE format.
func (h *Handler) serveStream(w http.ResponseWriter, params httprouter.Params, kind discoverd.EventKind) {
	// Create a buffered channel to receive events.
	ch := make(chan *discoverd.Event, StreamBufferSize)

	// Subscribe to events on the store.
	service := params.ByName("service")
	stream := h.Store.Subscribe(service, true, kind, ch)

	// Create and serve an SSE stream.
	s := sse.NewStream(w, ch, nil)
	s.Serve()
	s.Wait()
	stream.Close()

	// Check if there was an error while closing.
	if err := stream.Err(); err != nil {
		s.CloseWithError(err)
	}
}
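For context, handlers with this (w, params, kind) signature are mounted on an httprouter route through a small closure. The sketch below shows one plausible wiring; the route path and discoverd.EventKindUp are assumptions for illustration, only the serveStream signature comes from the snippet above.

// Hypothetical wiring of serveStream into an httprouter mux; the path and
// event kind are illustrative, not taken from the real discoverd router.
func (h *Handler) routes() *httprouter.Router {
	r := httprouter.New()
	r.GET("/services/:service/events", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
		h.serveStream(w, ps, discoverd.EventKindUp)
	})
	return r
}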
func (c *controllerAPI) streamFormations(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	ch := make(chan *ct.ExpandedFormation)

	since, err := time.Parse(time.RFC3339, req.FormValue("since"))
	if err != nil {
		respondWithError(w, err)
		return
	}

	sub, err := c.formationRepo.Subscribe(ch, since, nil)
	if err != nil {
		respondWithError(w, err)
		return
	}
	defer c.formationRepo.Unsubscribe(sub)

	l, _ := ctxhelper.LoggerFromContext(ctx)
	stream := sse.NewStream(w, ch, l)
	stream.Serve()
	stream.Wait()

	if err := sub.Err(); err != nil {
		stream.Error(err)
	}
}
func (h *jobAPI) PullImages(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	log := h.host.log.New("fn", "PullImages")

	log.Info("extracting TUF database")
	tufDB, err := extractTufDB(r)
	if err != nil {
		log.Error("error extracting TUF database", "err", err)
		httphelper.Error(w, err)
		return
	}
	defer os.Remove(tufDB)

	query := r.URL.Query()

	log.Info("initializing TUF client")
	client, err := newTufClient(tufDB, query.Get("repository"))
	if err != nil {
		log.Error("error initializing TUF client", "err", err)
		httphelper.Error(w, err)
		return
	}

	info := make(chan *ct.ImagePullInfo)
	stream := sse.NewStream(w, info, nil)
	go stream.Serve()

	d := downloader.New(client, h.host.vman, query.Get("version"))

	log.Info("pulling images")
	if err := d.DownloadImages(query.Get("config-dir"), info); err != nil {
		log.Error("error pulling images", "err", err)
		stream.CloseWithError(err)
		return
	}

	stream.Wait()
}
func serveBuildLogStream(b *Build, w http.ResponseWriter) error {
	res, err := http.Get(b.LogURL)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d getting build log", res.StatusCode)
	}

	_, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
	if err != nil {
		return err
	}

	ch := make(chan *logLine)
	stream := sse.NewStream(w, ch, nil)
	stream.Serve()

	go func() {
		mr := multipart.NewReader(res.Body, params["boundary"])
		for {
			p, err := mr.NextPart()
			if err != nil {
				stream.CloseWithError(err)
				return
			}
			s := bufio.NewScanner(p)
			for s.Scan() {
				ch <- &logLine{p.FileName(), s.Text()}
			}
			if err := s.Err(); err != nil {
				stream.CloseWithError(err)
				return
			}
		}
	}()

	stream.Wait()
	return nil
}
func (c *controllerAPI) AppLog(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	ctx, cancel := context.WithCancel(ctx)

	opts := logaggc.LogOpts{
		Follow: req.FormValue("follow") == "true",
		JobID:  req.FormValue("job_id"),
	}
	if vals, ok := req.Form["process_type"]; ok && len(vals) > 0 {
		opts.ProcessType = &vals[len(vals)-1]
	}
	if strLines := req.FormValue("lines"); strLines != "" {
		lines, err := strconv.Atoi(req.FormValue("lines"))
		if err != nil {
			respondWithError(w, err)
			return
		}
		opts.Lines = &lines
	}

	rc, err := c.logaggc.GetLog(c.getApp(ctx).ID, &opts)
	if err != nil {
		respondWithError(w, err)
		return
	}

	if cn, ok := w.(http.CloseNotifier); ok {
		go func() {
			select {
			case <-cn.CloseNotify():
				rc.Close()
			case <-ctx.Done():
			}
		}()
	}
	defer cancel()
	defer rc.Close()

	if !strings.Contains(req.Header.Get("Accept"), "text/event-stream") {
		w.Header().Set("Content-Type", "text/plain")
		w.WriteHeader(200)
		// Send headers right away if following
		if wf, ok := w.(http.Flusher); ok && opts.Follow {
			wf.Flush()
		}
		fw := httphelper.FlushWriter{Writer: w, Enabled: opts.Follow}
		io.Copy(fw, rc)
		return
	}

	ch := make(chan *sseLogChunk)
	l, _ := ctxhelper.LoggerFromContext(ctx)
	s := sse.NewStream(w, ch, l)
	defer s.Close()
	s.Serve()

	msgc := make(chan *json.RawMessage)
	go func() {
		defer close(msgc)
		dec := json.NewDecoder(rc)
		for {
			var m json.RawMessage
			if err := dec.Decode(&m); err != nil {
				if err != io.EOF {
					l.Error("decoding logagg stream", "err", err)
				}
				return
			}
			msgc <- &m
		}
	}()

	for {
		select {
		case m := <-msgc:
			// msgc was closed: the log stream hit EOF.
			if m == nil {
				ch <- &sseLogChunk{Event: "eof"}
				return
			}
			// write to sse
			select {
			case ch <- &sseLogChunk{Event: "message", Data: *m}:
			case <-s.Done:
				return
			case <-ctx.Done():
				return
			}
		case <-s.Done:
			return
		case <-ctx.Done():
			return
		}
	}
}
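AppLog only switches to SSE when the client advertises it in the Accept header; otherwise it streams plain text through a FlushWriter. The sketch below shows a plausible client request; controllerURL and the app path are placeholders, while the query parameters (follow, lines, job_id, process_type) and the "message"/"eof" event names come from the handler above.

// Hypothetical client for the SSE form of AppLog; controllerURL and the app
// path are illustrative placeholders, not confirmed routes.
func followAppLog(controllerURL string) (*http.Response, error) {
	req, err := http.NewRequest("GET", controllerURL+"/apps/example-app/log?follow=true&lines=100", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "text/event-stream")
	// The response body carries "message" events followed by a final "eof" event.
	return http.DefaultClient.Do(req)
}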
func streamEvents(ctx context.Context, w http.ResponseWriter, req *http.Request, eventListener *EventListener, app *ct.App, repo *EventRepo) (err error) {
	var appID string
	if app != nil {
		appID = app.ID
	}

	var lastID int64
	if req.Header.Get("Last-Event-Id") != "" {
		lastID, err = strconv.ParseInt(req.Header.Get("Last-Event-Id"), 10, 64)
		if err != nil {
			return ct.ValidationError{Field: "Last-Event-Id", Message: "is invalid"}
		}
	}

	var count int
	if req.FormValue("count") != "" {
		count, err = strconv.Atoi(req.FormValue("count"))
		if err != nil {
			return ct.ValidationError{Field: "count", Message: "is invalid"}
		}
	}

	objectTypes := strings.Split(req.FormValue("object_types"), ",")
	if len(objectTypes) == 1 && objectTypes[0] == "" {
		objectTypes = []string{}
	}
	objectID := req.FormValue("object_id")
	past := req.FormValue("past")

	l, _ := ctxhelper.LoggerFromContext(ctx)
	log := l.New("fn", "Events", "object_types", objectTypes, "object_id", objectID)
	ch := make(chan *ct.Event)
	s := sse.NewStream(w, ch, log)
	s.Serve()
	defer func() {
		if err == nil {
			s.Close()
		} else {
			s.CloseWithError(err)
		}
	}()

	sub, err := eventListener.Subscribe(appID, objectTypes, objectID)
	if err != nil {
		return err
	}
	defer sub.Close()

	var currID int64
	if past == "true" || lastID > 0 {
		list, err := repo.ListEvents(appID, objectTypes, objectID, lastID, count)
		if err != nil {
			return err
		}
		// events are in ID DESC order, so iterate in reverse
		for i := len(list) - 1; i >= 0; i-- {
			e := list[i]
			ch <- e
			currID = e.ID
		}
	}

	for {
		select {
		case <-s.Done:
			return
		case event, ok := <-sub.Events:
			if !ok {
				return sub.Err
			}
			if event.ID <= currID {
				continue
			}
			ch <- event
		}
	}
}
func streamJobs(ctx context.Context, req *http.Request, w http.ResponseWriter, app *ct.App, repo *JobRepo) (err error) {
	var lastID int64
	if req.Header.Get("Last-Event-Id") != "" {
		lastID, err = strconv.ParseInt(req.Header.Get("Last-Event-Id"), 10, 64)
		if err != nil {
			return ct.ValidationError{Field: "Last-Event-Id", Message: "is invalid"}
		}
	}

	var count int
	if req.FormValue("count") != "" {
		count, err = strconv.Atoi(req.FormValue("count"))
		if err != nil {
			return ct.ValidationError{Field: "count", Message: "is invalid"}
		}
	}

	ch := make(chan *ct.JobEvent)
	l, _ := ctxhelper.LoggerFromContext(ctx)
	s := sse.NewStream(w, ch, l)
	s.Serve()

	connected := make(chan struct{})
	done := make(chan struct{})
	listenEvent := func(ev pq.ListenerEventType, listenErr error) {
		switch ev {
		case pq.ListenerEventConnected:
			close(connected)
		case pq.ListenerEventDisconnected:
			if done != nil {
				close(done)
				done = nil
			}
		case pq.ListenerEventConnectionAttemptFailed:
			err = listenErr
			if done != nil {
				close(done)
				done = nil
			}
		}
	}
	listener := pq.NewListener(repo.db.DSN(), 10*time.Second, time.Minute, listenEvent)
	defer listener.Close()
	listener.Listen("job_events:" + postgres.FormatUUID(app.ID))

	var currID int64
	if lastID > 0 || count > 0 {
		events, err := repo.listEvents(app.ID, lastID, count)
		if err != nil {
			return err
		}
		// events are in ID DESC order, so iterate in reverse
		for i := len(events) - 1; i >= 0; i-- {
			e := events[i]
			ch <- e
			currID = e.ID
		}
	}

	select {
	case <-done:
		return
	case <-connected:
	}

	for {
		select {
		case <-s.Done:
			return
		case <-done:
			return
		case n := <-listener.Notify:
			id, err := strconv.ParseInt(n.Extra, 10, 64)
			if err != nil {
				return err
			}
			if id <= currID {
				continue
			}
			e, err := repo.getEvent(id)
			if err != nil {
				return err
			}
			ch <- e
		}
	}
}
func (c *controllerAPI) streamFormations(ctx context.Context, w http.ResponseWriter, req *http.Request) (err error) {
	l, _ := ctxhelper.LoggerFromContext(ctx)
	ch := make(chan *ct.ExpandedFormation)
	stream := sse.NewStream(w, ch, l)
	stream.Serve()
	defer func() {
		if err == nil {
			stream.Close()
		} else {
			stream.CloseWithError(err)
		}
	}()

	since, err := time.Parse(time.RFC3339Nano, req.FormValue("since"))
	if err != nil {
		return err
	}

	eventListener, err := c.maybeStartEventListener()
	if err != nil {
		l.Error("error starting event listener", "err", err)
		return err
	}
	sub, err := eventListener.Subscribe("", []string{string(ct.EventTypeScale)}, "")
	if err != nil {
		return err
	}
	defer sub.Close()

	formations, err := c.formationRepo.ListSince(since)
	if err != nil {
		return err
	}
	currentUpdatedAt := since
	for _, formation := range formations {
		select {
		case <-stream.Done:
			return nil
		case ch <- formation:
			if formation.UpdatedAt.After(currentUpdatedAt) {
				currentUpdatedAt = formation.UpdatedAt
			}
		}
	}

	select {
	case <-stream.Done:
		return nil
	case ch <- &ct.ExpandedFormation{}:
	}

	for {
		select {
		case <-stream.Done:
			return
		case event, ok := <-sub.Events:
			if !ok {
				return sub.Err
			}
			var scale ct.Scale
			if err := json.Unmarshal(event.Data, &scale); err != nil {
				l.Error("error deserializing scale event", "event.id", event.ID, "err", err)
				continue
			}
			formation, err := c.formationRepo.GetExpanded(event.AppID, scale.ReleaseID, true)
			if err != nil {
				l.Error("error expanding formation", "app.id", event.AppID, "release.id", scale.ReleaseID, "err", err)
				continue
			}
			if formation.UpdatedAt.Before(currentUpdatedAt) {
				continue
			}
			select {
			case <-stream.Done:
				return nil
			case ch <- formation:
			}
		}
	}
}
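Every handler above follows the same sse.Stream lifecycle: make an event channel, wrap it with sse.NewStream, start Serve, push events onto the channel, and finish with Wait, Close or CloseWithError. The sketch below is a minimal distillation of that pattern; Event and produce are illustrative placeholders, and only the sse.Stream calls themselves come from the snippets.

// Event is a placeholder payload; the real handlers stream types such as
// *ct.Event, *discoverd.Event or layer.PullInfo from their own sources.
type Event struct {
	Msg string `json:"msg"`
}

// serveEvents is a minimal sketch of the shared lifecycle, assuming the
// github.com/flynn/flynn/pkg/sse package used throughout the snippets above.
func serveEvents(w http.ResponseWriter, produce func(chan<- *Event) error) {
	ch := make(chan *Event)
	stream := sse.NewStream(w, ch, nil)
	go stream.Serve()

	// produce sends events on ch and closes it when finished; on error the
	// stream is closed with that error instead.
	if err := produce(ch); err != nil {
		stream.CloseWithError(err)
		return
	}

	// Wait blocks until the remaining events have been flushed to the client.
	stream.Wait()
}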