// ServeConn takes an inbound conn and proxies it to a backend. func (p *ReverseProxy) ServeConn(ctx context.Context, dconn net.Conn) { transport := p.transport if transport == nil { panic("router: nil transport for proxy") } defer dconn.Close() clientGone := dconn.(http.CloseNotifier).CloseNotify() ctx, cancel := context.WithCancel(ctx) defer cancel() // finish cancellation goroutine go func() { select { case <-clientGone: cancel() // client went away, cancel request case <-ctx.Done(): } }() uconn, err := transport.Connect(ctx) if err != nil { p.logf("router: proxy error: %v", err) return } defer uconn.Close() joinConns(uconn, dconn) }
// ServeConn takes an inbound conn and proxies it to a backend. func (p *ReverseProxy) ServeConn(ctx context.Context, dconn net.Conn) { transport := p.transport if transport == nil { panic("router: nil transport for proxy") } defer dconn.Close() clientGone := dconn.(http.CloseNotifier).CloseNotify() ctx, cancel := context.WithCancel(ctx) defer cancel() // finish cancellation goroutine go func() { select { case <-clientGone: cancel() // client went away, cancel request case <-ctx.Done(): } }() l := p.Logger.New("client_addr", dconn.RemoteAddr(), "host_addr", dconn.LocalAddr(), "proxy", "tcp") uconn, err := transport.Connect(ctx, l) if err != nil { return } defer uconn.Close() joinConns(uconn, dconn) }
func (a *aggregatorAPI) GetLog(ctx context.Context, w http.ResponseWriter, req *http.Request) { ctx, cancel := context.WithCancel(ctx) if cn, ok := w.(http.CloseNotifier); ok { ch := cn.CloseNotify() go func() { select { case <-ch: cancel() case <-ctx.Done(): } }() } defer cancel() params, _ := ctxhelper.ParamsFromContext(ctx) follow := false if strFollow := req.FormValue("follow"); strFollow == "true" { follow = true } var ( backlog bool lines int err error ) if strLines := req.FormValue("lines"); strLines != "" { if lines, err = strconv.Atoi(strLines); err != nil { httphelper.ValidationError(w, "lines", err.Error()) return } if lines < 0 || lines > 10000 { httphelper.ValidationError(w, "lines", "lines must be an integer between 0 and 10000") return } backlog = lines > 0 } filters := make(filterSlice, 0) if jobID := req.FormValue("job_id"); jobID != "" { filters = append(filters, filterJobID(jobID)) } if processTypeVals, ok := req.Form["process_type"]; ok && len(processTypeVals) > 0 { val := processTypeVals[len(processTypeVals)-1] filters = append(filters, filterProcessType(val)) } iter := &Iterator{ id: params.ByName("channel_id"), follow: follow, backlog: backlog, lines: lines, filter: filters, donec: ctx.Done(), } writeMessages(ctx, w, iter.Scan(a.agg)) }
func (d *pgDataStore) Sync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error { ctx, cancel := context.WithCancel(ctx) idc, errc, err := d.startListener(ctx) if err != nil { return err } initialRoutes, err := d.List() if err != nil { cancel() return err } toRemove := h.Current() for _, route := range initialRoutes { if _, ok := toRemove[route.ID]; ok { delete(toRemove, route.ID) } if err := h.Set(route); err != nil { return err } } // send remove for any routes that are no longer in the database for id := range toRemove { if err := h.Remove(id); err != nil { return err } } close(startc) for { select { case id := <-idc: if err := d.handleUpdate(h, id); err != nil { cancel() return err } case err = <-errc: return err case <-ctx.Done(): // wait for startListener to finish (it will either // close idc or send an error on errc) select { case <-idc: case <-errc: } return nil } } }
func (a *aggregatorAPI) GetLog(ctx context.Context, w http.ResponseWriter, req *http.Request) { ctx, cancel := context.WithCancel(ctx) if cn, ok := w.(http.CloseNotifier); ok { go func() { select { case <-cn.CloseNotify(): cancel() case <-ctx.Done(): } }() } defer cancel() params, _ := ctxhelper.ParamsFromContext(ctx) channelID := params.ByName("channel_id") follow := false if strFollow := req.FormValue("follow"); strFollow == "true" { follow = true } lines := -1 // default to all lines if strLines := req.FormValue("lines"); strLines != "" { var err error lines, err = strconv.Atoi(strLines) if err != nil || lines < 0 || lines > 10000 { httphelper.ValidationError(w, "lines", "lines must be an integer between 0 and 10000") return } } filters := make(filterSlice, 0) if jobID := req.FormValue("job_id"); jobID != "" { filters = append(filters, filterJobID(jobID)) } if processTypeVals, ok := req.Form["process_type"]; ok && len(processTypeVals) > 0 { val := processTypeVals[len(processTypeVals)-1] filters = append(filters, filterProcessType(val)) } w.WriteHeader(200) var msgc <-chan *rfc5424.Message if follow { msgc = a.agg.ReadLastNAndSubscribe(channelID, lines, filters, ctx.Done()) } else { msgc = a.agg.ReadLastN(channelID, lines, filters, ctx.Done()) } writeMessages(ctx, w, msgc) }
func (l *TCPListener) Start() error { ctx := context.Background() // TODO(benburkert): make this an argument ctx, l.stopSync = context.WithCancel(ctx) if l.Watcher != nil { return errors.New("router: tcp listener already started") } if l.wm == nil { l.wm = NewWatchManager() } l.Watcher = l.wm if l.ds == nil { return errors.New("router: tcp listener missing data store") } l.DataStoreReader = l.ds l.services = make(map[string]*tcpService) l.routes = make(map[string]*tcpRoute) l.ports = make(map[int]*tcpRoute) l.listeners = make(map[int]net.Listener) if l.startPort != 0 && l.endPort != 0 { for i := l.startPort; i <= l.endPort; i++ { addr := fmt.Sprintf("%s:%d", l.IP, i) listener, err := listenFunc("tcp4", addr) if err != nil { l.Close() return listenErr{addr, err} } l.listeners[i] = listener } } // TODO(benburkert): the sync API cannot handle routes deleted while the // listen/notify connection is disconnected if err := l.startSync(ctx); err != nil { l.Close() return err } return nil }
func (d *pgDataStore) Sync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error { ctx, cancel := context.WithCancel(ctx) idc, errc, err := d.startListener(ctx) if err != nil { return err } initialRoutes, err := d.List() if err != nil { cancel() return err } for _, route := range initialRoutes { if err := h.Set(route); err != nil { return err } } close(startc) for { select { case id := <-idc: if err := d.handleUpdate(h, id); err != nil { cancel() return err } case err = <-errc: return err case <-ctx.Done(): // wait for startListener to finish (it will either // close idc or send an error on errc) select { case <-idc: case <-errc: } return nil } } }
func (s *HTTPListener) Start() error { ctx := context.Background() // TODO(benburkert): make this an argument ctx, s.stopSync = context.WithCancel(ctx) if s.Watcher != nil { return errors.New("router: http listener already started") } if s.wm == nil { s.wm = NewWatchManager() } s.Watcher = s.wm if s.ds == nil { return errors.New("router: http listener missing data store") } s.DataStoreReader = s.ds s.routes = make(map[string]*httpRoute) s.domains = make(map[string]*httpRoute) s.services = make(map[string]*httpService) if s.cookieKey == nil { s.cookieKey = &[32]byte{} } // TODO(benburkert): the sync API cannot handle routes deleted while the // listen/notify connection is disconnected if err := s.startSync(ctx); err != nil { return err } if err := s.startListen(); err != nil { s.stopSync() return err } return nil }
// AppLog proxies an app's log stream from the log aggregator to the client,
// either as plain text or as a server-sent-event stream depending on the
// request's Accept header. Query params: follow, job_id, process_type, lines.
func (c *controllerAPI) AppLog(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	ctx, cancel := context.WithCancel(ctx)

	opts := logaggc.LogOpts{
		Follow: req.FormValue("follow") == "true",
		JobID:  req.FormValue("job_id"),
	}
	if vals, ok := req.Form["process_type"]; ok && len(vals) > 0 {
		// only the last process_type value is used
		opts.ProcessType = &vals[len(vals)-1]
	}
	if strLines := req.FormValue("lines"); strLines != "" {
		lines, err := strconv.Atoi(req.FormValue("lines"))
		if err != nil {
			respondWithError(w, err)
			return
		}
		opts.Lines = &lines
	}

	rc, err := c.logaggc.GetLog(c.getApp(ctx).ID, &opts)
	if err != nil {
		respondWithError(w, err)
		return
	}
	// Close the aggregator stream when the client disconnects so the copy
	// loops below unblock. NOTE(review): this path closes rc but does not
	// call cancel(); cancellation only happens via the deferred cancel below.
	if cn, ok := w.(http.CloseNotifier); ok {
		go func() {
			select {
			case <-cn.CloseNotify():
				rc.Close()
			case <-ctx.Done():
			}
		}()
	}
	defer cancel()
	defer rc.Close()

	// Plain-text path: stream the raw log straight through.
	if !strings.Contains(req.Header.Get("Accept"), "text/event-stream") {
		w.Header().Set("Content-Type", "text/plain")
		w.WriteHeader(200)
		// Send headers right away if following
		if wf, ok := w.(http.Flusher); ok && opts.Follow {
			wf.Flush()
		}
		fw := httphelper.FlushWriter{Writer: w, Enabled: opts.Follow}
		io.Copy(fw, rc)
		return
	}

	// SSE path: decode the aggregator's JSON stream and re-emit each message
	// as an SSE "message" event, followed by an "eof" event at end of stream.
	ch := make(chan *sseLogChunk)
	l, _ := ctxhelper.LoggerFromContext(ctx)
	s := sse.NewStream(w, ch, l)
	defer s.Close()
	s.Serve()

	// Decoder goroutine: closes msgc on EOF or decode error, which the loop
	// below observes as a nil receive.
	msgc := make(chan *json.RawMessage)
	go func() {
		defer close(msgc)
		dec := json.NewDecoder(rc)
		for {
			var m json.RawMessage
			if err := dec.Decode(&m); err != nil {
				if err != io.EOF {
					l.Error("decoding logagg stream", err)
				}
				return
			}
			msgc <- &m
		}
	}()

	for {
		select {
		case m := <-msgc:
			if m == nil {
				// msgc closed: signal end of stream to the SSE client.
				// NOTE(review): this send is not guarded by s.Done/ctx.Done
				// like the one below — presumably sse.Stream drains ch until
				// closed; confirm it cannot block forever here.
				ch <- &sseLogChunk{Event: "eof"}
				return
			}
			// write to sse
			select {
			case ch <- &sseLogChunk{Event: "message", Data: *m}:
			case <-s.Done:
				return
			case <-ctx.Done():
				return
			}
		case <-s.Done:
			return
		case <-ctx.Done():
			return
		}
	}
}
// TestAggregatorReadLastNAndSubscribe exercises ReadLastNAndSubscribe:
// for each table entry it seeds the aggregator with two sample messages over
// TCP, reads back the expected backlog, then verifies that subsequently
// written messages are delivered (or filtered out) by the subscription.
func (s *LogAggregatorTestSuite) TestAggregatorReadLastNAndSubscribe(c *C) {
	runTest := func(lines int, filter Filter, expectedBefore, expectedSubMsgs, unexpectedSubMsgs []string) {
		// set up testing hook:
		// afterMessage is a package-level hook fired after the aggregator
		// processes each message; it lets the test wait deterministically.
		messageReceived := make(chan struct{})
		afterMessage = func() {
			messageReceived <- struct{}{}
		}
		defer func() { afterMessage = nil }()

		delete(s.agg.buffers, "app") // reset the buffer

		conn, err := net.Dial("tcp", s.agg.Addr)
		c.Assert(err, IsNil)
		defer conn.Close()

		// Seed the backlog with two sample messages.
		_, err = conn.Write([]byte(sampleLogLine1))
		c.Assert(err, IsNil)
		_, err = conn.Write([]byte(sampleLogLine2))
		c.Assert(err, IsNil)
		for i := 0; i < 2; i++ {
			<-messageReceived // wait for messages to be received
		}

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		if filter == nil {
			filter = nopFilter
		}
		msgc := s.agg.ReadLastNAndSubscribe("app", lines, filter, ctx.Done())
		// NOTE(review): one shared deadline for all expected messages in this
		// run, not a per-message timeout.
		timeout := time.After(5 * time.Second)
		// The backlog must arrive first, in order.
		for _, expectedMsg := range expectedBefore {
			select {
			case msg := <-msgc:
				c.Assert(msg, Not(IsNil))
				c.Assert(string(msg.Msg), Equals, expectedMsg)
			case <-timeout:
				c.Fatalf("timeout waiting for receive on msgc of %q", expectedMsg)
			}
		}
		// No extra backlog messages beyond those expected.
		select {
		case msg := <-msgc:
			c.Fatalf("unexpected message received: %+v", msg)
		default:
		}

		// make sure we skip messages we don't want
		for _, rawMsg := range unexpectedSubMsgs {
			_, err = conn.Write([]byte(rawMsg))
			c.Assert(err, IsNil)
			<-messageReceived // wait for message to be received

			select {
			case msg := <-msgc:
				c.Fatalf("received unexpected msg: %s", string(msg.Msg))
			default:
			}
		}
		// make sure we get messages we do want
		for _, rawMsg := range expectedSubMsgs {
			_, err = conn.Write([]byte(rawMsg))
			c.Assert(err, IsNil)
			<-messageReceived // wait for message to be received

			select {
			case msg := <-msgc:
				// the raw syslog frame ends with the message body
				c.Assert(strings.HasSuffix(rawMsg, string(msg.Msg)), Equals, true)
			case <-timeout:
				c.Fatalf("timeout waiting for expected message on msgc: %q", rawMsg)
			}
		}
	}

	tests := []struct {
		lines             int
		filter            Filter
		expectedBefore    []string
		expectedSubMsgs   []string
		unexpectedSubMsgs []string
	}{
		// all lines, no filter
		{
			lines: -1,
			expectedBefore: []string{
				"Starting process with command `bundle exec rackup config.ru -p 24405`",
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
			},
			unexpectedSubMsgs: []string{
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		// last line only, no filter
		{
			lines: 1,
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
			},
			unexpectedSubMsgs: []string{
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		// all lines, filtered to job ID "2"
		{
			lines:  -1,
			filter: filterJobID("2"),
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message",
			},
			unexpectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		// no backlog, filtered to process type "web"
		{
			lines:          0,
			filter:         filterProcessType("web"),
			expectedBefore: []string{},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
			},
			unexpectedSubMsgs: []string{
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		// last line only, filtered to job ID "2"
		{
			lines:  1,
			filter: filterJobID("2"),
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message",
			},
			unexpectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
	}
	for i, test := range tests {
		c.Logf("testing num=%d lines=%d filter=%v", i, test.lines, test.filter)
		runTest(test.lines, test.filter, test.expectedBefore, test.expectedSubMsgs, test.unexpectedSubMsgs)
	}
}
func (a *aggregatorAPI) GetLog(ctx context.Context, w http.ResponseWriter, req *http.Request) { ctx, cancel := context.WithCancel(ctx) if cn, ok := w.(http.CloseNotifier); ok { go func() { select { case <-cn.CloseNotify(): cancel() case <-ctx.Done(): } }() } defer cancel() params, _ := ctxhelper.ParamsFromContext(ctx) channelID := params.ByName("channel_id") follow := false if strFollow := req.FormValue("follow"); strFollow == "true" { follow = true } lines := -1 // default to all lines if strLines := req.FormValue("lines"); strLines != "" { var err error lines, err = strconv.Atoi(strLines) if err != nil || lines < 0 || lines > 10000 { httphelper.ValidationError(w, "lines", "lines must be an integer between 0 and 10000") return } } filters := make([]filter, 0) if strJobID := req.FormValue("job_id"); strJobID != "" { filters = append(filters, filterJobID{[]byte(strJobID)}) } if processTypeVals, ok := req.Form["process_type"]; ok && len(processTypeVals) > 0 { val := processTypeVals[len(processTypeVals)-1] filters = append(filters, filterProcessType{[]byte(val)}) } w.WriteHeader(200) var msgc <-chan *rfc5424.Message if follow { msgc = a.agg.ReadLastNAndSubscribe(channelID, lines, filters, ctx.Done()) go flushLoop(w.(http.Flusher), 50*time.Millisecond, ctx.Done()) } else { msgc = a.agg.ReadLastN(channelID, lines, filters, ctx.Done()) } enc := json.NewEncoder(w) for { select { case syslogMsg := <-msgc: if syslogMsg == nil { // channel is closed / done return } if err := enc.Encode(NewMessageFromSyslog(syslogMsg)); err != nil { log15.Error("error writing msg", "err", err) return } case <-ctx.Done(): return } } }