func (s *LogReaderSuite) TestNextError(c *gc.C) {
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	jsonReader := mockStream{stub: stub}
	conn.ReturnConnectStream = jsonReader
	failure := errors.New("an error")
	// First stubbed call (ConnectStream) succeeds; second (ReadJSON) fails.
	stub.SetErrors(nil, failure)
	var cfg params.LogStreamConfig
	stream, err := logstream.Open(conn, cfg, cUUID)
	c.Assert(err, gc.IsNil)

	var nextErr error
	done := make(chan struct{})
	go func() {
		_, nextErr = stream.Next()
		c.Check(errors.Cause(nextErr), gc.Equals, failure)
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(coretesting.LongWait):
		c.Errorf("timed out waiting for record")
	}

	stub.CheckCallNames(c, "ConnectStream", "ReadJSON")
}
func (s *LogReaderSuite) TestOpenError(c *gc.C) {
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	failure := errors.New("foo")
	stub.SetErrors(failure)
	var cfg params.LogStreamConfig

	_, err := logstream.Open(conn, cfg, cUUID)

	c.Check(err, gc.ErrorMatches, "cannot connect to /logstream: foo")
	stub.CheckCallNames(c, "ConnectStream")
}
// Manifold returns a dependency manifold that runs a log forwarding
// worker, using the resource names defined in the supplied config.
func Manifold(config ManifoldConfig) dependency.Manifold {
	openLogStream := config.OpenLogStream
	if openLogStream == nil {
		openLogStream = func(caller base.APICaller, cfg params.LogStreamConfig, controllerUUID string) (LogStream, error) {
			return logstream.Open(caller, cfg, controllerUUID)
		}
	}

	openForwarder := config.OpenLogForwarder
	if openForwarder == nil {
		openForwarder = NewLogForwarder
	}

	return dependency.Manifold{
		Inputs: []string{
			config.StateName, // ...just to force it to run only on the controller.
			config.APICallerName,
		},
		Start: func(context dependency.Context) (worker.Worker, error) {
			var apiCaller base.APICaller
			if err := context.Get(config.APICallerName, &apiCaller); err != nil {
				return nil, errors.Trace(err)
			}

			agentFacade := apiagent.NewState(apiCaller)
			controllerCfg, err := agentFacade.ControllerConfig()
			if err != nil {
				return nil, errors.Annotate(err, "cannot read controller config")
			}

			orchestrator, err := newOrchestratorForController(OrchestratorArgs{
				ControllerUUID:   controllerCfg.ControllerUUID(),
				LogForwardConfig: agentFacade,
				Caller:           apiCaller,
				Sinks:            config.Sinks,
				OpenLogStream:    openLogStream,
				OpenLogForwarder: openForwarder,
			})
			return orchestrator, errors.Annotate(err, "creating log forwarding orchestrator")
		},
	}
}
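// A minimal wiring sketch, assuming a dependency engine with an Install
// method and resource names "state" and "api-caller"; the engine setup,
// the function name, and the resource names here are illustrative
// assumptions, not taken from this package.
func exampleInstall(engine *dependency.Engine) error {
	manifold := Manifold(ManifoldConfig{
		StateName:     "state",      // hypothetical resource name
		APICallerName: "api-caller", // hypothetical resource name
		// OpenLogStream and OpenLogForwarder are left nil, so the
		// defaults above (logstream.Open and NewLogForwarder) apply.
	})
	return engine.Install("log-forwarder", manifold)
}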
func (s *LogReaderSuite) TestOpenFullConfig(c *gc.C) {
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	stream := mockStream{stub: stub}
	conn.ReturnConnectStream = stream
	cfg := params.LogStreamConfig{
		AllModels: true,
		Sink:      "spam",
	}

	_, err := logstream.Open(conn, cfg, cUUID)
	c.Assert(err, gc.IsNil)

	stub.CheckCallNames(c, "ConnectStream")
	stub.CheckCall(c, 0, "ConnectStream", `/logstream`, url.Values{
		"all":  []string{"true"},
		"sink": []string{"spam"},
	})
}
func (s *LogReaderSuite) TestClose(c *gc.C) {
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	jsonReader := mockStream{stub: stub}
	conn.ReturnConnectStream = jsonReader
	var cfg params.LogStreamConfig
	stream, err := logstream.Open(conn, cfg, cUUID)
	c.Assert(err, gc.IsNil)
	stub.ResetCalls()

	err = stream.Close()
	c.Assert(err, jc.ErrorIsNil)
	err = stream.Close() // idempotent
	c.Assert(err, jc.ErrorIsNil)
	_, err = stream.Next()
	c.Check(err, gc.ErrorMatches, `cannot read from closed stream`)

	stub.CheckCallNames(c, "Close")
}
func (s *LogReaderSuite) TestNextOneRecord(c *gc.C) {
	ts := time.Now()
	apiRec := params.LogStreamRecord{
		ModelUUID: "deadbeef-2f18-4fd2-967d-db9663db7bea",
		Entity:    "machine-99",
		Version:   version.Current.String(),
		Timestamp: ts,
		Module:    "api.logstream.test",
		Location:  "test.go:42",
		Level:     loggo.INFO.String(),
		Message:   "test message",
	}
	apiRecords := params.LogStreamRecords{
		Records: []params.LogStreamRecord{apiRec},
	}
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	jsonReader := mockStream{stub: stub}
	logsCh := make(chan params.LogStreamRecords, 1)
	logsCh <- apiRecords
	jsonReader.ReturnReadJSON = logsCh
	conn.ReturnConnectStream = jsonReader
	var cfg params.LogStreamConfig
	stream, err := logstream.Open(conn, cfg, cUUID)
	c.Assert(err, gc.IsNil)
	stub.ResetCalls()

	// Check the record we injected into the stream.
	var records []logfwd.Record
	done := make(chan struct{})
	go func() {
		records, err = stream.Next()
		c.Assert(err, jc.ErrorIsNil)
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(coretesting.LongWait):
		c.Errorf("timed out waiting for record")
	}
	c.Assert(records, gc.HasLen, 1)
	c.Check(records[0], jc.DeepEquals, logfwd.Record{
		Origin: logfwd.Origin{
			ControllerUUID: cUUID,
			ModelUUID:      "deadbeef-2f18-4fd2-967d-db9663db7bea",
			Hostname:       "machine-99.deadbeef-2f18-4fd2-967d-db9663db7bea",
			Type:           logfwd.OriginTypeMachine,
			Name:           "99",
			Software: logfwd.Software{
				PrivateEnterpriseNumber: 28978,
				Name:                    "jujud-machine-agent",
				Version:                 version.Current,
			},
		},
		Timestamp: ts,
		Level:     loggo.INFO,
		Location: logfwd.SourceLocation{
			Module:   "api.logstream.test",
			Filename: "test.go",
			Line:     42,
		},
		Message: "test message",
	})
	stub.CheckCallNames(c, "ReadJSON")

	// Make sure we don't get extras.
	done = make(chan struct{})
	go func() {
		records, err = stream.Next()
		c.Assert(err, jc.ErrorIsNil)
		close(done)
	}()
	select {
	case <-done:
		c.Errorf("got extra record: %#v", records)
	case <-time.After(coretesting.ShortWait):
	}
}
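// The tests above rely on mockConnector and mockStream doubles defined
// elsewhere in this package. The sketch below shows one plausible shape
// for them, inferred from the calls the tests record ("ConnectStream",
// "ReadJSON", "Close") and the fields they set; it is an illustration,
// not the package's actual definition.
type mockConnector struct {
	stub                *testing.Stub
	ReturnConnectStream base.Stream
}

func (c *mockConnector) ConnectStream(path string, attrs url.Values) (base.Stream, error) {
	c.stub.AddCall("ConnectStream", path, attrs)
	if err := c.stub.NextErr(); err != nil {
		return nil, err
	}
	return c.ReturnConnectStream, nil
}

type mockStream struct {
	base.Stream // embedded to satisfy the interface; unstubbed methods panic if called

	stub           *testing.Stub
	ReturnReadJSON chan params.LogStreamRecords
}

func (s mockStream) ReadJSON(v interface{}) error {
	s.stub.AddCall("ReadJSON", v)
	if err := s.stub.NextErr(); err != nil {
		return err
	}
	records, ok := v.(*params.LogStreamRecords)
	if !ok {
		return errors.Errorf("unexpected type %T", v)
	}
	*records = <-s.ReturnReadJSON
	return nil
}

func (s mockStream) Close() error {
	s.stub.AddCall("Close")
	return s.stub.NextErr()
}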