// TestLog exercises LoggingSuite end to end: SetUpSuite applies logConfig
// ("<root>=DEBUG;juju=TRACE") and starts capturing output; TearDownSuite
// restores the default WARNING levels and stops further capture.
func (*logSuite) TestLog(c *gc.C) {
	logger := loggo.GetLogger("test")
	jujuLogger := loggo.GetLogger("juju")
	logConfig = "<root>=DEBUG;juju=TRACE"
	// Before SetUpSuite runs, loggers sit at the default (WARNING).
	c.Assert(logger.EffectiveLogLevel(), gc.Equals, loggo.WARNING)
	var suite LoggingSuite
	suite.SetUpSuite(c)
	// SetUpSuite must have applied logConfig to both loggers.
	c.Assert(logger.EffectiveLogLevel(), gc.Equals, loggo.DEBUG)
	c.Assert(jujuLogger.EffectiveLogLevel(), gc.Equals, loggo.TRACE)
	logger.Debugf("message 1")
	// "test" sits at DEBUG, so this TRACE message is dropped.
	logger.Tracef("message 2")
	jujuLogger.Tracef("message 3")
	c.Assert(c.GetTestLog(), gc.Matches,
		".*DEBUG test message 1\n"+
			".*TRACE juju message 3\n",
	)
	suite.TearDownSuite(c)
	// After teardown none of these should be captured: the test log must
	// still hold only the two lines recorded above.
	logger.Debugf("message 1")
	logger.Tracef("message 2")
	jujuLogger.Tracef("message 3")
	c.Assert(c.GetTestLog(), gc.Matches,
		".*DEBUG test message 1\n"+
			".*TRACE juju message 3\n",
	)
	// Teardown also restores the default WARNING levels.
	c.Assert(logger.EffectiveLogLevel(), gc.Equals, loggo.WARNING)
	c.Assert(jujuLogger.EffectiveLogLevel(), gc.Equals, loggo.WARNING)
}
func (*GlobalSuite) TestModuleLowered(c *gc.C) { logger1 := loggo.GetLogger("TESTING.MODULE") logger2 := loggo.GetLogger("Testing") c.Assert(logger1.Name(), gc.Equals, "testing.module") c.Assert(logger2.Name(), gc.Equals, "testing") }
func (c *Command) readConfig() *Config { var cmdConfig Config cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError) cmdFlags.Usage = func() { c.Ui.Output(c.Help()) } cmdFlags.StringVar(&cmdConfig.LocalUrl, "localurl", "tcp://localhost:1883", "cloud url to connect to") cmdFlags.StringVar(&cmdConfig.SerialNo, "serial", "unknown", "the serial number of the device") cmdFlags.BoolVar(&cmdConfig.Debug, "debug", false, "enable debug") cmdFlags.BoolVar(&cmdConfig.Trace, "trace", false, "enable trace") cmdFlags.IntVar(&cmdConfig.StatusTimer, "status", 30, "time in seconds between status messages") if err := cmdFlags.Parse(c.args); err != nil { return nil } //if cmdFLags. if cmdConfig.Debug { loggo.GetLogger("").SetLogLevel(loggo.DEBUG) } else { loggo.GetLogger("").SetLogLevel(loggo.INFO) } if cmdConfig.Trace { // enable low-level tracing on mqtt library for _, l := range []**log.Logger{&mqtt.DEBUG, &mqtt.ERROR, &mqtt.CRITICAL, &mqtt.WARN} { *l = log.New(os.Stderr, "", 0) } } return &cmdConfig }
func (*GlobalSuite) TestLevelsSharedForSameModule(c *gc.C) { logger1 := loggo.GetLogger("testing.module") logger2 := loggo.GetLogger("testing.module") logger1.SetLogLevel(loggo.INFO) c.Assert(logger1.IsInfoEnabled(), gc.Equals, true) c.Assert(logger2.IsInfoEnabled(), gc.Equals, true) }
func setupLoggo(debug bool) { // apply flags if debug { loggo.GetLogger(*logName).SetLogLevel(loggo.DEBUG) } else { loggo.GetLogger(*logName).SetLogLevel(loggo.INFO) } }
// init configures process-wide logging: the root level comes from the DEBUG
// environment variable, stderr output is silenced at the default level, and
// a syslog writer is registered when /dev/log is available.
func init() {
	var level loggo.Level
	useSyslog := true
	if _, logErr := os.Stat("/dev/log"); logErr != nil {
		useSyslog = false
	}
	// snappy doesn't support syslog, so when we detect a snappy app we will disable syslog
	// and leave the default stderr logger (which works because snappy uses systemd)
	// eventually, the error case of syslog not existing should be exposed by loggo-syslog instead
	//
	// NOTE(review): snappy's documented variable is SNAP_APP_PATH (single P);
	// "SNAPP_APP_PATH" looks like a typo — confirm against the deployment
	// environment before changing it.
	if os.Getenv("SNAPP_APP_PATH") != "" {
		useSyslog = false
	}
	debug := os.Getenv("DEBUG")
	if debug != "" {
		// Any unrecognized non-empty value falls through to DEBUG.
		switch debug {
		case "INFO":
			level = loggo.INFO
		case "WARNING":
			level = loggo.WARNING
		case "ERROR":
			level = loggo.ERROR
		default:
			level = loggo.DEBUG
		}
	} else {
		// set the default level
		level = loggo.INFO
		// kill stderr
		log.SetOutput(ioutil.Discard)
		if useSyslog {
			// remove the default writer
			loggo.RemoveWriter("default")
		}
	}
	loggo.GetLogger("").SetLogLevel(level)
	// Only announce the level when it differs from the quiet default.
	if level != loggo.INFO {
		loggo.GetLogger("").Infof("Root logger initialized at level %v", level)
	}
	// setup the syslog writer
	if useSyslog {
		loggo.RegisterWriter("syslog", lsyslog.NewDefaultSyslogWriter(loggo.TRACE, path.Base(os.Args[0]), "LOCAL7"), loggo.TRACE)
	}
}
// TestLocationCapture checks that each log call records its own source
// location. The "//tag" markers are anchors that assertLocation uses to
// locate the expected call line — keep each one on its logging call's line.
func (*loggerSuite) TestLocationCapture(c *gc.C) {
	writer := &loggo.TestWriter{}
	loggo.ReplaceDefaultWriter(writer)
	logger := loggo.GetLogger("test")
	// TRACE lets every severity below pass through to the writer.
	logger.SetLogLevel(loggo.TRACE)
	logger.Criticalf("critical message") //tag critical-location
	logger.Errorf("error message")       //tag error-location
	logger.Warningf("warning message")   //tag warning-location
	logger.Infof("info message")         //tag info-location
	logger.Debugf("debug message")       //tag debug-location
	logger.Tracef("trace message")       //tag trace-location
	log := writer.Log()
	tags := []string{
		"critical-location",
		"error-location",
		"warning-location",
		"info-location",
		"debug-location",
		"trace-location",
	}
	// One captured entry per call, in emission order.
	c.Assert(log, gc.HasLen, len(tags))
	for x := range tags {
		assertLocation(c, log[x], tags[x])
	}
}
func (*rpcSuite) TestBadCall(c *gc.C) { loggo.GetLogger("juju.rpc").SetLogLevel(loggo.TRACE) root := &Root{ simple: make(map[string]*SimpleMethods), } a0 := &SimpleMethods{root: root, id: "a0"} root.simple["a0"] = a0 client, srvDone, serverNotifier := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) testBadCall(c, client, serverNotifier, rpc.Request{"BadSomething", 0, "a0", "No"}, `unknown object type "BadSomething"`, rpc.CodeNotImplemented, false, ) testBadCall(c, client, serverNotifier, rpc.Request{"SimpleMethods", 0, "xx", "No"}, "no such request - method SimpleMethods.No is not implemented", rpc.CodeNotImplemented, false, ) testBadCall(c, client, serverNotifier, rpc.Request{"SimpleMethods", 0, "xx", "Call0r0"}, `unknown SimpleMethods id`, "", true, ) }
func TestConfiguringCustomLogger(t *testing.T) { l1 := log.New(os.Stdout, "", log.Lshortfile) l2 := &LoggoWrapper{loggo.GetLogger("test")} var testCases = []struct { config Configuration notify bool msg string }{ { config: Configuration{ReleaseStage: "production", NotifyReleaseStages: []string{"development", "production"}, Logger: l1}, notify: true, msg: "Failed to assign log.Logger", }, { config: Configuration{ReleaseStage: "production", NotifyReleaseStages: []string{"development", "production"}, Logger: l2}, notify: true, msg: "Failed to assign LoggoWrapper", }, } for _, testCase := range testCases { Configure(testCase.config) // call printf just to illustrate it is present as the compiler does most of the hard work testCase.config.Logger.Printf("hello %s", "bugsnag") } }
func (s *modelManagerStateSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) s.authoriser = apiservertesting.FakeAuthorizer{ Tag: s.AdminUserTag(c), } loggo.GetLogger("juju.apiserver.modelmanager").SetLogLevel(loggo.TRACE) }
func (s *writerSuite) SetUpTest(c *gc.C) { loggo.ResetLoggers() loggo.RemoveWriter("default") s.logger = loggo.GetLogger("test.writer") // Make it so the logger itself writes all messages. s.logger.SetLogLevel(loggo.TRACE) }
func (s *ToolsMetadataSuite) SetUpTest(c *gc.C) { s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.AddCleanup(dummy.Reset) cfg, err := config.New(config.UseDefaults, map[string]interface{}{ "name": "erewhemos", "type": "dummy", "uuid": coretesting.ModelTag.Id(), "controller-uuid": coretesting.ControllerTag.Id(), "conroller": true, }) c.Assert(err, jc.ErrorIsNil) env, err := bootstrap.Prepare( modelcmd.BootstrapContextNoVerify(coretesting.Context(c)), jujuclienttesting.NewMemStore(), bootstrap.PrepareParams{ ControllerConfig: coretesting.FakeControllerConfig(), ControllerName: cfg.Name(), ModelConfig: cfg.AllAttrs(), Cloud: dummy.SampleCloudSpec(), AdminSecret: "admin-secret", }, ) c.Assert(err, jc.ErrorIsNil) s.env = env loggo.GetLogger("").SetLogLevel(loggo.INFO) // Switch the default tools location. s.publicStorageDir = c.MkDir() s.PatchValue(&tools.DefaultBaseURL, s.publicStorageDir) }
func (s *BenchmarksSuite) SetUpTest(c *gc.C) { loggo.ResetLogging() s.logger = loggo.GetLogger("test.writer") s.writer = &writer{} err := loggo.RegisterWriter("test", s.writer) c.Assert(err, gc.IsNil) }
func (c *MigrateCommand) Run(ctx *cmd.Context) (err error) { defer func() { if err != nil { fmt.Fprintf(ctx.Stdout, "error stack:\n"+errors.ErrorStack(err)) } }() loggo.GetLogger("juju").SetLogLevel(loggo.DEBUG) conf, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, c.machineTag)) if err != nil { return err } info, ok := conf.MongoInfo() if !ok { return errors.Errorf("no state info available") } st, err := state.Open(conf.Model(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) if err != nil { return err } defer st.Close() if c.operation == "export" { return c.exportModel(ctx, st) } return c.importModel(ctx, st) }
// TestWriteMessageLogsRequests checks that outbound-message tracing on the
// codec is off by default, emits one TRACE line per message when enabled,
// and stops emitting once disabled again.
func (*suite) TestWriteMessageLogsRequests(c *gc.C) {
	codecLogger := loggo.GetLogger("juju.rpc.jsoncodec")
	// defer evaluates its argument now, so the current level is captured
	// here and restored when the test finishes.
	defer codecLogger.SetLogLevel(codecLogger.LogLevel())
	codecLogger.SetLogLevel(loggo.TRACE)
	codec := jsoncodec.New(&testConn{})
	h := rpc.Header{
		RequestId: 1,
		Request: rpc.Request{
			Type:   "foo",
			Id:     "id",
			Action: "frob",
		},
	}
	// Check that logging is off by default
	err := codec.WriteMessage(&h, value{X: "param"})
	c.Assert(err, gc.IsNil)
	c.Assert(c.GetTestLog(), gc.Matches, "")

	// Check that we see a log message when we switch logging on.
	codec.SetLogging(true)
	err = codec.WriteMessage(&h, value{X: "param"})
	c.Assert(err, gc.IsNil)
	msg := `{"RequestId":1,"Type":"foo","Id":"id","Request":"frob","Params":{"X":"param"}}`
	c.Assert(c.GetTestLog(), gc.Matches, `.*TRACE juju.rpc.jsoncodec -> `+regexp.QuoteMeta(msg)+`\n`)

	// Check that we can switch it off again. The test log accumulates, so
	// matching the same single line shows no second entry was written.
	codec.SetLogging(false)
	err = codec.WriteMessage(&h, value{X: "param"})
	c.Assert(err, gc.IsNil)
	c.Assert(c.GetTestLog(), gc.Matches, `.*TRACE juju.rpc.jsoncodec -> `+regexp.QuoteMeta(msg)+`\n`)
}
// New returns a Worker backed by Config. The caller is responsible for
// Kill()ing the Worker and handling any errors returned from Wait();
// but as it happens it's designed to be an apiserver/common.Resource,
// and never to exit unless Kill()ed, so in practice Stop(), which will
// call Kill() and Wait() internally, is Good Enough.
func New(config Config) (*Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	// Scope the logger to this worker's identity.
	name := fmt.Sprintf("juju.apiserver.presence.%s", config.Identity)
	w := &Worker{
		config: config,
		logger: loggo.GetLogger(name),
	}
	ready := make(chan struct{})
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: func() error {
			// Run once to prime presence before diving into the loop.
			pinger := w.startPinger()
			// Unblock New (waiting on <-ready below) after the first start
			// attempt; nil the channel so it is only closed once.
			if ready != nil {
				close(ready)
				ready = nil
			}
			if pinger != nil {
				w.waitOnPinger(pinger)
			}
			return w.loop()
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Block until the worker goroutine has attempted its first ping.
	<-ready
	return w, nil
}
func (s *serverSuite) TestNonCompatiblePathsAre404(c *gc.C) { // we expose the API at '/' for compatibility, and at '/ENVUUID/api' // for the correct location, but other Paths should fail. loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) srv := newServer(c, s.State) defer srv.Stop() // We have to use 'localhost' because that is what the TLS cert says. addr := fmt.Sprintf("localhost:%d", srv.Addr().Port) // '/' should be fine conn, err := dialWebsocket(c, addr, "/") c.Assert(err, jc.ErrorIsNil) conn.Close() // '/environment/ENVIRONUUID/api' should be fine conn, err = dialWebsocket(c, addr, "/environment/dead-beef-123456/api") c.Assert(err, jc.ErrorIsNil) conn.Close() // '/randompath' is not ok conn, err = dialWebsocket(c, addr, "/randompath") // Unfortunately go.net/websocket just returns Bad Status, it doesn't // give us any information (whether this was a 404 Not Found, Internal // Server Error, 200 OK, etc.) c.Assert(err, gc.ErrorMatches, `websocket.Dial wss://localhost:\d+/randompath: bad status`) c.Assert(conn, gc.IsNil) }
// serveConn serves RPC requests on a single websocket connection until the
// connection dies or the server starts shutting down, then closes it.
func (srv *Server) serveConn(wsConn *websocket.Conn, reqNotifier *requestNotifier, modelUUID string) error {
	codec := jsoncodec.NewWebsocket(wsConn)
	// Mirror the jsoncodec logger's TRACE setting onto the codec.
	if loggo.GetLogger("juju.rpc.jsoncodec").EffectiveLogLevel() <= loggo.TRACE {
		codec.SetLogging(true)
	}
	var notifier rpc.RequestNotifier
	if logger.EffectiveLogLevel() <= loggo.DEBUG {
		// Incur request monitoring overhead only if we
		// know we'll need it.
		notifier = reqNotifier
	}
	conn := rpc.NewConn(codec, notifier)
	h, err := srv.newAPIHandler(conn, reqNotifier, modelUUID)
	if err != nil {
		// Serve a root that fails every request with the handler error.
		conn.ServeFinder(&errRoot{err}, serverError)
	} else {
		adminApis := make(map[int]interface{})
		for apiVersion, factory := range srv.adminApiFactories {
			adminApis[apiVersion] = factory(srv, h, reqNotifier)
		}
		conn.ServeFinder(newAnonRoot(h, adminApis), serverError)
	}
	conn.Start()
	// Wait for either the connection to finish or server shutdown.
	select {
	case <-conn.Dead():
	case <-srv.tomb.Dying():
	}
	return conn.Close()
}
func (c *Command) Run(args []string) int { c.Ui = &cli.PrefixedUi{ OutputPrefix: "==> ", InfoPrefix: " ", ErrorPrefix: "==> ", Ui: c.Ui, } c.args = args config := c.readConfig() if config == nil { return 1 } c.args = args c.Ui.Output("MQTT bridgeify agent running!") c.Ui.Info("Getting on the bus: " + config.Token) c.Ui.Info("Local url: " + config.LocalUrl) c.log = loggo.GetLogger("") c.agent = createAgent(config) if err := c.agent.start(); err != nil { c.Ui.Error(fmt.Sprintf("error starting agent %s", err)) } c.bus = createBus(config, c.agent) c.bus.listen() return c.handleSignals(config) }
// TestFindToolsFiltering checks that FindTools honours a version filter
// (no matching tools yields NotFound) and that the search emits the
// expected sequence of log messages.
func (s *SimpleStreamsToolsSuite) TestFindToolsFiltering(c *gc.C) {
	var tw loggo.TestWriter
	c.Assert(loggo.RegisterWriter("filter-tester", &tw, loggo.TRACE), gc.IsNil)
	defer loggo.RemoveWriter("filter-tester")
	logger := loggo.GetLogger("juju.environs")
	// defer evaluates its argument now, restoring the current level on exit.
	defer logger.SetLogLevel(logger.LogLevel())
	logger.SetLogLevel(loggo.TRACE)

	_, err := envtools.FindTools(
		s.env, 1, -1, "released", coretools.Filter{Number: version.Number{Major: 1, Minor: 2, Patch: 3}})
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	// This is slightly overly prescriptive, but feel free to change or add
	// messages. This still helps to ensure that all log messages are
	// properly formed.
	messages := []jc.SimpleMessage{
		{loggo.INFO, "reading tools with major version 1"},
		{loggo.INFO, "filtering tools by version: \\d+\\.\\d+\\.\\d+"},
		{loggo.TRACE, "no architecture specified when finding tools, looking for "},
		{loggo.TRACE, "no series specified when finding tools, looking for \\[.*\\]"},
	}
	sources, err := envtools.GetMetadataSources(s.env)
	c.Assert(err, jc.ErrorIsNil)
	// Presumably each source is probed twice, producing a fetch-failure and
	// an index-load-failure message per probe — hence 2*len(sources) pairs.
	for i := 0; i < 2*len(sources); i++ {
		messages = append(messages,
			jc.SimpleMessage{loggo.TRACE, `fetchData failed for .*`},
			jc.SimpleMessage{loggo.TRACE, `cannot load index .*`})
	}
	c.Check(tw.Log(), jc.LogMatches, messages)
}
func (*cloudinitSuite) TestCloudInitConfigureBootstrapLogging(c *gc.C) { loggo.GetLogger("").SetLogLevel(loggo.INFO) envConfig := minimalEnvironConfig(c) instConfig := makeBootstrapConfig("quantal").maybeSetEnvironConfig(envConfig) rendered := instConfig.render() cloudcfg, err := cloudinit.New(rendered.Series) c.Assert(err, jc.ErrorIsNil) udata, err := cloudconfig.NewUserdataConfig(&rendered, cloudcfg) c.Assert(err, jc.ErrorIsNil) err = udata.Configure() c.Assert(err, jc.ErrorIsNil) data, err := cloudcfg.RenderYAML() c.Assert(err, jc.ErrorIsNil) configKeyValues := make(map[interface{}]interface{}) err = goyaml.Unmarshal(data, &configKeyValues) c.Assert(err, jc.ErrorIsNil) scripts := getScripts(configKeyValues) for i, script := range scripts { if strings.Contains(script, "bootstrap") { c.Logf("scripts[%d]: %q", i, script) } } expected := "jujud bootstrap-state --data-dir '.*' --env-config '.*'" + " --instance-id '.*' --bootstrap-constraints 'mem=4096M'" + " --environ-constraints 'mem=2048M' --show-log" assertScriptMatch(c, scripts, expected, false) }
// TestReadHeaderLogsRequests checks that inbound-message tracing on the
// codec is off by default, emits one TRACE line per read when enabled, and
// stops emitting once disabled again.
func (*suite) TestReadHeaderLogsRequests(c *gc.C) {
	codecLogger := loggo.GetLogger("juju.rpc.jsoncodec")
	// defer evaluates its argument now, so the current level is captured
	// here and restored when the test finishes.
	defer codecLogger.SetLogLevel(codecLogger.LogLevel())
	codecLogger.SetLogLevel(loggo.TRACE)
	msg := `{"RequestId":1,"Type": "foo","Id": "id","Request":"frob","Params":{"X":"param"}}`
	// Queue three identical messages — one per logging state exercised.
	codec := jsoncodec.New(&testConn{
		readMsgs: []string{msg, msg, msg},
	})
	// Check that logging is off by default
	var h rpc.Header
	err := codec.ReadHeader(&h)
	c.Assert(err, gc.IsNil)
	c.Assert(c.GetTestLog(), gc.Matches, "")

	// Check that we see a log message when we switch logging on.
	codec.SetLogging(true)
	err = codec.ReadHeader(&h)
	c.Assert(err, gc.IsNil)
	c.Assert(c.GetTestLog(), gc.Matches, ".*TRACE juju.rpc.jsoncodec <- "+regexp.QuoteMeta(msg)+`\n`)

	// Check that we can switch it off again. The test log accumulates, so
	// matching the same single line shows no second entry was written.
	codec.SetLogging(false)
	err = codec.ReadHeader(&h)
	c.Assert(err, gc.IsNil)
	c.Assert(c.GetTestLog(), gc.Matches, ".*TRACE juju.rpc.jsoncodec <- "+regexp.QuoteMeta(msg)+`\n`)
}
// serveConn serves RPC requests on a single websocket connection until the
// connection dies or the server starts shutting down, then closes it.
func (srv *Server) serveConn(wsConn *websocket.Conn, reqNotifier *requestNotifier, envUUID string) error {
	codec := jsoncodec.NewWebsocket(wsConn)
	// Mirror the jsoncodec logger's TRACE setting onto the codec.
	if loggo.GetLogger("juju.rpc.jsoncodec").EffectiveLogLevel() <= loggo.TRACE {
		codec.SetLogging(true)
	}
	var notifier rpc.RequestNotifier
	if logger.EffectiveLogLevel() <= loggo.DEBUG {
		// Incur request monitoring overhead only if we
		// know we'll need it.
		notifier = reqNotifier
	}
	conn := rpc.NewConn(codec, notifier)
	err := srv.validateEnvironUUID(envUUID)
	if err != nil {
		// Serve a root that fails every request with the validation error.
		conn.Serve(&errRoot{err}, serverError)
	} else {
		conn.Serve(newStateServer(srv, conn, reqNotifier, srv.limiter), serverError)
	}
	conn.Start()
	// Wait for either the connection to finish or server shutdown.
	select {
	case <-conn.Dead():
	case <-srv.tomb.Dying():
	}
	return conn.Close()
}
// New returns a Worker backed by Config. The caller is responsible for
// Kill()ing the Worker and handling any errors returned from Wait();
// but as it happens it's designed to be an apiserver/common.Resource,
// and never to exit unless Kill()ed, so in practice Stop(), which will
// call Kill() and Wait() internally, is Good Enough.
func New(config Config) (*Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	// Scope the logger to this worker's identity.
	name := fmt.Sprintf("juju.apiserver.presence.%s", config.Identity)
	w := &Worker{
		config: config,
		logger: loggo.GetLogger(name),
		// Closed by the worker once a Pinger start has been attempted.
		running: make(chan struct{}),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	// To support unhappy assumptions in apiserver/server_test.go,
	// we block New until at least one attempt to start a Pinger
	// has been made. This preserves the apparent behaviour of an
	// unwrapped Pinger under normal conditions.
	select {
	case <-w.catacomb.Dying():
		// The worker died before signalling running; surface its error.
		if err := w.Wait(); err != nil {
			return nil, errors.Trace(err)
		}
		return nil, errors.New("worker stopped abnormally without reporting an error")
	case <-w.running:
		return w, nil
	}
}
func (c *JujuLogCommand) Run(ctx *cmd.Context) error { if c.formatFlag != "" { fmt.Fprintf(ctx.Stderr, "--format flag deprecated for command %q", c.Info().Name) } logger := loggo.GetLogger(fmt.Sprintf("unit.%s.juju-log", c.ctx.UnitName())) logLevel := loggo.INFO if c.Debug { logLevel = loggo.DEBUG } else if c.Level != "" { var ok bool logLevel, ok = loggo.ParseLevel(c.Level) if !ok { logger.Warningf("Specified log level of %q is not valid", c.Level) logLevel = loggo.INFO } } prefix := "" if r, err := c.ctx.HookRelation(); err == nil { prefix = r.FakeId() + ": " } else if !errors.IsNotFound(err) { return errors.Trace(err) } logger.Logf(logLevel, "%s%s", prefix, c.Message) return nil }
func (s *baseProviderSuite) SetUpTest(c *gc.C) { s.TestSuite.SetUpTest(c) loggo.GetLogger("juju.provider.local").SetLogLevel(loggo.TRACE) s.restore = local.MockAddressForInterface() s.PatchValue(&local.VerifyPrerequisites, func(containerType instance.ContainerType) error { return nil }) }
func (s *LxcSuite) SetUpTest(c *gc.C) { s.TestSuite.SetUpTest(c) loggo.GetLogger("juju.container.lxc").SetLogLevel(loggo.TRACE) s.events = make(chan mock.Event, 25) s.TestSuite.ContainerFactory.AddListener(s.events) s.PatchValue(&lxc.TemplateLockDir, c.MkDir()) s.PatchValue(&lxc.TemplateStopTimeout, 500*time.Millisecond) }
func (*GlobalSuite) TestRootLogger(c *gc.C) { var root loggo.Logger got := loggo.GetLogger("") c.Check(got.Name(), gc.Equals, root.Name()) c.Check(got.LogLevel(), gc.Equals, root.LogLevel()) }
func (s *RunTestSuite) TestRunningRemoteUnitNoRelation(c *gc.C) { loggo.GetLogger("worker.uniter").SetLogLevel(loggo.TRACE) s.runListenerForAgent(c, "unit-foo-1") _, err := testing.RunCommand(c, &RunCommand{}, "--remote-unit", "remote/0", "foo/1", "bar") c.Check(cmd.IsRcPassthroughError(err), jc.IsFalse) c.Assert(err, gc.ErrorMatches, "remote unit: remote/0, provided without a relation") }
func (s *RunTestSuite) TestRunningBadRelation(c *gc.C) { loggo.GetLogger("worker.uniter").SetLogLevel(loggo.TRACE) s.runListenerForAgent(c, "unit-foo-1") _, err := testing.RunCommand(c, &RunCommand{}, "--relation", "badrelation:W", "foo/1", "bar") c.Check(cmd.IsRcPassthroughError(err), jc.IsFalse) c.Assert(err, gc.ErrorMatches, "invalid relation id") }