// TestGetSetLevel tests setting and retrieving the log level.
func TestGetSetLevel(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	logger.SetLevel(logger.LevelDebug)
	assert.Equal(logger.Level(), logger.LevelDebug)
	logger.SetLevel(logger.LevelCritical)
	assert.Equal(logger.Level(), logger.LevelCritical)
	logger.SetLevel(logger.LevelDebug)
	assert.Equal(logger.Level(), logger.LevelDebug)
}
func main() {
	config := cachet.Config

	// TODO support log path
	logger.SetLogger(logger.NewTimeformatLogger(os.Stderr, "2006-01-02 15:04:05"))
	logger.SetLevel(logger.LevelDebug)

	logger.Infof("System: %s, API: %s", config.SystemName, config.APIUrl)
	logger.Infof("Starting %d monitors", len(config.MonitorConfigs))

	// initialize monitors
	var allMonitors []*cachet.Monitor
	for _, monconf := range config.MonitorConfigs {
		monconf := monconf // copy: before Go 1.22 the range variable is reused, so &monconf would otherwise point at the same value on every iteration
		err, mon := cachet.NewMonitor(&monconf)
		if err == nil {
			err = cachet.SyncMonitor(mon)
			if err != nil {
				logger.Errorf("%v", err)
			}
			allMonitors = append(allMonitors, mon)
		} else {
			logger.Errorf("Parsing monitor error, skipping: %v", err)
		}
	}

	ticker := time.NewTicker(time.Second * time.Duration(config.CheckInterval))
	for range ticker.C {
		for _, m := range allMonitors {
			go m.Check()
		}
	}
}
// TestFiltering tests the filtering of the logging.
func TestFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.SetFilter(func(level logger.LogLevel, info, msg string) bool {
		return level >= logger.LevelWarning && level <= logger.LevelError
	})

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 3)

	logger.UnsetFilter()

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)
}
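// NOTE: the filtering tests in this section rely on a testLogger backend that
// is not defined here. The sketch below is a hypothetical reconstruction,
// assuming the logger package accepts any backend with per-level methods of
// the form Method(info, msg string); the real interface may differ (it may,
// for instance, also require a Fatal method). Field and method names are
// assumptions, not the package's documented API.
type testLogger struct {
	logs []string
}

// record appends one formatted entry so tests can count emitted log lines.
func (tl *testLogger) record(prefix, info, msg string) {
	tl.logs = append(tl.logs, prefix+" "+info+" "+msg)
}

func (tl *testLogger) Debug(info, msg string)    { tl.record("[DEBUG]", info, msg) }
func (tl *testLogger) Info(info, msg string)     { tl.record("[INFO]", info, msg) }
func (tl *testLogger) Warning(info, msg string)  { tl.record("[WARNING]", info, msg) }
func (tl *testLogger) Error(info, msg string)    { tl.record("[ERROR]", info, msg) }
func (tl *testLogger) Critical(info, msg string) { tl.record("[CRITICAL]", info, msg) }
func (tl *testLogger) Fatal(info, msg string)    { tl.record("[FATAL]", info, msg) }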
// TestGoLogger tests logging with the Go standard library logger.
func TestGoLogger(t *testing.T) {
	log.SetOutput(os.Stdout)

	logger.SetLevel(logger.LevelDebug)
	logger.SetLogger(logger.NewGoLogger())

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
// TestLogLevelFiltering tests filtering of log entries by level.
func TestLogLevelFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelError)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 2)
}
// TestSysLogger tests logging with the syslogger.
func TestSysLogger(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	logger.SetLevel(logger.LevelDebug)

	sl, err := logger.NewSysLogger("GOAS")
	assert.Nil(err)
	logger.SetLogger(sl)

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
func main() {
	golib_logger.SetLevel(golib_logger.LevelInfo)
	log.Print("start")

	rootEnvironment = cells.NewEnvironment()
	defer rootEnvironment.Stop()

	addBuilding(rootEnvironment, "school")
	addRoom(rootEnvironment, "school", "cafeteria")

	startServer(rootEnvironment)

	/*
		addRoom(rootEnvironment, "school", "playground")
		time.Sleep(time.Millisecond * 100)

		addLogUser(rootEnvironment, "school", "cafeteria")
		addUser(rootEnvironment, "school", "cafeteria", "willy")
		addUser(rootEnvironment, "school", "cafeteria", "bart")
		addUser(rootEnvironment, "school", "cafeteria", "lisa")
		time.Sleep(time.Millisecond * 100)

		bart := makeUserID("bart")
		rootEnvironment.EmitNew(bart, SAY, cells.PayloadValues{
			"room":    makeRoomID("cafeteria", "school"),
			"message": "Don't have a cow, Man!",
		}, nil)
		time.Sleep(time.Millisecond * 100)

		rootEnvironment.EmitNew(bart, SAY, cells.PayloadValues{
			"room":    makeRoomID("cafeteria", "school"),
			"message": "I'm Bart Simpson, who the hell are you?",
		}, nil)
		time.Sleep(time.Millisecond * 100)

		rootEnvironment.EmitNew(bart, SAY, cells.PayloadValues{
			"room":    makeRoomID("cafeteria", "school"),
			"message": "what teh hell ?",
		}, nil)

		time.Sleep(time.Second * 5)
	*/
}
func init() {
	logger.SetLevel(logger.LevelDebug)
}
// ParseConfigOptions reads and applies arguments from the command line and the
// configuration file, merging them together as needed, with command line options
// taking precedence over options in the config file.
func ParseConfigOptions() error {
	var opts = &Options{}
	_, err := flags.Parse(opts)

	if err != nil {
		if err.(*flags.Error).Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Println(err)
			os.Exit(1)
		}
	}

	if opts.Version {
		fmt.Printf("goiardi version %s built with %s (aiming for compatibility with Chef Server version %s).\n", Version, runtime.Version(), ChefVersion)
		os.Exit(0)
	}

	/* Load the config file. Command-line options have precedence over
	 * config file options. */
	if opts.ConfFile != "" {
		if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil {
			log.Println(err)
			os.Exit(1)
		}
		Config.ConfFile = opts.ConfFile
		Config.FreezeData = false
	}

	if opts.Export != "" && opts.Import != "" {
		log.Println("Cannot use -x/--export and -m/--import flags together.")
		os.Exit(1)
	}

	if opts.Export != "" {
		Config.DoExport = true
		Config.ImpExFile = opts.Export
	} else if opts.Import != "" {
		Config.DoImport = true
		Config.ImpExFile = opts.Import
	}

	if opts.Hostname != "" {
		Config.Hostname = opts.Hostname
	} else {
		if Config.Hostname == "" {
			Config.Hostname, err = os.Hostname()
			if err != nil {
				log.Println(err)
				Config.Hostname = "localhost"
			}
		}
	}

	if opts.DataStoreFile != "" {
		Config.DataStoreFile = opts.DataStoreFile
	}
	if opts.IndexFile != "" {
		Config.IndexFile = opts.IndexFile
	}

	// Use MySQL?
	if opts.UseMySQL {
		Config.UseMySQL = opts.UseMySQL
	}

	// Use Postgres?
	if opts.UsePostgreSQL {
		Config.UsePostgreSQL = opts.UsePostgreSQL
	}

	if Config.UseMySQL && Config.UsePostgreSQL {
		err := fmt.Errorf("The MySQL and Postgres options cannot be used together.")
		log.Println(err)
		os.Exit(1)
	}

	// Use Postgres search?
	if opts.PgSearch {
		// make sure postgres is enabled
		if !Config.UsePostgreSQL {
			err := fmt.Errorf("--pg-search requires --use-postgresql (which makes sense, really).")
			log.Println(err)
			os.Exit(1)
		}
		Config.PgSearch = opts.PgSearch
	}

	if Config.DataStoreFile != "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		err := fmt.Errorf("The MySQL or Postgres and data store options may not be specified together.")
		log.Println(err)
		os.Exit(1)
	}

	if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) && Config.IndexFile != "")) {
		err := fmt.Errorf("-i and -D must either both be specified, or not specified")
		log.Println(err)
		os.Exit(1)
	}

	if (Config.UseMySQL || Config.UsePostgreSQL) && (Config.IndexFile == "" && !Config.PgSearch) {
		err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL or PostgreSQL backend.")
		log.Println(err)
		os.Exit(1)
	}

	if Config.IndexFile != "" && (Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) {
		Config.FreezeData = true
	}

	if opts.LogFile != "" {
		Config.LogFile = opts.LogFile
	}
	if opts.SysLog {
		Config.SysLog = opts.SysLog
	}
	if Config.LogFile != "" {
		lfp, lerr := os.Create(Config.LogFile)
		if lerr != nil {
			log.Println(lerr)
			os.Exit(1)
		}
		log.SetOutput(lfp)
	}
	if dlev := len(opts.Verbose); dlev != 0 {
		Config.DebugLevel = dlev
	}
	if Config.LogLevel != "" {
		if lev, ok := LogLevelNames[strings.ToLower(Config.LogLevel)]; ok && Config.DebugLevel == 0 {
			Config.DebugLevel = lev
		}
	}
	if Config.DebugLevel > 4 {
		Config.DebugLevel = 4
	}
	// Flip the -V verbosity count into a logger level: more -V flags mean a
	// lower, more verbose level.
	Config.DebugLevel = int(logger.LevelCritical) - Config.DebugLevel
	logger.SetLevel(logger.LogLevel(Config.DebugLevel))
	debugLevel := map[int]string{0: "debug", 1: "info", 2: "warning", 3: "error", 4: "critical"}
	log.Printf("Logging at %s level", debugLevel[Config.DebugLevel])
	if Config.SysLog {
		sl, err := logger.NewSysLogger("goiardi")
		if err != nil {
			log.Println(err.Error())
			os.Exit(1)
		}
		logger.SetLogger(sl)
	} else {
		logger.SetLogger(logger.NewGoLogger())
	}

	/* Database options */

	// Don't bother setting a default mysql port if mysql isn't used
	if Config.UseMySQL {
		if Config.MySQL.Port == "" {
			Config.MySQL.Port = "3306"
		}
	}

	// set default Postgres options
	if Config.UsePostgreSQL {
		if Config.PostgreSQL.Port == "" {
			Config.PostgreSQL.Port = "5432"
		}
	}

	if opts.LocalFstoreDir != "" {
		Config.LocalFstoreDir = opts.LocalFstoreDir
	}
	if Config.LocalFstoreDir == "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		logger.Criticalf("local-filestore-dir must be set when running goiardi in SQL mode")
		os.Exit(1)
	}
	if Config.LocalFstoreDir != "" {
		finfo, ferr := os.Stat(Config.LocalFstoreDir)
		if ferr != nil {
			logger.Criticalf("Error checking local filestore dir: %s", ferr.Error())
			os.Exit(1)
		}
		if !finfo.IsDir() {
			logger.Criticalf("Local filestore dir %s is not a directory", Config.LocalFstoreDir)
			os.Exit(1)
		}
	}

	if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) {
		logger.Warningf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.")
	}
	if opts.FreezeInterval != 0 {
		Config.FreezeInterval = opts.FreezeInterval
	}
	if Config.FreezeInterval == 0 {
		Config.FreezeInterval = 10
	}

	/* Root directory for certs and the like */
	if opts.ConfRoot != "" {
		Config.ConfRoot = opts.ConfRoot
	}

	if Config.ConfRoot == "" {
		if Config.ConfFile != "" {
			Config.ConfRoot = path.Dir(Config.ConfFile)
		} else {
			Config.ConfRoot = "."
		}
	}

	if opts.Ipaddress != "" {
		Config.Ipaddress = opts.Ipaddress
	}
	if Config.Ipaddress != "" {
		ip := net.ParseIP(Config.Ipaddress)
		if ip == nil {
			logger.Criticalf("IP address '%s' is not valid", Config.Ipaddress)
			os.Exit(1)
		}
	}

	if opts.Port != 0 {
		Config.Port = opts.Port
	}
	if Config.Port == 0 {
		Config.Port = 4545
	}

	if opts.UseSSL {
		Config.UseSSL = opts.UseSSL
	}
	if opts.SSLCert != "" {
		Config.SSLCert = opts.SSLCert
	}
	if opts.SSLKey != "" {
		Config.SSLKey = opts.SSLKey
	}
	if opts.HTTPSUrls {
		Config.HTTPSUrls = opts.HTTPSUrls
	}

	// SSL setup
	if Config.Port == 80 {
		Config.UseSSL = false
	} else if Config.Port == 443 {
		Config.UseSSL = true
	}
	if Config.UseSSL {
		if Config.SSLCert == "" || Config.SSLKey == "" {
			logger.Criticalf("SSL mode requires specifying both a certificate and a key file.")
			os.Exit(1)
		}
		/* If the SSL cert and key are not absolute files, join them
		 * with the conf root */
		if !path.IsAbs(Config.SSLCert) {
			Config.SSLCert = path.Join(Config.ConfRoot, Config.SSLCert)
		}
		if !path.IsAbs(Config.SSLKey) {
			Config.SSLKey = path.Join(Config.ConfRoot, Config.SSLKey)
		}
	}

	if opts.TimeSlew != "" {
		Config.TimeSlew = opts.TimeSlew
	}
	if Config.TimeSlew != "" {
		d, derr := time.ParseDuration(Config.TimeSlew)
		if derr != nil {
			logger.Criticalf("Error parsing time-slew: %s", derr.Error())
			os.Exit(1)
		}
		Config.TimeSlewDur = d
	} else {
		Config.TimeSlewDur, _ = time.ParseDuration(DefaultTimeSlew)
	}

	if opts.UseAuth {
		Config.UseAuth = opts.UseAuth
	}

	if opts.DisableWebUI {
		Config.DisableWebUI = opts.DisableWebUI
	}

	if opts.LogEvents {
		Config.LogEvents = opts.LogEvents
	}

	if opts.LogEventKeep != 0 {
		Config.LogEventKeep = opts.LogEventKeep
	}

	// Set max sizes for objects and json requests.
	if opts.ObjMaxSize != 0 {
		Config.ObjMaxSize = opts.ObjMaxSize
	}
	if opts.JSONReqMaxSize != 0 {
		Config.JSONReqMaxSize = opts.JSONReqMaxSize
	}
	if Config.ObjMaxSize == 0 {
		Config.ObjMaxSize = 10485760
	}
	if Config.JSONReqMaxSize == 0 {
		Config.JSONReqMaxSize = 1000000
	}

	if opts.UseUnsafeMemStore {
		Config.UseUnsafeMemStore = opts.UseUnsafeMemStore
	}

	if opts.DbPoolSize != 0 {
		Config.DbPoolSize = opts.DbPoolSize
	}
	if opts.MaxConn != 0 {
		Config.MaxConn = opts.MaxConn
	}
	if !UsingDB() {
		if Config.DbPoolSize != 0 {
			logger.Infof("db-pool-size is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.DbPoolSize)
		}
		if Config.MaxConn != 0 {
			logger.Infof("max-connections is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.MaxConn)
		}
	}

	if opts.UseSerf {
		Config.UseSerf = opts.UseSerf
	}
	if Config.UseSerf {
		if opts.SerfAddr != "" {
			Config.SerfAddr = opts.SerfAddr
		}
		if Config.SerfAddr == "" {
			Config.SerfAddr = "127.0.0.1:7373"
		}
	}
	if opts.SerfEventAnnounce {
		Config.SerfEventAnnounce = opts.SerfEventAnnounce
	}
	if Config.SerfEventAnnounce && !Config.UseSerf {
		logger.Criticalf("--serf-event-announce requires --use-serf")
		os.Exit(1)
	}

	if opts.UseShovey {
		if !Config.UseSerf {
			logger.Criticalf("--use-shovey requires --use-serf to be enabled")
			os.Exit(1)
		}
		Config.UseShovey = opts.UseShovey
	}

	// shovey signing key stuff
	if opts.SignPrivKey != "" {
		Config.SignPrivKey = opts.SignPrivKey
	}

	// if using shovey, open the existing, or create if absent, signing
	// keys.
	if Config.UseShovey {
		if Config.SignPrivKey == "" {
			Config.SignPrivKey = path.Join(Config.ConfRoot, "shovey-sign_rsa")
		} else if !path.IsAbs(Config.SignPrivKey) {
			Config.SignPrivKey = path.Join(Config.ConfRoot, Config.SignPrivKey)
		}
		privfp, err := os.Open(Config.SignPrivKey)
		if err != nil {
			logger.Criticalf("Private key %s for signing shovey requests not found. Please create a set of RSA keys for this purpose.", Config.SignPrivKey)
			os.Exit(1)
		}
		privPem, err := ioutil.ReadAll(privfp)
		if err != nil {
			logger.Criticalf(err.Error())
			os.Exit(1)
		}
		privfp.Close()
		privBlock, _ := pem.Decode(privPem)
		if privBlock == nil {
			logger.Criticalf("Invalid block size for private key for shovey")
			os.Exit(1)
		}
		privKey, err := x509.ParsePKCS1PrivateKey(privBlock.Bytes)
		if err != nil {
			logger.Criticalf(err.Error())
			os.Exit(1)
		}
		Key.Lock()
		defer Key.Unlock()
		Key.PrivKey = privKey
	}

	if opts.DotSearch {
		Config.DotSearch = opts.DotSearch
	} else if Config.PgSearch {
		Config.DotSearch = true
	}
	if Config.DotSearch {
		if opts.ConvertSearch {
			Config.ConvertSearch = opts.ConvertSearch
		}
	}
	if Config.IndexFile != "" && Config.PgSearch {
		logger.Infof("Specifying an index file for search while using the postgres search isn't useful.")
	}

	return nil
}
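// The shovey branch of ParseConfigOptions above expects a PEM-encoded PKCS#1
// RSA private key at Config.SignPrivKey, defaulting to
// <conf-root>/shovey-sign_rsa. The standalone helper below is a hypothetical
// sketch (not part of goiardi) of one way to create such a key pair with the
// standard library; the file names and key size are assumptions.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	// Write the private key as an "RSA PRIVATE KEY" (PKCS#1) PEM block, the
	// form that x509.ParsePKCS1PrivateKey in the config code reads back.
	privOut, err := os.OpenFile("shovey-sign_rsa", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer privOut.Close()
	if err := pem.Encode(privOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
		log.Fatal(err)
	}

	// Write the matching public key alongside it.
	pubDER, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	pubOut, err := os.Create("shovey-sign_rsa.pub")
	if err != nil {
		log.Fatal(err)
	}
	defer pubOut.Close()
	if err := pem.Encode(pubOut, &pem.Block{Type: "PUBLIC KEY", Bytes: pubDER}); err != nil {
		log.Fatal(err)
	}
}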