// I thought conn would drop messages because the tE TCP handler can't keep up,
// but that doesn't seem to be true (anymore?): it just works without having to sleep after dispatch.
// Also note that dummyPackets uses a channel API, which probably causes most of the slowdown.
func benchmarkSendAndReceive(b *testing.B, dp *dummyPackets) {
	logging.SetLevel(logging.ERROR, "carbon-relay-ng")
	// the test endpoint logs a warning because it does something bad with conn at the end, but it's harmless
	tE := NewTestEndpoint(nil, ":2005")
	na := tE.conditionNumAccepts(1)
	tE.Start()
	table = NewTableOrFatal(b, "", "addRoute sendAllMatch test1 127.0.0.1:2005")
	na.Wait()
	// reminder: the Go benchmark framework invokes this with N = 1 first, then possibly larger values,
	// and the time it reports is the total run time divided by N,
	// which should converge to a more or less stable per-iteration value.
	fmt.Println()
	for i := 0; i < b.N; i++ {
		log.Notice("iteration %d: sending %d metrics", i, dp.amount)
		ns := tE.conditionNumSeen(dp.amount * (i + 1))
		for m := range dp.All() {
			//fmt.Println("dispatching", m)
			//fmt.Printf("dispatching '%s'\n", string(m))
			table.Dispatch(m)
		}
		log.Notice("waiting until all %d messages received", dp.amount*(i+1))
		ns.Wait()
		log.Notice("iteration %d done. received %d metrics (%d total)", i, dp.amount, dp.amount*(i+1))
	}
	log.Notice("received all %d messages. wrapping up benchmark run", dp.amount*b.N)
	err := table.Shutdown()
	if err != nil {
		b.Fatal(err)
	}
	tE.Close()
}
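
// Hypothetical sketch (not necessarily how the rest of this file wires things up):
// the helper above is meant to be driven by thin per-size Benchmark* wrappers, one
// per dummyPackets fixture from init(), so each input size gets its own line in
// `go test -bench` output. The wrapper name below is an assumption.
func BenchmarkSendAndReceiveThousand(b *testing.B) {
	benchmarkSendAndReceive(b, packets3A)
}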
// just dispatch (metrics coming into the table), no matching or sending to a route
func BenchmarkTableDispatchMillion(b *testing.B) {
	logging.SetLevel(logging.WARNING, "carbon-relay-ng") // we don't care about unroutable notices
	table = NewTableOrFatal(b, "", "")
	for i := 0; i < b.N; i++ {
		for j := 0; j < 1000000; j++ {
			table.Dispatch(metric70)
		}
	}
}
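
// A minimal sketch (not in the original file) of a dispatch-only variant where b.N
// itself counts metrics, so the reported ns/op is the per-Dispatch cost directly
// rather than the cost of a million dispatches. The function name is an assumption.
func BenchmarkTableDispatchSingle(b *testing.B) {
	logging.SetLevel(logging.WARNING, "carbon-relay-ng") // silence unroutable notices, as above
	table = NewTableOrFatal(b, "", "")
	for i := 0; i < b.N; i++ {
		table.Dispatch(metric70)
	}
}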
func init() {
	instance = "test"
	packets0A = NewDummyPackets("0A", 1)
	packets1A = NewDummyPackets("1A", 10)
	packets1B = NewDummyPackets("1B", 10)
	packets1C = NewDummyPackets("1C", 10)
	packets3A = NewDummyPackets("3A", 1000)
	packets3B = NewDummyPackets("3B", 1000)
	packets3C = NewDummyPackets("3C", 1000)
	packets4A = NewDummyPackets("4A", 10000)
	packets5A = NewDummyPackets("5A", 100000)
	packets6A = NewDummyPackets("6A", 1000000)
	//packets6B = NewDummyPackets("6B", 1000000)
	//packets6C = NewDummyPackets("6C", 1000000)
	logging.SetLevel(logging.NOTICE, "carbon-relay-ng")
	metric70 = []byte("abcde_fghij.klmnopqrst.uv_wxyz.1234567890abcdefg 12345.6789 1234567890") // key = 48, space = 1, val = 10, space = 1, ts = 10 -> 70 bytes
}
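
// Hypothetical sanity check (not part of the original file): it just verifies the
// 70-byte figure noted above, i.e. key(48) + space(1) + value(10) + space(1) + timestamp(10).
func TestMetric70Length(t *testing.T) {
	if len(metric70) != 70 {
		t.Fatalf("expected metric70 to be 70 bytes, got %d", len(metric70))
	}
}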
func main() {
	flag.Usage = usage
	flag.Parse()
	config_file = "/etc/carbon-relay-ng.ini"
	if 1 == flag.NArg() {
		config_file = flag.Arg(0)
	}

	if _, err := toml.DecodeFile(config_file, &config); err != nil {
		log.Error("Cannot use config file '%s':\n", config_file)
		log.Error(err.Error())
		usage()
		return
	}
	//runtime.SetBlockProfileRate(1) // enable block profiling; in my experience this adds 35% overhead

	levels := map[string]logging.Level{
		"critical": logging.CRITICAL,
		"error":    logging.ERROR,
		"warning":  logging.WARNING,
		"notice":   logging.NOTICE,
		"info":     logging.INFO,
		"debug":    logging.DEBUG,
	}
	level, ok := levels[config.Log_level]
	if !ok {
		log.Error("unrecognized log level '%s'\n", config.Log_level)
		return
	}
	logging.SetLevel(level, "carbon-relay-ng")

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if len(config.Instance) == 0 {
		log.Error("instance identifier cannot be empty")
		os.Exit(1)
	}

	runtime.GOMAXPROCS(config.max_procs)

	instance = config.Instance
	expvar.NewString("instance").Set(instance)
	expvar.NewString("service").Set(service)

	log.Notice("===== carbon-relay-ng instance '%s' starting. =====\n", instance)

	numIn = Counter("unit=Metric.direction=in")
	numInvalid = Counter("unit=Err.type=invalid")
	if config.Instrumentation.Graphite_addr != "" {
		addr, err := net.ResolveTCPAddr("tcp", config.Instrumentation.Graphite_addr)
		if err != nil {
			log.Fatal(err)
		}
		go metrics.Graphite(metrics.DefaultRegistry, time.Duration(config.Instrumentation.Graphite_interval)*time.Millisecond, "", addr)
	}

	log.Notice("creating routing table...")
	maxAge, err := time.ParseDuration(config.Bad_metrics_max_age)
	if err != nil {
		log.Error("could not parse badMetrics max age")
		log.Error(err.Error())
		os.Exit(1)
	}
	badMetrics = badmetrics.New(maxAge)

	table = NewTable(config.Spool_dir)
	log.Notice("initializing routing table...")
	for i, cmd := range config.Init {
		log.Notice("applying: %s", cmd)
		err = applyCommand(table, cmd)
		if err != nil {
			log.Error("could not apply init cmd #%d", i+1)
			log.Error(err.Error())
			os.Exit(1)
		}
	}
	tablePrinted := table.Print()
	log.Notice("===========================")
	log.Notice("========== TABLE ==========")
	log.Notice("===========================")
	for _, line := range strings.Split(tablePrinted, "\n") {
		log.Notice(line)
	}

	// Follow the goagain protocol, <https://github.com/rcrowley/goagain>.
	l, ppid, err := goagain.GetEnvs()
	if nil != err {
		// fresh start: no listener was inherited from a parent process
		laddr, err := net.ResolveTCPAddr("tcp", config.Listen_addr)
		if nil != err {
			log.Error(err.Error())
			os.Exit(1)
		}
		l, err = net.ListenTCP("tcp", laddr)
		if nil != err {
			log.Error(err.Error())
			os.Exit(1)
		}
		log.Notice("listening on %v", laddr)
		go accept(l.(*net.TCPListener), config)
	} else {
		// we were re-exec'd by goagain: resume the inherited listener,
		// then tell the parent to exit and wait until it is gone
		log.Notice("resuming listening on %v", l.Addr())
		go accept(l.(*net.TCPListener), config)
		if err := goagain.KillParent(ppid); nil != err {
			log.Error(err.Error())
			os.Exit(1)
		}
		for {
			err := syscall.Kill(ppid, 0)
			if err != nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}

	if config.Admin_addr != "" {
		go func() {
			err := adminListener(config.Admin_addr)
			if err != nil {
				fmt.Println("Error listening:", err.Error())
				os.Exit(1)
			}
		}()
	}

	if config.Http_addr != "" {
		go HttpListener(config.Http_addr, table)
	}

	if err := goagain.AwaitSignals(l); nil != err {
		log.Error(err.Error())
		os.Exit(1)
	}
}
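
// Hypothetical client sketch (not part of this file): once the relay is listening on
// config.Listen_addr, metrics arrive as plain graphite lines ("key value timestamp\n")
// written over TCP, the same format as the metric70 fixture used in the benchmarks.
// The function name and metric key are assumptions; nothing in this package calls it.
func sendOneMetric(addr string) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = fmt.Fprintf(conn, "some.test.metric 1 %d\n", time.Now().Unix())
	return err
}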