Example #1
func main() {
	s, err := g2s.Dial("udp", "54.200.145.61:8125")
	if err != nil {
		return
	}
	s.Counter(1.0, "test.g2s", 1)
}
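All of the examples on this page use the same small surface of the peterbourgon/g2s package: Dial returns a Statter whose Counter, Timing, and Gauge methods each take a sample rate and a bucket name. The sketch below is not taken from any of the projects above; it is a minimal, self-contained illustration with a placeholder statsd address and made-up bucket names.

package main

import (
	"log"
	"time"

	"github.com/peterbourgon/g2s"
)

func main() {
	// UDP is connectionless, so Dial mostly validates the address;
	// writes are fire-and-forget and never block on the statsd server.
	s, err := g2s.Dial("udp", "127.0.0.1:8125") // placeholder address
	if err != nil {
		log.Fatal(err)
	}

	s.Counter(1.0, "example.requests", 1)                 // increment a counter by 1
	s.Timing(1.0, "example.latency", 42*time.Millisecond) // record a duration
	s.Gauge(1.0, "example.queue_depth", "17")             // gauges take string values
}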
Example #2
// setup connections to other services running on this host
func setupServices() {
	var err error
	services.Statsd, err = g2s.Dial("udp", "localhost:8125")
	if err != nil {
		log.Fatal("could not set up statsd client.")
	}
	services.Memcached = memcache.New("localhost:11211")

	// setup push service
	usingSandbox := "true" // true or false
	uniqushResponse, uniqushError := http.PostForm("http://localhost:9898/addpsp", url.Values{
		"pushservicetype": {"apns"},
		"service":         {"newspeak"},
		"cert":            {"/etc/newspeak/apns-certs/cert.pem"},
		"key":             {"/etc/newspeak/apns-certs/priv-noenc.pem"},
		"sandbox":         {usingSandbox},
	})
	if uniqushError != nil {
		log.Fatal("could not add push service provider for apple push notifications: " + string(uniqushError.Error()))
	} else {
		uniqushResponseBodyBytes, uniqushError := ioutil.ReadAll(uniqushResponse.Body)
		uniqushResponseBody := string(uniqushResponseBodyBytes)
		uniqushResponse.Body.Close()
		if uniqushError != nil {
			log.Fatal("could not read response when adding push service provider for apple push notifications: " + string(uniqushError.Error()))
		} else if uniqushResponseBody[0:30] != "[AddPushServiceProvider][Info]" {
			log.Fatal("invalid response when adding push service provider for apple push notifications: " + uniqushResponseBody)
		} else {
			fmt.Println("added push service provider for apple push notifications. usingSandbox:" + usingSandbox + ", uniqush response:" + uniqushResponseBody)
		}
	}
}
Example #3
func main() {
	// Parse command line arguments
	var (
		config_file = flag.String("config", "", "Path to configuration file")
	)
	flag.Parse()

	// Load configuration into package variable Config
	config_error := gcfg.ReadFileInto(&Config, *config_file)
	if config_error != nil {
		log.Fatal("Could not load config file: " + config_error.Error())
	}

	// Instantiate StatsD connection
	if Config.Statsd.Host == "" {
		StatsD = g2s.Noop()
	} else {
		StatsD, _ = g2s.Dial(Config.Statsd.Protocol, Config.Statsd.Host+":"+Config.Statsd.Port)
	}

	// Log startup
	log.Println("go-airbrake-proxy started")

	// Fire up an HTTP server and handle it
	http.HandleFunc("/", httpHandler)
	http.ListenAndServe(Config.Listen.Host+":"+Config.Listen.Port, nil)
}
Example #4
func Dial(proto, addr string) *Statsd {
	st, err := g2s.Dial(proto, addr)

	if err != nil {
		log.Printf("Couldn't initiate statsd with address: '%s'", addr)
		return nil
	}
	return &Statsd{st}
}
Example #5
func loadStatsd(addr string) g2s.Statter {
	s, err := g2s.Dial("udp", addr)
	if err != nil {
		log.Warnf("Error initialising statsd connection to %v", addr)
		return nil
	}

	return s
}
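Note that this helper returns a nil Statter when the dial fails, so callers have to guard against nil before emitting metrics (in contrast to the g2s.Noop() fallback used in several later examples). A hypothetical caller sketch, with the bucket name invented for illustration:

	statter := loadStatsd("localhost:8125")
	if statter == nil {
		statter = g2s.Noop() // substitute the no-op implementation once, then use it freely
	}
	statter.Counter(1.0, "myservice.startup", 1) // hypothetical bucket name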
Example #6
func (s *StatsD) CreateClient() {
	if s.Enabled && s.Client == nil {
		log.Println("StatsD is enabled")
		client, err := g2s.Dial("udp", s.Host)
		if err != nil {
			log.Fatalf("Cannot connect to statsd server %v: %v ", s.Host, err)
		}
		s.Client = client
	}

}
Example #7
func init() {
	addr := DEFAULT_STATSD_HOST
	if env := os.Getenv(ENV_STATSD); env != "" {
		addr = env
	}

	s, err := g2s.Dial("udp", addr)
	if err != nil {
		log.Critical(err)
		os.Exit(-1)
	}
	_statter = s
}
Example #8
func main() {
	configtoml := flag.String("f", "moxy.toml", "Path to config. (default moxy.toml)")
	flag.Parse()
	file, err := ioutil.ReadFile(*configtoml)
	if err != nil {
		log.Fatal(err)
	}
	err = toml.Unmarshal(file, &config)
	if err != nil {
		log.Fatal("Problem parsing config: ", err)
	}
	if config.Statsd != "" {
		statsd, _ = g2s.Dial("udp", config.Statsd)
	}
	moxystats := stats.New()
	mux := http.NewServeMux()
	mux.HandleFunc("/moxy_callback", moxy_callback)
	mux.HandleFunc("/moxy_apps", moxy_apps)
	mux.HandleFunc("/moxy_stats", func(w http.ResponseWriter, req *http.Request) {
		if config.Xproxy != "" {
			w.Header().Add("X-Proxy", config.Xproxy)
		}
		stats := moxystats.Data()
		b, _ := json.MarshalIndent(stats, "", "  ")
		w.Write(b)
		return
	})
	mux.HandleFunc("/", moxy_proxy)
	// In case we want to log req/resp.
	//trace, _ := trace.New(redirect, os.Stdout)
	handler := moxystats.Handler(mux)
	s := &http.Server{
		Addr:    ":" + config.Port,
		Handler: handler,
	}
	callbackworker()
	callbackqueue <- true
	if config.TLS {
		log.Println("Starting moxy tls on :" + config.Port)
		err := s.ListenAndServeTLS(config.Cert, config.Key)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		log.Println("Starting moxy on :" + config.Port)
		err := s.ListenAndServe()
		if err != nil {
			log.Fatal(err)
		}
	}
}
Example #9
func loadStatsd(addr string) g2s.Statter {
	disabled := config.AtPath("hailo", "service", "instrumentation", "statsd", "disabled").AsBool()
	if disabled {
		return g2s.Noop()
	}

	s, err := g2s.Dial("udp", addr)
	if err != nil {
		log.Warnf("Error initialising statsd connection to %v", addr)
		return nil
	}

	return s
}
Example #10
func init() {
	addr := DEFAULT_STATSD_HOST
	if env := os.Getenv(ENV_STATSD); env != "" {
		addr = env
	}

	s, err := g2s.Dial("udp", addr)
	if err != nil {
		log.Println(err)
		os.Exit(-1)
	}
	_statter = s

	go pprof_task()
}
Example #11
func BenchmarkG2s(b *testing.B) {
	s := newServer()
	c, err := g2s.Dial("udp", addr)
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		c.Counter(1, counterKey, 1)
		c.Gauge(1, gaugeKey, strconv.Itoa(gaugeValue))
		c.Timing(1, timingKey, tValDur)
	}
	b.StopTimer()
	s.Close()
}
Example #12
func init() {
	addr := DEFAULT_STATSD_HOST
	if env := os.Getenv(ENV_STATSD); env != "" {
		addr = env
	}

	s, err := g2s.Dial("udp", addr)
	if err == nil {
		_statter = s
	} else {
		_statter = g2s.Noop()
		log.Println(err)
	}

	go pprof_task()
}
Example #13
func setupStatsd() (g2s.Statter, error) {
	if config.Statsd.Addr == "" {
		return g2s.Noop(), nil
	}

	if config.Statsd.Namespace == "" {
		hostname, _ := os.Hostname()
		config.Statsd.Namespace = "nixy." + hostname
	}

	if config.Statsd.SampleRate < 1 || config.Statsd.SampleRate > 100 {
		config.Statsd.SampleRate = 100
	}

	return g2s.Dial("udp", config.Statsd.Addr)
}
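Because setupStatsd falls back to g2s.Noop() when no address is configured, a caller only needs to handle the dial error once and can then emit metrics unconditionally. A hypothetical caller sketch (the ".reload" bucket name is invented):

	statsd, err := setupStatsd()
	if err != nil {
		log.Fatal(err)
	}
	statsd.Counter(1.0, config.Statsd.Namespace+".reload", 1)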
Example #14
func main() {
	configtoml := flag.String("f", "nixy.toml", "Path to config. (default nixy.toml)")
	version := flag.Bool("v", false, "prints current nixy version")
	flag.Parse()
	if *version {
		fmt.Println(VERSION)
		os.Exit(0)
	}
	file, err := ioutil.ReadFile(*configtoml)
	if err != nil {
		log.Fatal(err)
	}
	err = toml.Unmarshal(file, &config)
	if err != nil {
		log.Fatal("Problem parsing config: ", err)
	}
	if config.Statsd != "" {
		statsd, _ = g2s.Dial("udp", config.Statsd)
	}
	nixystats := stats.New()
	//mux := http.NewServeMux()
	mux := mux.NewRouter()
	mux.HandleFunc("/", nixy_version)
	mux.HandleFunc("/v1/reload", nixy_reload)
	mux.HandleFunc("/v1/apps", nixy_apps)
	mux.HandleFunc("/v1/health", nixy_health)
	mux.HandleFunc("/v1/stats", func(w http.ResponseWriter, req *http.Request) {
		stats := nixystats.Data()
		b, _ := json.MarshalIndent(stats, "", "  ")
		w.Write(b)
		return
	})
	handler := nixystats.Handler(mux)
	s := &http.Server{
		Addr:    ":" + config.Port,
		Handler: handler,
	}
	eventStream()
	eventWorker()
	log.Println("Starting nixy on :" + config.Port)
	err = s.ListenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}
Example #15
func ExamplePanel_stats() {
	// This example demonstrates how to push circuit breaker stats to statsd via a Panel.
	// This example uses g2s. Anything conforming to the Statter interface can be used.
	s, err := g2s.Dial("udp", "statsd-server:8125")
	if err != nil {
		log.Fatal(err)
	}

	breaker := NewThresholdBreaker(10)
	panel := NewPanel()
	panel.Statter = s
	panel.StatsPrefixf = "sys.production"
	panel.Add("x", breaker)

	breaker.Trip()  // sys.production.circuit.x.tripped
	breaker.Reset() // sys.production.circuit.x.reset, sys.production.circuit.x.trip-time
	breaker.Fail()  // sys.production.circuit.x.fail
	breaker.Ready() // sys.production.circuit.x.ready (if it's tripped and ready to retry)
}
Example #16
func main() {
	var conf Config

	fn := Nop

	confpath := flag.String("conf", "servers.json", "")

	flag.Parse()

	file, _ := ioutil.ReadFile(*confpath)

	json.Unmarshal(file, &conf)

	if conf.Statsd != "" {
		e, _ := g2s.Dial("udp", conf.Statsd)
		fn = (&Statsd{e}).Report
	}

	NewYardstick(conf.Listen, conf.Peers, fn).Run()

	<-make(chan struct{})
}
Example #17
func main() {
	configtoml := flag.String("f", "nixy.toml", "Path to config. (default nixy.toml)")
	version := flag.Bool("v", false, "prints current nixy version")
	flag.Parse()
	if *version {
		fmt.Println(VERSION)
		os.Exit(0)
	}
	file, err := ioutil.ReadFile(*configtoml)
	if err != nil {
		log.Fatal(err)
	}
	err = toml.Unmarshal(file, &config)
	if err != nil {
		log.Fatal("Problem parsing config: ", err)
	}
	if config.Statsd != "" {
		statsd, _ = g2s.Dial("udp", config.Statsd)
	}
	mux := mux.NewRouter()
	mux.HandleFunc("/", nixy_version)
	mux.HandleFunc("/v1/reload", nixy_reload)
	mux.HandleFunc("/v1/config", nixy_config)
	mux.HandleFunc("/v1/health", nixy_health)
	s := &http.Server{
		Addr:    ":" + config.Port,
		Handler: mux,
	}
	endpoint = config.Marathon[0] // let's start with the first node.
	endpointHealth()
	eventStream()
	eventWorker()
	log.Println("Starting nixy on :" + config.Port)
	err = s.ListenAndServe()
	if err != nil {
		log.Fatal(err)
	}
}
Example #18
func runCollector() {
	for !flag.Parsed() {
		// Yield to the scheduler so other goroutines can run.
		runtime.Gosched()

		// Add an initial delay while the program initializes to avoid attempting to collect
		// metrics prior to our flags being available / parsed.
		time.Sleep(1 * time.Second)
	}

	s, err := g2s.Dial("udp", *statsd)
	if err != nil {
		panic(fmt.Sprintf("Unable to connect to Statsd on %s - %s", *statsd, err))
	}

	if *prefix == "<detect-hostname>" {
		hn, err := os.Hostname()

		if err != nil {
			*prefix = "go.unknown"
		} else {
			*prefix = "go." + hn
		}
	}
	*prefix += "."

	gaugeFunc := func(key string, val uint64) {
		s.Gauge(1.0, *prefix+key, strconv.FormatUint(val, 10))
	}
	c := collector.New(gaugeFunc)
	c.PauseDur = time.Duration(*pause) * time.Second
	c.EnableCPU = *cpu
	c.EnableMem = *mem
	c.EnableGC = *gc
	c.Run()
}
Example #19
// Instantiate a new Backend that will send data to a statsd instance
func NewStatsdBackend(host, port, protocol, prefix string) (Backend, error) {
	if host == "" {
		return nil, fmt.Errorf("Statsd host cannot be empty")
	}

	if port == "" {
		port = "8125"
	}

	if protocol == "" {
		protocol = "udp"
	}

	if prefix == "" {
		prefix = "checks."
	}

	statsd, err := g2s.Dial(protocol, net.JoinHostPort(host, port))
	if err != nil {
		return nil, err
	}

	return &statsdBackend{statsd: statsd, prefix: prefix}, nil
}
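Only the host is required here; an empty port, protocol, or prefix falls back to 8125, udp, and "checks." respectively, and net.JoinHostPort handles IPv6 literals correctly. An illustrative call relying on those defaults (the hostname is a placeholder):

	backend, err := NewStatsdBackend("statsd.internal", "", "", "")
	if err != nil {
		log.Fatalf("statsd backend unavailable: %v", err)
	}
	_ = backend // use the Backend as the surrounding project dictates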
Example #20
func main() {
	var (
		redisInstances      = flag.String("redis.instances", "", "Semicolon-separated list of comma-separated lists of Redis instances")
		redisConnectTimeout = flag.Duration("redis.connect.timeout", 3*time.Second, "Redis connect timeout")
		redisReadTimeout    = flag.Duration("redis.read.timeout", 3*time.Second, "Redis read timeout")
		redisWriteTimeout   = flag.Duration("redis.write.timeout", 3*time.Second, "Redis write timeout")
		redisMCPI           = flag.Int("redis.mcpi", 2, "Max connections per Redis instance")
		redisHash           = flag.String("redis.hash", "murmur3", "Redis hash function: murmur3, fnv, fnva")
		maxSize             = flag.Int("max.size", 10000, "Maximum number of events per key")
		batchSize           = flag.Int("batch.size", 100, "keys to select per request")
		maxKeysPerSecond    = flag.Int64("max.keys.per.second", 1000, "max keys per second to walk")
		scanLogInterval     = flag.Duration("scan.log.interval", 5*time.Second, "how often to report scan rates in log")
		once                = flag.Bool("once", false, "walk entire keyspace once and exit (default false, walk forever)")
		statsdAddress       = flag.String("statsd.address", "", "Statsd address (blank to disable)")
		statsdSampleRate    = flag.Float64("statsd.sample.rate", 0.1, "Statsd sample rate for normal metrics")
		statsdBucketPrefix  = flag.String("statsd.bucket.prefix", "myservice.", "Statsd bucket key prefix, including trailing period")
		httpAddress         = flag.String("http.address", ":6060", "HTTP listen address (profiling endpoints only)")
	)
	flag.Parse()
	log.SetFlags(log.Lmicroseconds)

	// Validate integer arguments.
	if *maxKeysPerSecond < int64(*batchSize) {
		log.Fatal("max keys per second should be bigger than batch size")
	}

	// Set up statsd instrumentation, if it's specified.
	stats := g2s.Noop()
	if *statsdAddress != "" {
		var err error
		stats, err = g2s.Dial("udp", *statsdAddress)
		if err != nil {
			log.Fatal(err)
		}
	}
	instr := statsd.New(stats, float32(*statsdSampleRate), *statsdBucketPrefix)

	// Parse hash function.
	var hashFunc func(string) uint32
	switch strings.ToLower(*redisHash) {
	case "murmur3":
		hashFunc = pool.Murmur3
	case "fnv":
		hashFunc = pool.FNV
	case "fnva":
		hashFunc = pool.FNVa
	default:
		log.Fatalf("unknown hash '%s'", *redisHash)
	}

	// Set up the clusters.
	clusters, err := makeClusters(
		*redisInstances,
		*redisConnectTimeout, *redisReadTimeout, *redisWriteTimeout,
		*redisMCPI,
		hashFunc,
		*maxSize,
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}

	// HTTP server for profiling
	go func() { log.Print(http.ListenAndServe(*httpAddress, nil)) }()

	// Set up our rate limiter. Remember: it's per-key, not per-request.
	freq := time.Duration(1/(*maxKeysPerSecond)) * time.Second
	bucket := tb.NewBucket(*maxKeysPerSecond, freq)

	// Build the farm
	readStrategy := farm.SendAllReadAll
	repairStrategy := farm.AllRepairs // blocking
	dst := farm.New(clusters, len(clusters), readStrategy, repairStrategy, instr)

	// Perform the walk
	begin := time.Now()
	for {
		src := scan(clusters, *batchSize, *scanLogInterval) // new key set
		walkOnce(dst, bucket, src, *maxSize, instr)
		if *once {
			break
		}
	}
	log.Printf("walk complete in %s", time.Since(begin))
}
Example #21
func main() {
	flag.Parse()
	if *verbose {
		log.Printf("%v -> %v\n", *localAddr, *remoteAddr)
	}

	addr, err := net.ResolveTCPAddr("tcp", *localAddr)
	if err != nil {
		log.Fatal("cannot resolve local address: ", err)
	}
	rAddr, err := net.ResolveTCPAddr("tcp", *remoteAddr)
	if err != nil {
		log.Fatal("cannot resolve remote address: ", err)
	}

	cb := circuit.NewRateBreaker(*threshold, *minSamples)
	events := cb.Subscribe()

	if *statsdHost != "" && *metricBase != "" && *metricName != "" {
		log.Println("logging to statsd")
		s, err := g2s.Dial("udp", *statsdHost)
		if err != nil {
			log.Fatal(err)
		}
		panel := circuit.NewPanel()
		panel.StatsPrefixf = *metricBase + ".%s"
		panel.Statter = s
		panel.Add(*metricName, cb)
	}
	if *verbose {
		go func() {
			for {
				e := <-events
				switch e {
				case circuit.BreakerTripped:
					log.Println("breaker tripped")
				case circuit.BreakerReset:
					log.Println("breaker reset")
				case circuit.BreakerFail:
					log.Println("breaker fail")
				case circuit.BreakerReady:
					log.Println("breaker ready")
				}
			}
		}()
	}

	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		log.Fatal("cannot bind to local port: ", err)
	}

	pending, complete := make(chan *net.TCPConn), make(chan *net.TCPConn)

	for i := 0; i < 5; i++ {
		go handleConn(pending, complete, rAddr, cb)
	}
	go closeConn(complete)

	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Fatal("error starting listener: ", err)
		}
		pending <- conn
	}
}
Example #22
		if uniqushError != nil {
			log.Fatal("could not read response when adding push service provider for apple push notifications: " + string(uniqushError.Error()))
		} else if uniqushResponseBody[0:30] != "[AddPushServiceProvider][Info]" {
			log.Fatal("invalid response when adding push service provider for apple push notifications: " + uniqushResponseBody)
		} else {
			fmt.Println("added push service provider for apple push notifications. usingSandbox:" + usingSandbox + ", uniqush response:" + uniqushResponseBody)
		}
	}
}

// print detailed stats about the request to the log and push stats data to graphite.
// runs as a separate goroutine, based on falcore.request.go:Trace()
// falcore docs say this is a big hit on performance and should only be used for debugging or development.
var completionCallback = func(falcoreRequest *falcore.Request, response *http.Response) {
	go func() {
		Statsd, statsdError := g2s.Dial("udp", "localhost:8125")
		requestTimeDiff := falcore.TimeDiff(falcoreRequest.StartTime, falcoreRequest.EndTime)
		httpRequest := falcoreRequest.HttpRequest

		// stats for the whole request
		falcore.Trace("%s [%s] %s%s S=%v Sig=%s Tot=%.4fs", falcoreRequest.ID, httpRequest.Method, httpRequest.Host, httpRequest.URL, response.StatusCode, falcoreRequest.Signature(), requestTimeDiff)
		if statsdError == nil {
			Statsd.Timing(1.0, "api.request-time", falcoreRequest.EndTime.Sub(falcoreRequest.StartTime))
		}

		// stats for each pipeline stage
		stages := falcoreRequest.PipelineStageStats
		for stage := stages.Front(); stage != nil; stage = stage.Next() {
			pipelineStageStats, _ := stage.Value.(*falcore.PipelineStageStat)

			stageTimeDiff := falcore.TimeDiff(pipelineStageStats.StartTime, pipelineStageStats.EndTime)
Example #23
func main() {
	flag.Parse()
	if *verbose {
		log.Printf("%v -> %v\n", *localAddr, *remoteAddr)
	}

	addr, err := net.ResolveTCPAddr("tcp", *localAddr)
	if err != nil {
		log.Fatal("cannot resolve local address: ", err)
	}
	rAddr, err := net.ResolveTCPAddr("tcp", *remoteAddr)
	if err != nil {
		log.Fatal("cannot resolve remote address: ", err)
	}
	options := circuit.Options{
		ShouldTrip:    circuit.RateTripFunc(*threshold, *minSamples),
		WindowTime:    time.Duration(*windowTime) * time.Millisecond,
		WindowBuckets: int(*windowBuckets),
	}
	cb := circuit.NewBreakerWithOptions(&options)
	events := cb.Subscribe()

	state.Set("ready")

	if *statsdHost != "" && *metricBase != "" && *metricName != "" {
		log.Println("logging to statsd")
		s, err := g2s.Dial("udp", *statsdHost)
		if err != nil {
			log.Fatal(err)
		}
		panel := circuit.NewPanel()
		panel.StatsPrefixf = *metricBase + ".%s"
		panel.Statter = s
		panel.Add(*metricName, cb)
	}

	go func() {
		for {
			e := <-events
			eventsCount.Add(1)
			switch e {
			case circuit.BreakerTripped:
				state.Set("tripped")
				if *verbose {
					log.Println("breaker tripped")
				}
			case circuit.BreakerReset:
				state.Set("reset")
				if *verbose {
					log.Println("breaker reset")
				}
			case circuit.BreakerFail:
				state.Set("fail")
				if *verbose {
					log.Println("breaker fail")
				}
			case circuit.BreakerReady:
				state.Set("ready")
				if *verbose {
					log.Println("breaker ready")
				}
			}
		}
	}()

	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		log.Fatal("cannot bind to local port: ", err)
	}

	pending, complete := make(chan *net.TCPConn), make(chan *net.TCPConn)

	for i := 0; i < 5; i++ {
		go handleConn(pending, complete, rAddr, cb)
	}
	go closeConn(complete)

	go func() {
		// serve the expvars endpoint
		http.ListenAndServe(*expvarAddr, nil)
	}()

	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Fatal("error starting listener: ", err)
		}
		connectionsCount.Add(1)
		pending <- conn
	}
}
Example #24
func main() {
	var (
		redisInstances          = flag.String("redis.instances", "", "Semicolon-separated list of comma-separated lists of Redis instances")
		redisConnectTimeout     = flag.Duration("redis.connect.timeout", 3*time.Second, "Redis connect timeout")
		redisReadTimeout        = flag.Duration("redis.read.timeout", 3*time.Second, "Redis read timeout")
		redisWriteTimeout       = flag.Duration("redis.write.timeout", 3*time.Second, "Redis write timeout")
		redisMCPI               = flag.Int("redis.mcpi", 2, "Max connections per Redis instance")
		redisHash               = flag.String("redis.hash", "murmur3", "Redis hash function: murmur3, fnv, fnva")
		selectGap               = flag.Duration("select.gap", 0*time.Millisecond, "delay between pipeline read invocations when Selecting over multiple keys")
		maxSize                 = flag.Int("max.size", 10000, "Maximum number of events per key")
		batchSize               = flag.Int("batch.size", 100, "keys to select per request")
		maxKeysPerSecond        = flag.Int64("max.keys.per.second", 1000, "max keys per second to walk")
		scanLogInterval         = flag.Duration("scan.log.interval", 5*time.Second, "how often to report scan rates in log")
		once                    = flag.Bool("once", false, "walk entire keyspace once and exit (default false, walk forever)")
		statsdAddress           = flag.String("statsd.address", "", "Statsd address (blank to disable)")
		statsdSampleRate        = flag.Float64("statsd.sample.rate", 0.1, "Statsd sample rate for normal metrics")
		statsdBucketPrefix      = flag.String("statsd.bucket.prefix", "myservice.", "Statsd bucket key prefix, including trailing period")
		prometheusNamespace     = flag.String("prometheus.namespace", "roshiwalker", "Prometheus key namespace, excluding trailing punctuation")
		prometheusMaxSummaryAge = flag.Duration("prometheus.max.summary.age", 10*time.Second, "Prometheus max age for instantaneous histogram data")
		httpAddress             = flag.String("http.address", ":6060", "HTTP listen address (profiling/metrics endpoints only)")
	)
	flag.Parse()
	log.SetOutput(os.Stdout)
	log.SetFlags(log.Lmicroseconds)

	// Validate integer arguments.
	if *maxKeysPerSecond < int64(*batchSize) {
		log.Fatal("max keys per second should be bigger than batch size")
	}

	// Set up instrumentation.
	statter := g2s.Noop()
	if *statsdAddress != "" {
		var err error
		statter, err = g2s.Dial("udp", *statsdAddress)
		if err != nil {
			log.Fatal(err)
		}
	}
	prometheusInstr := prometheus.New(*prometheusNamespace, *prometheusMaxSummaryAge)
	prometheusInstr.Install("/metrics", http.DefaultServeMux)
	instr := instrumentation.NewMultiInstrumentation(
		statsd.New(statter, float32(*statsdSampleRate), *statsdBucketPrefix),
		prometheusInstr,
	)

	// Parse hash function.
	var hashFunc func(string) uint32
	switch strings.ToLower(*redisHash) {
	case "murmur3":
		hashFunc = pool.Murmur3
	case "fnv":
		hashFunc = pool.FNV
	case "fnva":
		hashFunc = pool.FNVa
	default:
		log.Fatalf("unknown hash %q", *redisHash)
	}

	// Set up the clusters.
	clusters, err := farm.ParseFarmString(
		*redisInstances,
		*redisConnectTimeout, *redisReadTimeout, *redisWriteTimeout,
		*redisMCPI,
		hashFunc,
		*maxSize,
		*selectGap,
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}

	// HTTP server for profiling.
	go func() { log.Print(http.ListenAndServe(*httpAddress, nil)) }()

	// Set up our rate limiter. Remember: it's per-key, not per-request.
	var (
		freq   = time.Duration(1/(*maxKeysPerSecond)) * time.Second
		bucket = tb.NewBucket(*maxKeysPerSecond, freq)
	)

	// Build the farm.
	var (
		readStrategy   = farm.SendAllReadAll
		repairStrategy = farm.AllRepairs // blocking
		writeQuorum    = len(clusters)   // 100%
		dst            = farm.New(clusters, writeQuorum, readStrategy, repairStrategy, instr)
	)

	// Perform the walk.
	defer func(t time.Time) { log.Printf("total walk complete, %s", time.Since(t)) }(time.Now())
	for {
		src := scan(clusters, *batchSize, *scanLogInterval) // new key set
		walkOnce(dst, bucket, src, *maxSize, instr)
		if *once {
			break
		}
	}
}
Example #25
func main() {
	var (
		redisInstances             = flag.String("redis.instances", "", "Semicolon-separated list of comma-separated lists of Redis instances")
		redisConnectTimeout        = flag.Duration("redis.connect.timeout", 3*time.Second, "Redis connect timeout")
		redisReadTimeout           = flag.Duration("redis.read.timeout", 3*time.Second, "Redis read timeout")
		redisWriteTimeout          = flag.Duration("redis.write.timeout", 3*time.Second, "Redis write timeout")
		redisMCPI                  = flag.Int("redis.mcpi", 10, "Max connections per Redis instance")
		redisHash                  = flag.String("redis.hash", "murmur3", "Redis hash function: murmur3, fnv, fnva")
		farmWriteQuorum            = flag.String("farm.write.quorum", "51%", "Write quorum, either number of clusters (2) or percentage of clusters (51%)")
		farmReadStrategy           = flag.String("farm.read.strategy", "SendAllReadAll", "Farm read strategy: SendAllReadAll, SendOneReadOne, SendAllReadFirstLinger, SendVarReadFirstLinger")
		farmReadThresholdRate      = flag.Int("farm.read.threshold.rate", 2000, "Baseline SendAll keys read per sec, additional keys are SendOne (SendVarReadFirstLinger strategy only)")
		farmReadThresholdLatency   = flag.Duration("farm.read.threshold.latency", 50*time.Millisecond, "If a SendOne read has not returned anything after this latency, it's promoted to SendAll (SendVarReadFirstLinger strategy only)")
		farmRepairStrategy         = flag.String("farm.repair.strategy", "RateLimitedRepairs", "Farm repair strategy: AllRepairs, NoRepairs, RateLimitedRepairs")
		farmRepairMaxKeysPerSecond = flag.Int("farm.repair.max.keys.per.second", 1000, "Max repaired keys per second (RateLimited repairer only)")
		maxSize                    = flag.Int("max.size", 10000, "Maximum number of events per key")
		selectGap                  = flag.Duration("select.gap", 0*time.Millisecond, "delay between pipeline read invocations when Selecting over multiple keys")
		statsdAddress              = flag.String("statsd.address", "", "Statsd address (blank to disable)")
		statsdSampleRate           = flag.Float64("statsd.sample.rate", 0.1, "Statsd sample rate for normal metrics")
		statsdBucketPrefix         = flag.String("statsd.bucket.prefix", "myservice.", "Statsd bucket key prefix, including trailing period")
		prometheusNamespace        = flag.String("prometheus.namespace", "roshiserver", "Prometheus key namespace, excluding trailing punctuation")
		prometheusMaxSummaryAge    = flag.Duration("prometheus.max.summary.age", 10*time.Second, "Prometheus max age for instantaneous histogram data")
		httpAddress                = flag.String("http.address", ":6302", "HTTP listen address")
	)
	flag.Parse()
	log.SetOutput(os.Stdout)
	log.SetFlags(log.Lmicroseconds)
	log.Printf("GOMAXPROCS %d", runtime.GOMAXPROCS(-1))

	// Set up statsd instrumentation, if it's specified.
	statter := g2s.Noop()
	if *statsdAddress != "" {
		var err error
		statter, err = g2s.Dial("udp", *statsdAddress)
		if err != nil {
			log.Fatal(err)
		}
	}
	prometheusInstr := prometheus.New(*prometheusNamespace, *prometheusMaxSummaryAge)
	prometheusInstr.Install("/metrics", http.DefaultServeMux)
	instr := instrumentation.NewMultiInstrumentation(
		statsd.New(statter, float32(*statsdSampleRate), *statsdBucketPrefix),
		prometheusInstr,
	)

	// Parse read strategy.
	var readStrategy farm.ReadStrategy
	switch strings.ToLower(*farmReadStrategy) {
	case "sendallreadall":
		readStrategy = farm.SendAllReadAll
	case "sendonereadone":
		readStrategy = farm.SendOneReadOne
	case "sendallreadfirstlinger":
		readStrategy = farm.SendAllReadFirstLinger
	case "sendvarreadfirstlinger":
		readStrategy = farm.SendVarReadFirstLinger(*farmReadThresholdRate, *farmReadThresholdLatency)
	default:
		log.Fatalf("unknown read strategy %q", *farmReadStrategy)
	}
	log.Printf("using %s read strategy", *farmReadStrategy)

	// Parse repair strategy. Note that because this is a client-facing
	// production server, all repair strategies get a Nonblocking wrapper!
	repairRequestBufferSize := 100
	var repairStrategy farm.RepairStrategy
	switch strings.ToLower(*farmRepairStrategy) {
	case "allrepairs":
		repairStrategy = farm.Nonblocking(repairRequestBufferSize, farm.AllRepairs)
	case "norepairs":
		repairStrategy = farm.Nonblocking(repairRequestBufferSize, farm.NoRepairs)
	case "ratelimitedrepairs":
		repairStrategy = farm.Nonblocking(repairRequestBufferSize, farm.RateLimited(*farmRepairMaxKeysPerSecond, farm.AllRepairs))
	default:
		log.Fatalf("unknown repair strategy %q", *farmRepairStrategy)
	}
	log.Printf("using %s repair strategy", *farmRepairStrategy)

	// Parse hash function.
	var hashFunc func(string) uint32
	switch strings.ToLower(*redisHash) {
	case "murmur3":
		hashFunc = pool.Murmur3
	case "fnv":
		hashFunc = pool.FNV
	case "fnva":
		hashFunc = pool.FNVa
	default:
		log.Fatalf("unknown hash %q", *redisHash)
	}

	// Build the farm.
	farm, err := newFarm(
		*redisInstances,
		*farmWriteQuorum,
		*redisConnectTimeout, *redisReadTimeout, *redisWriteTimeout,
		*redisMCPI,
		hashFunc,
		readStrategy,
		repairStrategy,
		*maxSize,
		*selectGap,
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}

	// Build the HTTP server.
	r := pat.New()
	r.Add("GET", "/metrics", http.DefaultServeMux)
	r.Add("GET", "/debug", http.DefaultServeMux)
	r.Add("POST", "/debug", http.DefaultServeMux)
	r.Get("/", handleSelect(farm))
	r.Post("/", handleInsert(farm))
	r.Delete("/", handleDelete(farm))
	h := http.Handler(r)

	// Go for it.
	log.Printf("listening on %s", *httpAddress)
	log.Fatal(http.ListenAndServe(*httpAddress, h))
}
Example #26
func main() {
	s, err := g2s.Dial("udp", *statsDEndpoint)
	if err != nil {
		log.Fatalf("no transport to statsD endpoint %v: %v", *statsDEndpoint, err)
	}

	corpus, err := quality.ReadTsvCorpus(*inputFile)
	if err != nil {
		log.Fatalf("could not read corpus: %v", err)
	}

	type HTTPAPIResponse struct {
		Docs []quality.Doc `json:"docs"`
	}

	decoder := func(jsn []byte) ([]quality.Doc, error) {
		var response HTTPAPIResponse
		if err := json.Unmarshal(jsn, &response); err != nil {
			return []quality.Doc{}, fmt.Errorf("could not convert from json: %v", err)
		}

		return response.Docs, nil
	}

	api := quality.HTTPAPI{
		Site:    *searchSite,
		Path:    *searchPath,
		Decoder: decoder,
	}

	results, err := quality.Crawl(api, corpus, *concurrency, *retries)
	if err != nil {
		log.Fatalf("crawl aborted: %v", err)
	}

	fmt.Println()
	log.Printf("completed crawl with %v queries", len(results))

	es := quality.Evaluators{
		"mrr":            quality.MRR,
		"map":            quality.MAP,
		"precision-at-1": quality.PrecisionAtK(1),
		"precision-at-2": quality.PrecisionAtK(2),
		"precision-at-3": quality.PrecisionAtK(3),
		"precision-at-4": quality.PrecisionAtK(4),
		"precision-at-5": quality.PrecisionAtK(5),
	}

	for name, e := range es {
		summary, err := e(results, corpus)
		if err != nil {
			log.Fatalf("could not evaluate %v: %v", name, err)
		}

		key := fmt.Sprintf("%v.%v", *statsDNS, name)
		value := fmt.Sprintf("%.5f", summary)

		log.Printf("%v: %v", key, value)
		s.Gauge(1.0, key, value)
	}

	log.Printf("done.")
}
Example #27
func main() {
	// Parse command-line flags for this system.
	var (
		listenAddress    = flag.String("addr", "", "Address to listen to incoming requests on.")
		ldapAddress      = flag.String("ldapAddr", "", "Address to connect to LDAP.")
		ldapBindDN       = flag.String("ldapBindDN", "", "LDAP DN to bind to for login.")
		ldapInsecure     = flag.Bool("insecureLDAP", false, "INSECURE: Don't use TLS for LDAP connection.")
		ldapBindPassword = flag.String("ldapBindPassword", "", "LDAP password for bind.")
		statsdHost       = flag.String("statsHost", "", "Address to send statsd metrics to.")
		iamAccount       = flag.String("iamaccount", "", "AWS Account ID for generating IAM Role ARNs")
		enableLDAPRoles  = flag.Bool("ldaproles", false, "Enable role support using LDAP directory.")
		roleAttribute    = flag.String("roleattribute", "", "Group attribute to get role from.")
		defaultRole      = flag.String("role", "", "AWS role to assume by default.")
		configFile       = flag.String("conf", "/etc/hologram/server.json", "Config file to load.")
		cacheTimeout     = flag.Int("cachetime", 3600, "Time in seconds after which to refresh LDAP user cache.")
		debugMode        = flag.Bool("debug", false, "Enable debug mode.")
		config           Config
	)

	flag.Parse()

	// Enable debug log output if the user requested it.
	if *debugMode {
		log.DebugMode(true)
		log.Debug("Enabling debug log output. Use sparingly.")
	}

	// Parse in options from the given config file.
	log.Debug("Loading configuration from %s", *configFile)
	configContents, configErr := ioutil.ReadFile(*configFile)
	if configErr != nil {
		log.Errorf("Could not read from config file. The error was: %s", configErr.Error())
		os.Exit(1)
	}

	configParseErr := json.Unmarshal(configContents, &config)
	if configParseErr != nil {
		log.Errorf("Error in parsing config file: %s", configParseErr.Error())
		os.Exit(1)
	}

	// Merge in command flag options.
	if *ldapAddress != "" {
		config.LDAP.Host = *ldapAddress
	}

	if *ldapInsecure {
		config.LDAP.InsecureLDAP = true
	}

	if *ldapBindDN != "" {
		config.LDAP.Bind.DN = *ldapBindDN
	}

	if *ldapBindPassword != "" {
		config.LDAP.Bind.Password = *ldapBindPassword
	}

	if *statsdHost != "" {
		config.Stats = *statsdHost
	}

	if *iamAccount != "" {
		config.AWS.Account = *iamAccount
	}

	if *listenAddress != "" {
		config.Listen = *listenAddress
	}

	if *defaultRole != "" {
		config.AWS.DefaultRole = *defaultRole
	}

	if *enableLDAPRoles {
		config.LDAP.EnableLDAPRoles = true
	}

	if *roleAttribute != "" {
		config.LDAP.RoleAttribute = *roleAttribute
	}

	if *cacheTimeout != 3600 {
		config.CacheTimeout = *cacheTimeout
	}

	var stats g2s.Statter
	var statsErr error

	if config.LDAP.UserAttr == "" {
		config.LDAP.UserAttr = "cn"
	}

	if config.Stats == "" {
		log.Debug("No statsd server specified; no metrics will be emitted by this program.")
		stats = g2s.Noop()
	} else {
		stats, statsErr = g2s.Dial("udp", config.Stats)
		if statsErr != nil {
			log.Errorf("Error connecting to statsd: %s. No metrics will be emitted by this program.", statsErr.Error())
			stats = g2s.Noop()
		} else {
			log.Debug("This program will emit metrics to %s", config.Stats)
		}
	}

	// Setup the server state machine that responds to requests.
	auth, err := aws.GetAuth(os.Getenv("HOLOGRAM_AWSKEY"), os.Getenv("HOLOGRAM_AWSSECRET"), "", time.Now())
	if err != nil {
		log.Errorf("Error getting instance credentials: %s", err.Error())
		os.Exit(1)
	}

	stsConnection := sts.New(auth, aws.Regions["us-east-1"])
	credentialsService := server.NewDirectSessionTokenService(config.AWS.Account, stsConnection)

	var ldapServer *ldap.Conn

	// Connect to the LDAP server using TLS or not depending on the config
	if config.LDAP.InsecureLDAP {
		log.Debug("Connecting to LDAP at server %s (NOT using TLS).", config.LDAP.Host)
		ldapServer, err = ldap.Dial("tcp", config.LDAP.Host)
		if err != nil {
			log.Errorf("Could not dial LDAP! %s", err.Error())
			os.Exit(1)
		}
	} else {
		// Connect to the LDAP server with sample credentials.
		tlsConfig := &tls.Config{
			InsecureSkipVerify: true,
		}

		log.Debug("Connecting to LDAP at server %s.", config.LDAP.Host)
		ldapServer, err = ldap.DialTLS("tcp", config.LDAP.Host, tlsConfig)
		if err != nil {
			log.Errorf("Could not dial LDAP! %s", err.Error())
			os.Exit(1)
		}
	}

	if bindErr := ldapServer.Bind(config.LDAP.Bind.DN, config.LDAP.Bind.Password); bindErr != nil {
		log.Errorf("Could not bind to LDAP! %s", bindErr.Error())
		os.Exit(1)
	}

	ldapCache, err := server.NewLDAPUserCache(ldapServer, stats, config.LDAP.UserAttr, config.LDAP.BaseDN, config.LDAP.EnableLDAPRoles, config.LDAP.RoleAttribute)
	if err != nil {
		log.Errorf("Top-level error in LDAPUserCache layer: %s", err.Error())
		os.Exit(1)
	}

	serverHandler := server.New(ldapCache, credentialsService, config.AWS.DefaultRole, stats, ldapServer, config.LDAP.UserAttr, config.LDAP.BaseDN, config.LDAP.EnableLDAPRoles)
	server, err := remote.NewServer(config.Listen, serverHandler.HandleConnection)
	if err != nil {
		log.Errorf("Could not start Hologram server: %s", err.Error())
		os.Exit(1)
	}

	// Wait for a signal from the OS to shutdown.
	terminate := make(chan os.Signal, 1)
	signal.Notify(terminate, syscall.SIGINT, syscall.SIGTERM)

	// SIGUSR1 and SIGUSR2 should make Hologram enable and disable debug logging,
	// respectively.
	debugEnable := make(chan os.Signal, 1)
	debugDisable := make(chan os.Signal, 1)
	signal.Notify(debugEnable, syscall.SIGUSR1)
	signal.Notify(debugDisable, syscall.SIGUSR2)

	// SIGHUP should make Hologram server reload its cache of user information
	// from LDAP.
	reloadCacheSigHup := make(chan os.Signal, 1)
	signal.Notify(reloadCacheSigHup, syscall.SIGHUP)

	// Reload the cache based on time set in configuration
	cacheTimeoutTicker := time.NewTicker(time.Duration(config.CacheTimeout) * time.Second)

	log.Info("Hologram server is online, waiting for termination.")

WaitForTermination:
	for {
		select {
		case <-terminate:
			break WaitForTermination
		case <-debugEnable:
			log.Info("Enabling debug mode.")
			log.DebugMode(true)
		case <-debugDisable:
			log.Info("Disabling debug mode.")
			log.DebugMode(false)
		case <-reloadCacheSigHup:
			log.Info("Force-reloading user cache.")
			ldapCache.Update()
		case <-cacheTimeoutTicker.C:
			log.Info("Cache timeout. Reloading user cache.")
			ldapCache.Update()
		}
	}

	log.Info("Caught signal; shutting down now.")
	server.Close()
}