func New(inst string) {
	instance = inst

	exp.Exp(metrics.DefaultRegistry)

	expvar.NewString("instance").Set(instance)
	expvar.NewString("service").Set(service)
}
Example #2
func init() {
	expvar.NewString("goVersion").Set(runtime.Version())
	expvar.NewString("iconVersion").Set(besticon.VersionString)

	expvar.NewString("timeLastDeploy").Set(parseUnixTimeStamp(os.Getenv("DEPLOYED_AT")).String())
	expvar.NewString("timeStartup").Set(time.Now().String())
	expvar.Publish("timeCurrent", expvar.Func(func() interface{} { return time.Now() }))
}
Example #3
func main() {

	port := flag.Int("p", 8080, "port to listen on")
	input := flag.String("f", "", "file with signatures to load")
	useVPTree := flag.Bool("vptree", true, "load vptree")
	useStore := flag.Bool("store", true, "load simstore")
	storeSize := flag.Int("size", 6, "simstore size (3/6)")
	cpus := flag.Int("cpus", runtime.NumCPU(), "value of GOMAXPROCS")
	myNumber := flag.Int("no", 0, "id of this machine")
	totalMachines := flag.Int("of", 1, "number of machines to distribute the table among")
	small := flag.Bool("small", false, "use small memory for size 3")

	flag.Parse()

	expvar.NewString("BuildVersion").Set(BuildVersion)

	log.Println("starting simd", BuildVersion)

	log.Println("setting GOMAXPROCS=", *cpus)
	runtime.GOMAXPROCS(*cpus)

	if *input == "" {
		log.Fatalln("no import hash list provided (-f)")
	}

	err := loadConfig(*input, *useStore, *storeSize, *small, *useVPTree, *myNumber, *totalMachines)
	if err != nil {
		log.Fatalln("unable to load config:", err)
	}

	if *useStore {
		http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { searchHandler(w, r) })
	}

	if *useVPTree {
		http.HandleFunc("/topk", func(w http.ResponseWriter, r *http.Request) { topkHandler(w, r) })
	}

	go func() {
		// buffered channel: signal.Notify must not block when delivering a signal
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGHUP)

		for {
			select {
			case <-sigs:
				log.Println("caught SIGHUP, reloading")

				err := loadConfig(*input, *useStore, *storeSize, *small, *useVPTree, *myNumber, *totalMachines)
				if err != nil {
					log.Println("reload failed: ignoring:", err)
					break
				}
			}
		}

	}()

	log.Println("listening on port", *port)
	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
}
Example #4
func main() {
	flag.Parse()
	expvar.NewString("listen_port").Set(*listenPort)

	stdout := log.New(os.Stdout, "", log.Ldate|log.Ltime)
	stderr := log.New(os.Stderr, "", log.Ldate|log.Ltime)

	http.Handle("/msg", idHandler(stdout, stderr))

	logMessage(stdout,
		&Message{
			Source: &PROG,
			Key:    &STATUS,
			Content: map[string]interface{}{
				"status": "listening",
				"port":   *listenPort,
			},
		})

	err := http.ListenAndServe(fmt.Sprintf(":%s", *listenPort), nil)
	if err != nil {
		logMessage(stderr,
			&Message{
				Source: &PROG,
				Key:    &ERR,
				Content: map[string]interface{}{
					"error": fmt.Sprintf("error starting to listen: %s", err),
				},
			})
		os.Exit(1)
	}
}
Example #5
func init() {
	expvar.NewString("service.startTime").Set(time.Now().String())

	// Suppress usage output. Any errors will be reported when the
	// flags are parsed anyway.
	fs.Usage = func() {}
	fs.SetOutput(nullWriter{})
}
Example #6
func init() {
	currentTar = expvar.NewString("CurrentTar")
	tarBytesRead = expvar.NewInt("TarBytesRead")
	tarsFailed = expvar.NewInt("TarsFailed")
	tarsIndexed = expvar.NewInt("TarsIndexed")
	tarsSkipped = expvar.NewInt("TarsSkipped")
	tracesFailed = expvar.NewInt("TracesFailed")
	tracesIndexed = expvar.NewInt("TracesIndexed")
}
Example #7
// NewMetrics creates a Metrics object.
func NewMetrics() *Metrics {
	metrics = &Metrics{
		true,
		expvar.NewInt("comparisons"),
		expvar.NewInt("errors"),
		expvar.NewString("database"),
	}
	return metrics
}
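The Metrics type populated here is not shown in this example. Judging from the positional struct literal above, it holds a bool followed by two *expvar.Int fields and one *expvar.String; a minimal sketch with assumed field names (not the project's actual definition) could look like this:

type Metrics struct {
	Enabled     bool           // assumed name for the leading bool field
	Comparisons *expvar.Int    // backs the "comparisons" expvar
	Errors      *expvar.Int    // backs the "errors" expvar
	Database    *expvar.String // backs the "database" expvar
}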
Example #8
func init() {
	expvar.NewInt("NumCPUs").Set(int64(runtime.NumCPU()))

	revision, err := exec.Command("git", "log", "-1", "--pretty=oneline", "HEAD").Output()
	if err != nil {
		expvar.NewString("revision").Set(fmt.Sprintf("Could not determine git version: %s", err))
	} else {
		expvar.NewString("revision").Set(strings.TrimSpace(string(revision)))
	}

	env := expvar.NewMap("env")
	for _, val := range os.Environ() {
		parts := strings.SplitN(val, "=", 2)
		if len(parts) >= 2 {
			env.Set(parts[0], exposedString{parts[1]})
		}
	}
}
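The exposedString type used with env.Set above is not defined on this page. expvar.Map.Set accepts any expvar.Var, i.e. any value with a String() string method whose output is valid JSON, so a minimal sketch (an assumption, not the project's code; it relies on encoding/json being imported) might be:

type exposedString struct {
	s string
}

func (e exposedString) String() string {
	// expvar embeds Var values verbatim in the /debug/vars JSON output,
	// so return the string as a JSON-quoted value.
	b, _ := json.Marshal(e.s)
	return string(b)
}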
Example #9
func main() {

	Whispers = whispers{metrics: make(map[string]*carbonmem.Whisper)}

	flag.IntVar(&Whispers.windowSize, "w", 600, "window size")
	flag.IntVar(&Whispers.epochSize, "e", 60, "epoch window size")
	flag.IntVar(&Whispers.epoch0, "epoch0", 0, "epoch0")
	flag.IntVar(&Whispers.prefix, "prefix", 0, "prefix nodes to shard on")

	port := flag.Int("p", 8001, "port to listen on (http)")
	gport := flag.Int("gp", 2003, "port to listen on (graphite)")
	verbose := flag.Bool("v", false, "verbose logging")
	logdir := flag.String("logdir", "/var/log/carbonmem/", "logging directory")
	logtostdout := flag.Bool("stdout", false, "log also to stdout")

	flag.Parse()

	rl := rotatelogs.NewRotateLogs(
		*logdir + "/carbonmem.%Y%m%d%H%M.log",
	)

	// Optional fields must be set afterwards
	rl.LinkName = *logdir + "/carbonmem.log"

	if *logtostdout {
		log.SetOutput(io.MultiWriter(os.Stdout, rl))
	} else {
		log.SetOutput(rl)
	}

	expvar.NewString("BuildVersion").Set(BuildVersion)
	log.Println("starting carbonmem", BuildVersion)

	expvar.Publish("Whispers", expvar.Func(func() interface{} {
		m := make(map[string]int)
		Whispers.RLock()
		for k, v := range Whispers.metrics {
			m[k] = v.Len()
		}
		Whispers.RUnlock()
		return m
	}))

	if Whispers.epoch0 == 0 {
		Whispers.epoch0 = int(time.Now().Unix())
	}

	go graphiteServer(*gport)

	http.HandleFunc("/metrics/find/", accessHandler(*verbose, findHandler))
	http.HandleFunc("/render/", accessHandler(*verbose, renderHandler))

	log.Println("http server starting on port", *port)
	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
}
Example #10
func exportBinaryVersion() {
	hasher := md5.New()
	exeFile, err := os.Open("/proc/self/exe")
	if err != nil {
		panic(err)
	}
	if _, err = io.Copy(hasher, exeFile); err != nil {
		panic(err)
	}
	md5sum := hex.EncodeToString(hasher.Sum(nil))
	fileInfo, err := exeFile.Stat()
	if err != nil {
		panic(err)
	}
	mtime := fileInfo.ModTime().Format(time.RFC3339)
	version := mtime + " " + md5sum
	expvar.NewString("binary-version").Set(version)
	// re-export this value for the varz scraper
	expvar.NewString("Version").Set(version)
}
Example #11
func NewRuntimeWare(prefixes []string, trackPageview bool, logInterval ...time.Duration) Middleware {
	expvar.NewString("at_server_start").Set(time.Now().Format("2006-01-02 15:04:05"))
	expvar.NewInt("cpu_count").Set(int64(runtime.NumCPU()))
	ware := &RuntimeWare{
		serverStarted: time.Now(),
		trackPageview: trackPageview,
		ignoredUrls:   prefixes,
		cQps:          ratecounter.NewRateCounter(time.Minute),
		c4xx:          ratecounter.NewRateCounter(5 * time.Minute),
		c5xx:          ratecounter.NewRateCounter(5 * time.Minute),
		lc:            NewLatencyCounter(50),
		hitsTotal:     expvar.NewInt("hits_total"),
		hitsQps:       expvar.NewInt("hits_per_minute"),
		hits4xx:       expvar.NewInt("hits_4xx_per_5min"),
		hits5xx:       expvar.NewInt("hits_5xx_per_5min"),
		hitsServed:    expvar.NewString("latency_recent"),
		hitsLatMax:    expvar.NewString("latency_max"),
		hitsLatMin:    expvar.NewString("latency_min"),
		hitsLat95:     expvar.NewString("latency_p95"),
		hitsLat50:     expvar.NewString("latency_p50"),
		numGoroutine:  expvar.NewInt("goroutine_count"),
	}
	if trackPageview {
		ware.pageviews = expvar.NewMap("hits_pageviews")
	}
	if len(logInterval) > 0 && logInterval[0] > 0 {
		go ware.logSnapshot(logInterval[0])
	}
	return ware
}
Example #12
func init() {
	registry := metrics.NewPrefixedChildRegistry(metrics.DefaultRegistry, "daemon_")
	registry.Register("open_fds", openFDs)
	registry.Register("goroutines", goroutines)
	registry.Register("threads", threads)
	registry.Register("connections", conns)

	registry.Register("hc_openfd", metrics.NewHealthcheck(fdHealthCheck))
	registry.Register("hc_threads", metrics.NewHealthcheck(threadHealthCheck))

	http.Handle("/metrics", exportmetrics.HTTPExport(metrics.DefaultRegistry))

	expvar.NewString("version_info").Set(fmt.Sprintf("version:%s hash:%s build:%s tag:%s",
		version.Version, version.GitHash, version.Build, version.GitTag))
}
Example #13
func init() {
	currentTar = expvar.NewString("CurrentTar")
	tarBytesRead = expvar.NewInt("TarBytesRead")
	tarsFailed = expvar.NewInt("TarsFailed")
	tarsIndexed = expvar.NewInt("TarsIndexed")
	tarsSkipped = expvar.NewInt("TarsSkipped")
	statsFailed = expvar.NewInt("StatsFailed")
	statsIndexed = expvar.NewInt("StatsIndexed")

	timestampActiveSkipped = expvar.NewInt("TimestampActiveSkipped")
	timestampBismarkExperimentsManagerSkipped = expvar.NewInt("TimestampBismarkExperimentsManagerSkipped")
	timestampBismarkUpdaterSkipped = expvar.NewInt("TimestampBismarkUpdaterSkipped")
	timestampHealthSkipped = expvar.NewInt("TimestampHealthSkipped")
	timestampMacAnalyzerSkipped = expvar.NewInt("TimestampMacAnalyzerSkipped")
	timestampPassiveSkipped = expvar.NewInt("TimestampPassiveSkipped")
	timestampPassiveFrequentSkipped = expvar.NewInt("TimestampPassiveFrequentSkipped")
	timestampOtherSkipped = expvar.NewInt("TimestampOtherSkipped")
}
Example #14
func main() {
	var inerInt int64 = 10
	pubInt := expvar.NewInt("Int")
	pubInt.Set(inerInt)
	pubInt.Add(2)

	var inerFloat float64 = 1.2
	pubFloat := expvar.NewFloat("Float")
	pubFloat.Set(inerFloat)
	pubFloat.Add(0.1)

	var inerString string = "hello gophers"
	pubString := expvar.NewString("String")
	pubString.Set(inerString)

	pubMap := expvar.NewMap("Map").Init()
	pubMap.Set("Int", pubInt)
	pubMap.Set("Float", pubFloat)
	pubMap.Set("String", pubString)
	pubMap.Add("Int", 1)
	pubMap.Add("NewInt", 123)
	pubMap.AddFloat("Float", 0.5)
	pubMap.AddFloat("NewFloat", 0.9)
	pubMap.Do(kvfunc)

	expvar.Do(kvfunc)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello gophers")
	})
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err)
	}

}
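The kvfunc callback passed to pubMap.Do and expvar.Do is not shown here; both expect a func(expvar.KeyValue). A plausible minimal version (an assumption) simply prints each exported variable:

func kvfunc(kv expvar.KeyValue) {
	// kv.Key is the variable's name; kv.Value is an expvar.Var whose
	// String() method yields its JSON representation.
	fmt.Printf("%s = %s\n", kv.Key, kv.Value.String())
}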
Example #15
package main

import (
	"expvar"
	"github.com/duego/cryriver/mongodb"
	"log"
	"os"
	"time"
)

var (
	lastEsSeen     *mongodb.Timestamp
	lastEsSeenC    = make(chan *mongodb.Timestamp, 1)
	lastEsSeenStat = expvar.NewString("Last optime seen")
)

func init() {
	// Restore any previously saved timestamp
	lastEsSeen = new(mongodb.Timestamp)
	if f, err := os.Open(*optimeStore); err != nil {
		log.Println("Failed to load previous lastEsSeen timestamp:", err)
	} else {
		lastEsSeen.Load(f)
		f.Close()
	}
	go saveLastEsSeen()
}

// saveLastEsSeen loops the channel to save our progress on what timestamp we have seen so far.
// It will be flushed to disk when our timer ticks.
func saveLastEsSeen() {
Example #16
func main() {

	z := flag.String("z", "", "zipper")
	port := flag.Int("p", 8080, "port")
	l := flag.Int("l", 20, "concurrency limit")
	cacheType := flag.String("cache", "mem", "cache type to use")
	mc := flag.String("mc", "", "comma separated memcached server list")
	memsize := flag.Int("memsize", 0, "in-memory cache size in MB (0 is unlimited)")
	cpus := flag.Int("cpus", 0, "number of CPUs to use")
	tz := flag.String("tz", "", "timezone,offset to use for dates with no timezone")
	graphiteHost := flag.String("graphite", "", "graphite destination host")
	logdir := flag.String("logdir", "/var/log/carbonapi/", "logging directory")
	logtostdout := flag.Bool("stdout", false, "log also to stdout")
	interval := flag.Duration("i", 60*time.Second, "interval to report internal statistics to graphite")
	idleconns := flag.Int("idleconns", 10, "max idle connections")
	pidFile := flag.String("pid", "", "pidfile (default: empty, don't create pidfile)")

	flag.Parse()

	if *logdir == "" {
		mlog.SetRawStream(os.Stdout)
	} else {
		mlog.SetOutput(*logdir, "carbonapi", *logtostdout)
	}

	expvar.NewString("BuildVersion").Set(BuildVersion)
	logger.Logln("starting carbonapi", BuildVersion)

	if p := os.Getenv("PORT"); p != "" {
		*port, _ = strconv.Atoi(p)
	}

	Limiter = newLimiter(*l)

	if *z == "" {
		logger.Fatalln("no zipper provided")
	}

	if _, err := url.Parse(*z); err != nil {
		logger.Fatalln("unable to parze zipper:", err)
	}

	logger.Logln("using zipper", *z)
	Zipper = zipper{
		z: *z,
		client: &http.Client{
			Transport: &http.Transport{
				MaxIdleConnsPerHost: *idleconns,
			},
		},
	}

	switch *cacheType {
	case "memcache":
		if *mc == "" {
			logger.Fatalln("memcache cache requested but no memcache servers provided")
		}

		servers := strings.Split(*mc, ",")
		logger.Logln("using memcache servers:", servers)
		queryCache = &memcachedCache{client: memcache.New(servers...)}
		findCache = &memcachedCache{client: memcache.New(servers...)}

	case "mem":
		qcache := &expireCache{ec: ecache.New(uint64(*memsize * 1024 * 1024))}
		queryCache = qcache
		go queryCache.(*expireCache).ec.ApproximateCleaner(10 * time.Second)

		findCache = &expireCache{ec: ecache.New(0)}
		go findCache.(*expireCache).ec.ApproximateCleaner(10 * time.Second)

		Metrics.CacheSize = expvar.Func(func() interface{} {
			return qcache.ec.Size()
		})
		expvar.Publish("cache_size", Metrics.CacheSize)

		Metrics.CacheItems = expvar.Func(func() interface{} {
			return qcache.ec.Items()
		})
		expvar.Publish("cache_items", Metrics.CacheItems)

	case "null":
		queryCache = &nullCache{}
		findCache = &nullCache{}
	}

	if *tz != "" {
		fields := strings.Split(*tz, ",")
		if len(fields) != 2 {
			logger.Fatalf("expected two fields for tz,seconds, got %d", len(fields))
		}

		var err error
		offs, err := strconv.Atoi(fields[1])
		if err != nil {
			logger.Fatalf("unable to parse seconds: %s: %s", fields[1], err)
		}

		defaultTimeZone = time.FixedZone(fields[0], offs)
		logger.Logf("using fixed timezone %s, offset %d ", defaultTimeZone.String(), offs)
	}

	if *cpus != 0 {
		logger.Logln("using GOMAXPROCS", *cpus)
		runtime.GOMAXPROCS(*cpus)
	}

	if envhost := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); envhost != ":" || *graphiteHost != "" {

		var host string

		switch {
		case envhost != ":" && *graphiteHost != "":
			host = *graphiteHost
		case envhost != ":":
			host = envhost
		case *graphiteHost != "":
			host = *graphiteHost
		}

		logger.Logln("Using graphite host", host)

		logger.Logln("setting stats interval to", *interval)

		// register our metrics with graphite
		graphite := g2g.NewGraphite(host, *interval, 10*time.Second)

		hostname, _ := os.Hostname()
		hostname = strings.Replace(hostname, ".", "_", -1)

		graphite.Register(fmt.Sprintf("carbon.api.%s.requests", hostname), Metrics.Requests)
		graphite.Register(fmt.Sprintf("carbon.api.%s.request_cache_hits", hostname), Metrics.RequestCacheHits)

		graphite.Register(fmt.Sprintf("carbon.api.%s.find_requests", hostname), Metrics.FindRequests)
		graphite.Register(fmt.Sprintf("carbon.api.%s.find_cache_hits", hostname), Metrics.FindCacheHits)

		graphite.Register(fmt.Sprintf("carbon.api.%s.render_requests", hostname), Metrics.RenderRequests)

		graphite.Register(fmt.Sprintf("carbon.api.%s.memcache_timeouts", hostname), Metrics.MemcacheTimeouts)

		if Metrics.CacheSize != nil {
			graphite.Register(fmt.Sprintf("carbon.api.%s.cache_size", hostname), Metrics.CacheSize)
			graphite.Register(fmt.Sprintf("carbon.api.%s.cache_items", hostname), Metrics.CacheItems)
		}

		go mstats.Start(*interval)

		graphite.Register(fmt.Sprintf("carbon.api.%s.alloc", hostname), &mstats.Alloc)
		graphite.Register(fmt.Sprintf("carbon.api.%s.total_alloc", hostname), &mstats.TotalAlloc)
		graphite.Register(fmt.Sprintf("carbon.api.%s.num_gc", hostname), &mstats.NumGC)
		graphite.Register(fmt.Sprintf("carbon.api.%s.pause_ns", hostname), &mstats.PauseNS)

	}

	render := func(w http.ResponseWriter, r *http.Request) {
		var stats renderStats
		t0 := time.Now()
		renderHandler(w, r, &stats)
		since := time.Since(t0)
		logger.Logln(r.RequestURI, since.Nanoseconds()/int64(time.Millisecond), stats.zipperRequests)
	}

	if *pidFile != "" {
		pidfile.SetPidfilePath(*pidFile)
		err := pidfile.Write()
		if err != nil {
			logger.Fatalln("error during pidfile.Write():", err)
		}
	}

	r := http.DefaultServeMux
	r.HandleFunc("/render/", render)
	r.HandleFunc("/render", render)

	r.HandleFunc("/metrics/find/", findHandler)
	r.HandleFunc("/metrics/find", findHandler)

	r.HandleFunc("/info/", passthroughHandler)
	r.HandleFunc("/info", passthroughHandler)

	r.HandleFunc("/lb_check", lbcheckHandler)
	r.HandleFunc("/", usageHandler)

	logger.Logln("listening on port", *port)
	handler := handlers.CompressHandler(r)
	handler = handlers.CORS()(handler)
	handler = handlers.CombinedLoggingHandler(mlog.GetOutput(), handler)

	err := gracehttp.Serve(&http.Server{
		Addr:    ":" + strconv.Itoa(*port),
		Handler: handler,
	})

	if err != nil {
		logger.Fatalln(err)
	}
}
Example #17
func init() {
	expvar.NewString("BuildID").Set(BuildID)
	expvar.NewString("BuildTime").Set(BuildTime)
}
Example #18
func init() {
	versionVar := expvar.NewString("dex.version")
	versionVar.Set(version)
}
Example #19
func main() {
	addr := flag.String("a", ":2003", "address to bind to")
	reportaddr := flag.String("reportaddr", ":8080", "address to bind http report interface to")
	verbose := flag.Bool("v", false, "enable verbose logging")
	debug := flag.Bool("vv", false, "enable more verbose (debug) logging")
	whisperdata := flag.String("w", config.WhisperData, "location where whisper files are stored")
	maxprocs := flag.Int("maxprocs", runtime.NumCPU()*80/100, "GOMAXPROCS")
	logdir := flag.String("logdir", "/var/log/carbonwriter/", "logging directory")
	schemafile := flag.String("schemafile", "/etc/carbon/storage-schemas.conf", "storage-schemas.conf location")
	aggrfile := flag.String("aggrfile", "/etc/carbon/storage-aggregation.conf", "storage-aggregation.conf location")
	logtostdout := flag.Bool("stdout", false, "log also to stdout")

	flag.Parse()

	rl := rotatelogs.NewRotateLogs(
		*logdir + "/carbonwriter.%Y%m%d%H%M.log",
	)

	// Optional fields must be set afterwards
	rl.LinkName = *logdir + "/carbonwriter.log"

	if *logtostdout {
		log.SetOutput(io.MultiWriter(os.Stdout, rl))
	} else {
		log.SetOutput(rl)
	}

	expvar.NewString("BuildVersion").Set(BuildVersion)
	log.Println("starting carbonwriter", BuildVersion)

	loglevel := LOG_NORMAL
	if *verbose {
		loglevel = LOG_DEBUG
	}
	if *debug {
		loglevel = LOG_TRACE
	}

	logger = logLevel(loglevel)

	schemas, err := readStorageSchemas(*schemafile)
	if err != nil {
		logger.Logf("failed to read %s: %s", *schemafile, err.Error())
		os.Exit(1)
	}

	aggrs, err := readStorageAggregations(*aggrfile)
	if err != nil {
		logger.Logf("failed to read %s: %s", *aggrfile, err.Error())
		os.Exit(1)
	}

	config.WhisperData = strings.TrimRight(*whisperdata, "/")
	logger.Logf("writing whisper files to: %s", config.WhisperData)
	logger.Logf("reading storage schemas from: %s", *schemafile)
	logger.Logf("reading aggregation rules from: %s", *aggrfile)

	runtime.GOMAXPROCS(*maxprocs)
	logger.Logf("set GOMAXPROCS=%d", *maxprocs)

	httputil.PublishTrackedConnections("httptrack")
	expvar.Publish("requestBuckets", expvar.Func(renderTimeBuckets))

	// +1 to track everything over the number of buckets we track
	timeBuckets = make([]int64, config.Buckets+1)

	// nothing in the config? check the environment
	if config.GraphiteHost == "" {
		if host := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); host != ":" {
			config.GraphiteHost = host
		}
	}

	// only register g2g if we have a graphite host
	if config.GraphiteHost != "" {

		logger.Logf("Using graphite host %v", config.GraphiteHost)

		// register our metrics with graphite
		graphite, err := g2g.NewGraphite(config.GraphiteHost, 60*time.Second, 10*time.Second)
		if err != nil {
			log.Fatalf("unable to connect to to graphite: %v: %v", config.GraphiteHost, err)
		}

		hostname, _ := os.Hostname()
		hostname = strings.Replace(hostname, ".", "_", -1)

		//		graphite.Register(fmt.Sprintf("carbon.writer.%s.metricsReceived",
		//			hostname), Metrics.received)

		for i := 0; i <= config.Buckets; i++ {
			graphite.Register(fmt.Sprintf("carbon.writer.%s.write_in_%dms_to_%dms", hostname, i*100, (i+1)*100), bucketEntry(i))
		}
	}

	logger.Logf("listening on %s, statistics via %s", *addr, *reportaddr)
	go listenAndServe(*addr, schemas, aggrs)
	err = http.ListenAndServe(*reportaddr, nil)
	if err != nil {
		log.Fatalf("%s", err)
	}
	logger.Logf("stopped")
}
Example #20
func main() {

	flag.Usage = usage
	flag.Parse()

	config_file = "/etc/carbon-relay-ng.ini"
	if 1 == flag.NArg() {
		config_file = flag.Arg(0)
	}

	if _, err := toml.DecodeFile(config_file, &config); err != nil {
		log.Error("Cannot use config file '%s':\n", config_file)
		log.Error(err.Error())
		usage()
		return
	}
	//runtime.SetBlockProfileRate(1) // to enable block profiling. in my experience, adds 35% overhead.

	levels := map[string]logging.Level{
		"critical": logging.CRITICAL,
		"error":    logging.ERROR,
		"warning":  logging.WARNING,
		"notice":   logging.NOTICE,
		"info":     logging.INFO,
		"debug":    logging.DEBUG,
	}
	level, ok := levels[config.Log_level]
	if !ok {
		log.Error("unrecognized log level '%s'\n", config.Log_level)
		return
	}
	logging.SetLevel(level, "carbon-relay-ng")
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if len(config.Instance) == 0 {
		log.Error("instance identifier cannot be empty")
		os.Exit(1)
	}

	runtime.GOMAXPROCS(config.max_procs)

	instance = config.Instance
	expvar.NewString("instance").Set(instance)
	expvar.NewString("service").Set(service)

	log.Notice("===== carbon-relay-ng instance '%s' starting. =====\n", instance)

	numIn = Counter("unit=Metric.direction=in")
	numInvalid = Counter("unit=Err.type=invalid")
	if config.Instrumentation.Graphite_addr != "" {
		addr, err := net.ResolveTCPAddr("tcp", config.Instrumentation.Graphite_addr)
		if err != nil {
			log.Fatal(err)
		}
		go metrics.Graphite(metrics.DefaultRegistry, time.Duration(config.Instrumentation.Graphite_interval)*time.Millisecond, "", addr)
	}

	log.Notice("creating routing table...")
	maxAge, err := time.ParseDuration(config.Bad_metrics_max_age)
	if err != nil {
		log.Error("could not parse badMetrics max age")
		log.Error(err.Error())
		os.Exit(1)
	}
	badMetrics = badmetrics.New(maxAge)
	table = NewTable(config.Spool_dir)
	log.Notice("initializing routing table...")
	for i, cmd := range config.Init {
		log.Notice("applying: %s", cmd)
		err = applyCommand(table, cmd)
		if err != nil {
			log.Error("could not apply init cmd #%d", i+1)
			log.Error(err.Error())
			os.Exit(1)
		}
	}
	tablePrinted := table.Print()
	log.Notice("===========================")
	log.Notice("========== TABLE ==========")
	log.Notice("===========================")
	for _, line := range strings.Split(tablePrinted, "\n") {
		log.Notice(line)
	}

	// Follow the goagain protocol, <https://github.com/rcrowley/goagain>.
	l, ppid, err := goagain.GetEnvs()
	if nil != err {
		laddr, err := net.ResolveTCPAddr("tcp", config.Listen_addr)
		if nil != err {
			log.Error(err.Error())
			os.Exit(1)
		}
		l, err = net.ListenTCP("tcp", laddr)
		if nil != err {
			log.Error(err.Error())

			os.Exit(1)
		}
		log.Notice("listening on %v", laddr)
		go accept(l.(*net.TCPListener), config)
	} else {
		log.Notice("resuming listening on %v", l.Addr())
		go accept(l.(*net.TCPListener), config)
		if err := goagain.KillParent(ppid); nil != err {
			log.Error(err.Error())
			os.Exit(1)
		}
		for {
			err := syscall.Kill(ppid, 0)
			if err != nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}

	if config.Admin_addr != "" {
		go func() {
			err := adminListener(config.Admin_addr)
			if err != nil {
				fmt.Println("Error listening:", err.Error())
				os.Exit(1)
			}
		}()
	}

	if config.Http_addr != "" {
		go HttpListener(config.Http_addr, table)
	}

	if err := goagain.AwaitSignals(l); nil != err {
		log.Error(err.Error())
		os.Exit(1)
	}
}
Example #21
func main() {

	configFile := flag.String("c", "", "config file (json)")
	port := flag.Int("p", 0, "port to listen on")
	maxprocs := flag.Int("maxprocs", 0, "GOMAXPROCS")
	debugLevel := flag.Int("d", 0, "enable debug logging")
	logtostdout := flag.Bool("stdout", false, "write logging output also to stdout")
	logdir := flag.String("logdir", "/var/log/carbonzipper/", "logging directory")

	flag.Parse()

	expvar.NewString("BuildVersion").Set(BuildVersion)

	if *configFile == "" {
		log.Fatal("missing config file")
	}

	cfgjs, err := ioutil.ReadFile(*configFile)
	if err != nil {
		log.Fatal("unable to load config file:", err)
	}

	cfgjs = stripCommentHeader(cfgjs)

	if cfgjs == nil {
		log.Fatal("error removing header comment from ", *configFile)
	}

	err = json.Unmarshal(cfgjs, &Config)
	if err != nil {
		log.Fatal("error parsing config file: ", err)
	}

	if len(Config.Backends) == 0 {
		log.Fatal("no Backends loaded -- exiting")
	}

	// command line overrides config file

	if *port != 0 {
		Config.Port = *port
	}

	if *maxprocs != 0 {
		Config.MaxProcs = *maxprocs
	}

	// set up our logging

	rl := rotatelogs.NewRotateLogs(
		*logdir + "/carbonzipper.%Y%m%d%H%M.log",
	)

	// Optional fields must be set afterwards
	rl.LinkName = *logdir + "/carbonzipper.log"

	if *logtostdout {
		log.SetOutput(io.MultiWriter(os.Stdout, rl))
	} else {
		log.SetOutput(rl)
	}

	logger = logLevel(*debugLevel)
	logger.Logln("starting carbonzipper", BuildVersion)

	logger.Logln("setting GOMAXPROCS=", Config.MaxProcs)
	runtime.GOMAXPROCS(Config.MaxProcs)

	if Config.ConcurrencyLimitPerServer != 0 {
		logger.Logln("Setting concurrencyLimit", Config.ConcurrencyLimitPerServer)
		Limiter = newServerLimiter(Config.Backends, Config.ConcurrencyLimitPerServer)
	}

	// +1 to track everything over the number of buckets we track
	timeBuckets = make([]int64, Config.Buckets+1)

	httputil.PublishTrackedConnections("httptrack")
	expvar.Publish("requestBuckets", expvar.Func(renderTimeBuckets))

	// export config via expvars
	expvar.Publish("Config", expvar.Func(func() interface{} { return Config }))

	http.HandleFunc("/metrics/find/", httputil.TrackConnections(httputil.TimeHandler(findHandler, bucketRequestTimes)))
	http.HandleFunc("/render/", httputil.TrackConnections(httputil.TimeHandler(renderHandler, bucketRequestTimes)))
	http.HandleFunc("/info/", httputil.TrackConnections(httputil.TimeHandler(infoHandler, bucketRequestTimes)))
	http.HandleFunc("/lb_check", lbCheckHandler)

	// nothing in the config? check the environment
	if Config.GraphiteHost == "" {
		if host := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); host != ":" {
			Config.GraphiteHost = host
		}
	}

	// only register g2g if we have a graphite host
	if Config.GraphiteHost != "" {

		logger.Logln("Using graphite host", Config.GraphiteHost)

		// register our metrics with graphite
		graphite, err := g2g.NewGraphite(Config.GraphiteHost, 60*time.Second, 10*time.Second)
		if err != nil {
			log.Fatal("unable to connect to to graphite: ", Config.GraphiteHost, ":", err)
		}

		hostname, _ := os.Hostname()
		hostname = strings.Replace(hostname, ".", "_", -1)

		graphite.Register(fmt.Sprintf("carbon.zipper.%s.find_requests", hostname), Metrics.FindRequests)
		graphite.Register(fmt.Sprintf("carbon.zipper.%s.find_errors", hostname), Metrics.FindErrors)

		graphite.Register(fmt.Sprintf("carbon.zipper.%s.render_requests", hostname), Metrics.RenderRequests)
		graphite.Register(fmt.Sprintf("carbon.zipper.%s.render_errors", hostname), Metrics.RenderErrors)

		graphite.Register(fmt.Sprintf("carbon.zipper.%s.info_requests", hostname), Metrics.InfoRequests)
		graphite.Register(fmt.Sprintf("carbon.zipper.%s.info_errors", hostname), Metrics.InfoErrors)

		graphite.Register(fmt.Sprintf("carbon.zipper.%s.timeouts", hostname), Metrics.Timeouts)

		for i := 0; i <= Config.Buckets; i++ {
			graphite.Register(fmt.Sprintf("carbon.zipper.%s.requests_in_%dms_to_%dms", hostname, i*100, (i+1)*100), bucketEntry(i))
		}
	}

	// configure the storage client
	storageClient.Transport = &http.Transport{
		MaxIdleConnsPerHost: Config.MaxIdleConnsPerHost,
	}

	go probeTlds()
	// force run now
	probeForce <- 1

	portStr := fmt.Sprintf(":%d", Config.Port)
	logger.Logln("listening on", portStr)
	log.Fatal(http.ListenAndServe(portStr, nil))
}
Example #22
func init() {
	expvar.NewString("dex.version").Set(version)
}
Example #23
func handleConn(in <-chan *net.TCPConn, out chan<- *net.TCPConn, rAddr *net.TCPAddr, cb *circuit.Breaker) {
	for conn := range in {
		cb.Call(func() error {
			return proxy(conn, rAddr)
		}, 0)
	}
}

func closeConn(in <-chan *net.TCPConn) {
	for conn := range in {
		conn.Close()
	}
}

var state = expvar.NewString("state")
var eventsCount = expvar.NewInt("events")
var connectionsCount = expvar.NewInt("connections")

func main() {
	flag.Parse()
	if *verbose {
		log.Printf("%v -> %v\n", *localAddr, *remoteAddr)
	}

	addr, err := net.ResolveTCPAddr("tcp", *localAddr)
	if err != nil {
		log.Fatal("cannot resolve local address: ", err)
	}
	rAddr, err := net.ResolveTCPAddr("tcp", *remoteAddr)
	if err != nil {
Example #24
const baseChangeURL = "https://go.googlesource.com/go/+/"

func main() {
	flag.Parse()
	changeURL := fmt.Sprintf("%sgo%s", baseChangeURL, *version)
	http.Handle("/", NewServer(*version, changeURL, *pollPeriod))
	log.Fatal(http.ListenAndServe(*httpAddr, nil))
}

// Exported variables for monitoring the server.
// These are exported via HTTP as a JSON object at /debug/vars.
var (
	hitCount       = expvar.NewInt("hitCount")
	pollCount      = expvar.NewInt("pollCount")
	pollError      = expvar.NewString("pollError")
	pollErrorCount = expvar.NewInt("pollErrorCount")
)

// Server implements the outyet server.
// It serves the user interface (it's an http.Handler)
// and polls the remote repository for changes.
type Server struct {
	version string
	url     string
	period  time.Duration

	mu  sync.RWMutex // protects the yes variable
	yes bool
}
Example #25
	"github.com/docker/leadership"
	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/serf/serf"
	"github.com/mitchellh/cli"
)

const (
	// gracefulTimeout controls how long we wait before forcefully terminating
	gracefulTimeout = 3 * time.Second

	defaultRecoverTime = 10 * time.Second
	defaultLeaderTTL   = 20 * time.Second
)

var (
	expNode = expvar.NewString("node")

	// ErrLeaderNotFound is returned when the leader obtained from the store is not found in the member list
	ErrLeaderNotFound = errors.New("No member leader found in member list")
)

// ProcessorFactory is a function type that creates a new instance
// of a processor.
type ProcessorFactory func() (ExecutionProcessor, error)

// AgentCommand run server
type AgentCommand struct {
	Ui               cli.Ui
	Version          string
	ShutdownCh       <-chan struct{}
	ProcessorPlugins map[string]ExecutionProcessor
Example #26
func main() {
	port := flag.Int("p", 8080, "port to bind to")
	verbose := flag.Bool("v", false, "enable verbose logging")
	debug := flag.Bool("vv", false, "enable more verbose (debug) logging")
	whisperdata := flag.String("w", config.WhisperData, "location where whisper files are stored")
	maxglobs := flag.Int("maxexpand", config.MaxGlobs, "maximum expansion depth to perform on input via curly braces ({a,b,c})")
	maxprocs := flag.Int("maxprocs", runtime.NumCPU()*80/100, "GOMAXPROCS")
	logdir := flag.String("logdir", "/var/log/carbonserver/", "logging directory")
	logtostdout := flag.Bool("stdout", false, "log also to stdout")
	scanFrequency := flag.Duration("scanfreq", 0, "file index scan frequency (0 to disable file index)")
	interval := flag.Duration("i", 60*time.Second, "interval to report internal statistics to graphite")

	flag.Parse()

	mlog.SetOutput(*logdir, "carbonserver", *logtostdout)

	expvar.NewString("BuildVersion").Set(BuildVersion)
	log.Println("starting carbonserver", BuildVersion)

	loglevel := mlog.Normal
	if *verbose {
		loglevel = mlog.Debug
	}
	if *debug {
		loglevel = mlog.Trace
	}

	logger = mlog.Level(loglevel)

	config.WhisperData = strings.TrimRight(*whisperdata, "/")
	logger.Logf("reading whisper files from: %s", config.WhisperData)

	config.MaxGlobs = *maxglobs
	logger.Logf("maximum brace expansion set to: %d", config.MaxGlobs)

	if *scanFrequency != 0 {
		logger.Logln("use file cache with scan frequency", *scanFrequency)
		force := make(chan struct{})
		go fileListUpdater(*whisperdata, time.Tick(*scanFrequency), force)
		force <- struct{}{}
	}

	runtime.GOMAXPROCS(*maxprocs)
	logger.Logf("set GOMAXPROCS=%d", *maxprocs)

	httputil.PublishTrackedConnections("httptrack")
	expvar.Publish("requestBuckets", expvar.Func(renderTimeBuckets))

	// +1 to track everything over the number of buckets we track
	timeBuckets = make([]int64, config.Buckets+1)

	http.HandleFunc("/metrics/find/", httputil.TrackConnections(httputil.TimeHandler(findHandler, bucketRequestTimes)))
	http.HandleFunc("/render/", httputil.TrackConnections(httputil.TimeHandler(fetchHandler, bucketRequestTimes)))
	http.HandleFunc("/info/", httputil.TrackConnections(httputil.TimeHandler(infoHandler, bucketRequestTimes)))

	// nothing in the config? check the environment
	if config.GraphiteHost == "" {
		if host := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); host != ":" {
			config.GraphiteHost = host
		}
	}

	// only register g2g if we have a graphite host
	if config.GraphiteHost != "" {

		logger.Logf("Using graphite host %v", config.GraphiteHost)

		// register our metrics with graphite
		graphite, err := g2g.NewGraphite(config.GraphiteHost, *interval, 10*time.Second)
		if err != nil {
			log.Fatalf("unable to connect to to graphite: %v: %v", config.GraphiteHost, err)
		}

		hostname, _ := os.Hostname()
		hostname = strings.Replace(hostname, ".", "_", -1)

		graphite.Register(fmt.Sprintf("carbon.server.%s.render_requests",
			hostname), Metrics.RenderRequests)
		graphite.Register(fmt.Sprintf("carbon.server.%s.render_errors",
			hostname), Metrics.RenderErrors)
		graphite.Register(fmt.Sprintf("carbon.server.%s.notfound",
			hostname), Metrics.NotFound)
		graphite.Register(fmt.Sprintf("carbon.server.%s.find_requests",
			hostname), Metrics.FindRequests)
		graphite.Register(fmt.Sprintf("carbon.server.%s.find_errors",
			hostname), Metrics.FindErrors)
		graphite.Register(fmt.Sprintf("carbon.server.%s.find_zero",
			hostname), Metrics.FindZero)

		for i := 0; i <= config.Buckets; i++ {
			graphite.Register(fmt.Sprintf("carbon.server.%s.requests_in_%dms_to_%dms", hostname, i*100, (i+1)*100), bucketEntry(i))
		}
	}

	listen := fmt.Sprintf(":%d", *port)
	logger.Logf("listening on %s", listen)
	err := http.ListenAndServe(listen, nil)
	if err != nil {
		log.Fatalf("%s", err)
	}
	logger.Logf("stopped")
}
Example #27
func main() {

	z := flag.String("z", "", "zipper")
	port := flag.Int("p", 8080, "port")
	l := flag.Int("l", 20, "concurrency limit")
	cacheType := flag.String("cache", "mem", "cache type to use")
	mc := flag.String("mc", "", "comma separated memcached server list")
	memsize := flag.Int("memsize", 0, "in-memory cache size in MB (0 is unlimited)")
	cpus := flag.Int("cpus", 0, "number of CPUs to use")
	tz := flag.String("tz", "", "timezone,offset to use for dates with no timezone")
	graphiteHost := flag.String("graphite", "", "graphite destination host")
	logdir := flag.String("logdir", "/var/log/carbonapi/", "logging directory")
	logtostdout := flag.Bool("stdout", false, "log also to stdout")

	flag.Parse()

	rl := rotatelogs.NewRotateLogs(
		*logdir + "/carbonapi.%Y%m%d%H%M.log",
	)

	// Optional fields must be set afterwards
	rl.LinkName = *logdir + "/carbonapi.log"

	if *logtostdout {
		log.SetOutput(io.MultiWriter(os.Stdout, rl))
	} else {
		log.SetOutput(rl)
	}

	expvar.NewString("BuildVersion").Set(BuildVersion)
	log.Println("starting carbonapi", BuildVersion)

	if p := os.Getenv("PORT"); p != "" {
		*port, _ = strconv.Atoi(p)
	}

	Limiter = make(chan struct{}, *l)

	if *z == "" {
		log.Fatal("no zipper provided")
	}

	if _, err := url.Parse(*z); err != nil {
		log.Fatal("unable to parze zipper:", err)
	}

	log.Println("using zipper", *z)
	Zipper = zipper{
		z: *z,
		client: &http.Client{
			Transport: &http.Transport{
				MaxIdleConnsPerHost: *l / 2},
		},
	}

	switch *cacheType {
	case "memcache":
		if *mc == "" {
			log.Fatal("memcache cache requested but no memcache servers provided")
		}

		servers := strings.Split(*mc, ",")
		log.Println("using memcache servers:", servers)
		queryCache = &memcachedCache{client: memcache.New(servers...)}
		findCache = &memcachedCache{client: memcache.New(servers...)}

	case "mem":
		qcache := &expireCache{cache: make(map[string]cacheElement), maxSize: uint64(*memsize * 1024 * 1024)}
		queryCache = qcache
		go queryCache.(*expireCache).cleaner()

		findCache = &expireCache{cache: make(map[string]cacheElement)}
		go findCache.(*expireCache).cleaner()

		Metrics.CacheSize = expvar.Func(func() interface{} {
			qcache.Lock()
			size := qcache.totalSize
			qcache.Unlock()
			return size
		})
		expvar.Publish("cache_size", Metrics.CacheSize)

		Metrics.CacheItems = expvar.Func(func() interface{} {
			qcache.Lock()
			size := len(qcache.keys)
			qcache.Unlock()
			return size
		})
		expvar.Publish("cache_items", Metrics.CacheItems)

	case "null":
		queryCache = &nullCache{}
		findCache = &nullCache{}
	}

	if *tz != "" {
		fields := strings.Split(*tz, ",")
		if len(fields) != 2 {
			log.Fatalf("expected two fields for tz,seconds, got %d", len(fields))
		}

		var err error
		offs, err := strconv.Atoi(fields[1])
		if err != nil {
			log.Fatalf("unable to parse seconds: %s: %s", fields[1], err)
		}

		defaultTimeZone = time.FixedZone(fields[0], offs)
		log.Printf("using fixed timezone %s, offset %d ", defaultTimeZone.String(), offs)
	}

	if *cpus != 0 {
		log.Println("using GOMAXPROCS", *cpus)
		runtime.GOMAXPROCS(*cpus)
	}

	if envhost := os.Getenv("GRAPHITEHOST") + ":" + os.Getenv("GRAPHITEPORT"); envhost != ":" || *graphiteHost != "" {

		var host string

		switch {
		case envhost != ":" && *graphiteHost != "":
			host = *graphiteHost
		case envhost != ":":
			host = envhost
		case *graphiteHost != "":
			host = *graphiteHost
		}

		log.Println("Using graphite host", host)

		// register our metrics with graphite
		graphite, err := g2g.NewGraphite(host, 60*time.Second, 10*time.Second)
		if err != nil {
			log.Fatal("unable to connect to to graphite: ", host, ":", err)
		}

		hostname, _ := os.Hostname()
		hostname = strings.Replace(hostname, ".", "_", -1)

		graphite.Register(fmt.Sprintf("carbon.api.%s.requests", hostname), Metrics.Requests)
		graphite.Register(fmt.Sprintf("carbon.api.%s.request_cache_hits", hostname), Metrics.RequestCacheHits)

		graphite.Register(fmt.Sprintf("carbon.api.%s.find_requests", hostname), Metrics.FindRequests)
		graphite.Register(fmt.Sprintf("carbon.api.%s.find_cache_hits", hostname), Metrics.FindCacheHits)

		graphite.Register(fmt.Sprintf("carbon.api.%s.render_requests", hostname), Metrics.RenderRequests)

		graphite.Register(fmt.Sprintf("carbon.api.%s.memcache_timeouts", hostname), Metrics.MemcacheTimeouts)

		if Metrics.CacheSize != nil {
			graphite.Register(fmt.Sprintf("carbon.api.%s.cache_size", hostname), Metrics.CacheSize)
			graphite.Register(fmt.Sprintf("carbon.api.%s.cache_items", hostname), Metrics.CacheItems)
		}
	}

	render := func(w http.ResponseWriter, r *http.Request) {
		var stats renderStats
		t0 := time.Now()
		renderHandler(w, r, &stats)
		since := time.Since(t0)
		log.Println(r.RequestURI, since.Nanoseconds()/int64(time.Millisecond), stats.zipperRequests)
	}

	http.HandleFunc("/render/", corsHandler(render))
	http.HandleFunc("/render", corsHandler(render))

	http.HandleFunc("/metrics/find/", corsHandler(findHandler))
	http.HandleFunc("/metrics/find", corsHandler(findHandler))

	http.HandleFunc("/info/", passthroughHandler)
	http.HandleFunc("/info", passthroughHandler)

	http.HandleFunc("/lb_check", lbcheckHandler)
	http.HandleFunc("/", usageHandler)

	log.Println("listening on port", *port)
	log.Fatalln(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
}
Example #28
// InitAgent initializes the agent within vttablet.
func InitAgent(
	tabletAlias topo.TabletAlias,
	dbcfgs dbconfigs.DBConfigs,
	mycnf *mysqlctl.Mycnf,
	dbConfigsFile, dbCredentialsFile string,
	port, securePort int,
	mycnfFile, overridesFile string) (err error) {
	schemaOverrides := loadSchemaOverrides(overridesFile)

	topoServer := topo.GetServer()

	// Start the binlog server service, disabled at start.
	binlogServer = mysqlctl.NewBinlogServer(mycnf)
	mysqlctl.RegisterBinlogServerService(binlogServer)

	// Start the binlog player services, not playing at start.
	binlogPlayerMap = NewBinlogPlayerMap(topoServer, &dbcfgs.Dba)
	RegisterBinlogPlayerMap(binlogPlayerMap)

	// Compute the bind addresses
	bindAddr := fmt.Sprintf(":%v", port)
	secureAddr := ""
	if securePort != 0 {
		secureAddr = fmt.Sprintf(":%v", securePort)
	}

	exportedType := expvar.NewString("tablet-type")

	// Action agent listens to changes in zookeeper and makes
	// modifications to this tablet.
	agent, err = tm.NewActionAgent(topoServer, tabletAlias, mycnfFile, dbConfigsFile, dbCredentialsFile)
	if err != nil {
		return err
	}
	agent.AddChangeCallback(func(oldTablet, newTablet topo.Tablet) {
		if newTablet.IsServingType() {
			if dbcfgs.App.Dbname == "" {
				dbcfgs.App.Dbname = newTablet.DbName()
			}
			dbcfgs.App.KeyRange = newTablet.KeyRange
			dbcfgs.App.Keyspace = newTablet.Keyspace
			dbcfgs.App.Shard = newTablet.Shard
			// Transitioning from replica to master, first disconnect
			// existing connections. "false" indicates that clients must
			// re-resolve their endpoint before reconnecting.
			if newTablet.Type == topo.TYPE_MASTER && oldTablet.Type != topo.TYPE_MASTER {
				ts.DisallowQueries(false)
			}
			qrs := ts.LoadCustomRules()
			if dbcfgs.App.KeyRange.IsPartial() {
				qr := ts.NewQueryRule("enforce keyspace_id range", "keyspace_id_not_in_range", ts.QR_FAIL_QUERY)
				qr.AddPlanCond(sqlparser.PLAN_INSERT_PK)
				err = qr.AddBindVarCond("keyspace_id", true, true, ts.QR_NOTIN, dbcfgs.App.KeyRange)
				if err != nil {
					log.Warningf("Unable to add keyspace rule: %v", err)
				} else {
					qrs.Add(qr)
				}
			}
			ts.AllowQueries(dbcfgs.App, schemaOverrides, qrs)
			mysqlctl.EnableUpdateStreamService(string(newTablet.Type), dbcfgs)
			if newTablet.Type != topo.TYPE_MASTER {
				ts.StartRowCacheInvalidation()
			}
		} else {
			ts.DisallowQueries(false)
			ts.StopRowCacheInvalidation()
			mysqlctl.DisableUpdateStreamService()
		}

		exportedType.Set(string(newTablet.Type))

		// BinlogServer is only enabled for replicas
		if newTablet.Type == topo.TYPE_REPLICA {
			if !mysqlctl.IsBinlogServerEnabled(binlogServer) {
				mysqlctl.EnableBinlogServerService(binlogServer, dbcfgs.App.Dbname)
			}
		} else {
			if mysqlctl.IsBinlogServerEnabled(binlogServer) {
				mysqlctl.DisableBinlogServerService(binlogServer)
			}
		}

		// See if we need to start or stop any binlog player
		if newTablet.Type == topo.TYPE_MASTER {
			binlogPlayerMap.RefreshMap(newTablet)
		} else {
			binlogPlayerMap.StopAllPlayers()
		}
	})

	mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl)
	if err := agent.Start(bindAddr, secureAddr, mysqld.Addr()); err != nil {
		return err
	}

	// register the RPC services from the agent
	agent.RegisterQueryService(mysqld)

	return nil
}
Example #29
func main() {
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatal(err)
	}

	queueName := fmt.Sprintf("cb-event-forwarder:%s:%d", hostname, os.Getpid())

	configLocation := "/etc/cb/integrations/event-forwarder/cb-event-forwarder.conf"
	if flag.NArg() > 0 {
		configLocation = flag.Arg(0)
	}
	config, err = ParseConfig(configLocation)
	if err != nil {
		log.Fatal(err)
	}

	if *checkConfiguration {
		if err := startOutputs(); err != nil {
			log.Fatal(err)
		}
		os.Exit(0)
	}

	addrs, err := net.InterfaceAddrs()

	if err != nil {
		log.Fatal("Could not get IP addresses")
	}

	log.Printf("cb-event-forwarder version %s starting", version)

	exportedVersion := expvar.NewString("version")
	if *debug {
		exportedVersion.Set(version + " (debugging on)")
		log.Printf("*** Debugging enabled: messages may be sent via http://%s:%d/debug/sendmessage ***",
			hostname, config.HTTPServerPort)
	} else {
		exportedVersion.Set(version)
	}
	expvar.Publish("debug", expvar.Func(func() interface{} { return *debug }))

	for _, addr := range addrs {
		if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			log.Printf("Interface address %s", ipnet.IP.String())
		}
	}

	log.Printf("Configured to capture events: %v", config.EventTypes)
	if err := startOutputs(); err != nil {
		log.Fatalf("Could not startOutputs: %s", err)
	}

	dirs := [...]string{
		"/usr/share/cb/integrations/event-forwarder/content",
		"./static",
	}

	for _, dirname := range dirs {
		finfo, err := os.Stat(dirname)
		if err == nil && finfo.IsDir() {
			http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(dirname))))
			http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				http.Redirect(w, r, "/static/", 301)
			})
			log.Printf("Diagnostics available via HTTP at http://%s:%d/", hostname, config.HTTPServerPort)
			break
		}
	}

	if *debug {
		http.HandleFunc("/debug/sendmessage", func(w http.ResponseWriter, r *http.Request) {
			if r.Method == "POST" {
				msg := make([]byte, r.ContentLength)
				_, err := r.Body.Read(msg)
				var parsedMsg map[string]interface{}

				err = json.Unmarshal(msg, &parsedMsg)
				if err != nil {
					errMsg, _ := json.Marshal(map[string]string{"status": "error", "error": err.Error()})
					_, _ = w.Write(errMsg)
					return
				}

				err = outputMessage(parsedMsg)
				if err != nil {
					errMsg, _ := json.Marshal(map[string]string{"status": "error", "error": err.Error()})
					_, _ = w.Write(errMsg)
					return
				}
				log.Printf("Sent test message: %s\n", string(msg))
			} else {
				err = outputMessage(map[string]interface{}{
					"type":    "debug.message",
					"message": fmt.Sprintf("Debugging test message sent at %s", time.Now().String()),
				})
				if err != nil {
					errMsg, _ := json.Marshal(map[string]string{"status": "error", "error": err.Error()})
					_, _ = w.Write(errMsg)
					return
				}
				log.Println("Sent test debugging message")
			}

			errMsg, _ := json.Marshal(map[string]string{"status": "success"})
			_, _ = w.Write(errMsg)
		})
	}

	go http.ListenAndServe(fmt.Sprintf(":%d", config.HTTPServerPort), nil)

	log.Println("Starting AMQP loop")
	for {
		err := messageProcessingLoop(config.AMQPURL(), queueName, "go-event-consumer")
		log.Printf("AMQP loop exited: %s. Sleeping for 30 seconds then retrying.", err)
		time.Sleep(30 * time.Second)
	}
}
Example #30
package main

import (
	"expvar"
	"net/http"
)

var exportedValue = expvar.NewString("hello")

func main() {
	exportedValue.Set("HELLO")
	http.ListenAndServe(":9090", nil)
}
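Because importing expvar registers a handler for /debug/vars on http.DefaultServeMux, the string published above becomes readable over HTTP once this server is running. A small client sketch (assuming the example server is listening on localhost:9090):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:9090/debug/vars")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// /debug/vars serves a single JSON object; "hello" is the string
	// published by the example above.
	var vars map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		log.Fatal(err)
	}
	fmt.Println("hello =", vars["hello"])
}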