// The simplest usage of background bulk indexing
func ExampleBulkIndexer_simple() {
	c := elastigo.NewConn()

	indexer := c.NewBulkIndexerErrors(10, 60)
	indexer.Start()
	indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`, true)
	indexer.Stop()
}
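
// The simple example above drops send errors on the floor. With
// NewBulkIndexerErrors, failed flushes are retried and surfaced on an
// error channel; the sketch below drains it in a goroutine. The
// ErrorChannel / ErrorBuffer names follow mattbaird/elastigo's corebulk
// API and should be verified against your vendored copy.
func ExampleBulkIndexer_errorChannel() {
	c := elastigo.NewConn()

	indexer := c.NewBulkIndexerErrors(10, 60)
	indexer.Start()
	go func() {
		for errBuf := range indexer.ErrorChannel {
			// errBuf.Buf holds the payload that failed; errBuf.Err the cause
			fmt.Println("bulk send failed:", errBuf.Err)
		}
	}()
	indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`, true)
	indexer.Stop()
}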
// Inspecting the response
func ExampleBulkIndexer_responses() {
	c := elastigo.NewConn()

	indexer := c.NewBulkIndexer(10)
	// Create a custom Sender Func, to allow inspection of response/error
	indexer.Sender = func(buf *bytes.Buffer) error {
		// buf is the buffer of docs about to be written
		respJson, err := c.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			// handle it better than this
			fmt.Println(string(respJson))
		}
		return err
	}
	indexer.Start()
	for i := 0; i < 20; i++ {
		indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`, true)
	}
	indexer.Stop()
}
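
// The Sender above only prints the raw body when the HTTP call itself
// fails, but Elasticsearch can also report per-document failures inside
// a successful reply. A sketch of decoding those item-level results
// follows; the bulkResponse type is hypothetical, shaped after the
// documented {"errors": ..., "items": [...]} _bulk reply, and needs
// encoding/json imported.
func ExampleBulkIndexer_itemErrors() {
	type bulkResponse struct {
		Errors bool                         `json:"errors"`
		Items  []map[string]json.RawMessage `json:"items"`
	}

	c := elastigo.NewConn()

	indexer := c.NewBulkIndexer(10)
	indexer.Sender = func(buf *bytes.Buffer) error {
		body, err := c.DoCommand("POST", "/_bulk", nil, buf)
		if err != nil {
			return err
		}
		var resp bulkResponse
		if jsonErr := json.Unmarshal(body, &resp); jsonErr == nil && resp.Errors {
			fmt.Printf("%d items sent, at least one failed\n", len(resp.Items))
		}
		return nil
	}
	indexer.Start()
	indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`, true)
	indexer.Stop()
}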
// Example #3: the carbon-tagger main(), wiring two background bulk indexers
func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		dieIfError(err)
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
		fmt.Println("cpuprof on")
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		dieIfError(err)
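		// defers run LIFO, so the heap profile is written before f closes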
		defer f.Close()
		defer pprof.WriteHeapProfile(f)
	}

	stats_id = config.String("stats.id", "myhost")
	stats_flush_interval = config.Int("stats.flush_interval", 10)
	err := config.Parse(*configFile)
	dieIfError(err)

	in_conns_current = NewGauge("unit_is_Conn.direction_is_in.type_is_open", false)
	in_conns_broken_total = NewCounter("unit_is_Conn.direction_is_in.type_is_broken", false)
	in_metrics_proto1_good_total = NewCounter("unit_is_Metric.proto_is_1.direction_is_in.type_is_good", false) // no thorough check
	in_metrics_proto2_good_total = NewCounter("unit_is_Metric.proto_is_2.direction_is_in.type_is_good", false)
	in_metrics_proto1_bad_total = NewCounter("unit_is_Err.orig_unit_is_Metric.type_is_invalid.proto_is_1.direction_is_in", false)
	in_metrics_proto2_bad_total = NewCounter("unit_is_Err.orig_unit_is_Metric.type_is_invalid.proto_is_2.direction_is_in", false)
	in_lines_bad_total = NewCounter("unit_is_Err.orig_unit_is_Msg.type_is_invalid_line.direction_is_in", false)
	num_seen_proto1 = NewGauge("unit_is_Metric.proto_is_1.type_is_tracked", true)
	num_seen_proto2 = NewGauge("unit_is_Metric.proto_is_2.type_is_tracked", true)
	pending_backlog_proto1 = NewCounter("unit_is_Metric.proto_is_1.type_is_pending_in_backlog", true)
	pending_backlog_proto2 = NewCounter("unit_is_Metric.proto_is_2.type_is_pending_in_backlog", true)
	pending_es_proto1 = NewGauge("unit_is_Metric.proto_is_1.type_is_pending_in_es", true)
	pending_es_proto2 = NewGauge("unit_is_Metric.proto_is_2.type_is_pending_in_es", true)

	lines_read = make(chan []byte)
	proto1_read = make(chan string, *es_max_backlog)
	proto2_read = make(chan m20.MetricSpec, *es_max_backlog)

	// connect to Elasticsearch, where the tags are stored
	es := elastigo.NewConn()
	es.Domain = *es_host
	es.Port = strconv.Itoa(*es_port)

	indexer1 := es.NewBulkIndexer(4)
	indexer1.BulkMaxDocs = *es_max_pending
	indexer1.BufferDelayMax = time.Duration(*es_flush_int) * time.Second
	indexer1.Start()

	indexer2 := es.NewBulkIndexer(4)
	indexer2.BulkMaxDocs = *es_max_pending
	indexer2.BufferDelayMax = time.Duration(*es_flush_int) * time.Second
	indexer2.Start()

	go processInputLines()
	// one tracking goroutine per protocol; the bulk indexers run their own pool of sender workers
	go trackProto1(indexer1, *es_index_name)
	go trackProto2(indexer2, *es_index_name)

	statsAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", *stats_host, *stats_port))
	dieIfError(err)
	go metrics.Graphite(metrics.DefaultRegistry, time.Duration(*stats_flush_interval)*time.Second, "", statsAddr)

	// listen for incoming metrics
	addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf(":%d", *in_port))
	dieIfError(err)
	listener, err := net.ListenTCP("tcp", addr)
	dieIfError(err)
	defer listener.Close()
	go func() {
		exp.Exp(metrics.DefaultRegistry)
		fmt.Printf("carbon-tagger %s expvar web on %s\n", *stats_id, *stats_http_addr)
		err := http.ListenAndServe(*stats_http_addr, nil)
		if err != nil {
			fmt.Println("Error opening http endpoint:", err.Error())
			os.Exit(1)
		}
	}()

	fmt.Printf("carbon-tagger %s listening on %d\n", *stats_id, *in_port)
	for {
		// would be nice to have a metric showing highest amount of connections seen per interval
		conn_in, err := listener.Accept()
		if err != nil {
			fmt.Fprint(os.Stderr, err)
			continue
		}
		go handleClient(conn_in)
	}
}
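
// main relies on a dieIfError helper that is not shown in this excerpt;
// a minimal sketch of what it plausibly looks like (the real
// carbon-tagger helper may differ):
func dieIfError(err error) {
	if err != nil {
		fmt.Fprintf(os.Stderr, "fatal: %s\n", err)
		os.Exit(1)
	}
}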