func main() { c := elastigo.NewConn() log.SetFlags(log.LstdFlags) flag.Parse() fmt.Println("host = ", *host) // Set the Elasticsearch Host to Connect to c.Domain = *host // Index a document _, err := c.Index("testindex", "user", "docid_1", nil, `{"name":"bob"}`) exitIfErr(err) // Index a doc using a map of values _, err = c.Index("testindex", "user", "docid_2", nil, map[string]string{"name": "venkatesh"}) exitIfErr(err) // Index a doc using Structs _, err = c.Index("testindex", "user", "docid_3", nil, MyUser{"wanda", 22}) exitIfErr(err) // Search Using Raw json String searchJson := `{ "query" : { "term" : { "Name" : "wanda" } } }` out, err := c.Search("testindex", "user", nil, searchJson) if len(out.Hits.Hits) == 1 { fmt.Println("%v", out.Hits.Hits[0].Source) } exitIfErr(err) }
// The simplest usage of background bulk indexing func ExampleBulkIndexer_simple() { c := elastigo.NewConn() indexer := c.NewBulkIndexerErrors(10, 60) indexer.Start() indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`, true) indexer.Stop() }
func hitEsearch() { perfMon := Performance{} for i := 0; i < Requests; i++ { e := elastigo.NewConn() log.SetFlags(log.LstdFlags) flag.Parse() //get random hosts if randInt(0, 1) > 0 { e.Domain = *host } else { e.Domain = *host2 } //get random search word search := b[randInt(0, len(b))] t1 := time.Now() out, err := elastigo.Search("stuff").Type("thing"). Size("100").Search(search).Result(e) _, err = json.Marshal(out) t2 := time.Now() duration := t2.Sub(t1) //s := duration.String() var castToInt64 int64 = duration.Nanoseconds() / 1e6 if castToInt64 > perfMon.MaxLatency { perfMon.MaxLatency = castToInt64 } if out.Took > perfMon.MaxEsearch { perfMon.MaxEsearch = out.Took } if castToInt64 < perfMon.MinLatency { perfMon.MinLatency = castToInt64 } if out.Took < perfMon.MinEsearch { perfMon.MinEsearch = out.Took } if perfMon.MinEsearch == 0 { perfMon.MinEsearch = out.Took perfMon.AvgEsearch = out.Took } else { perfMon.AvgEsearch = (perfMon.AvgEsearch + out.Took) / 2 } if perfMon.MinLatency == 0 { perfMon.MinLatency = castToInt64 perfMon.AvgLatency = castToInt64 } else { perfMon.AvgLatency = (perfMon.AvgLatency + castToInt64) / 2 } checkErr(err, "search err:") } fmt.Printf("Thread Log: %v\n", perfMon) logPerf(perfMon) c <- 1 }
// The inspecting the response func ExampleBulkIndexer_responses() { c := elastigo.NewConn() indexer := c.NewBulkIndexer(10) // Create a custom Sender Func, to allow inspection of response/error indexer.Sender = func(buf *bytes.Buffer) error { // @buf is the buffer of docs about to be written respJson, err := c.DoCommand("POST", "/_bulk", nil, buf) if err != nil { // handle it better than this fmt.Println(string(respJson)) } return err } indexer.Start() for i := 0; i < 20; i++ { indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`, true) } indexer.Stop() }
// The simplest usage of background bulk indexing with error channel.
// ExampleBulkIndexer_errorsmarter drains the indexer's ErrorChannel in a
// goroutine while a ticker periodically clears a low error count, then queues
// 20 docs and performs a handshake shutdown.
// NOTE(review): relies on a package-level `done` channel declared elsewhere
// in this file; from its use below it appears to carry reply channels
// (`done <- reply` with reply of type chan struct{}) — confirm against the
// declaration.
func ExampleBulkIndexer_errorsmarter() {
	c := elastigo.NewConn()
	indexer := c.NewBulkIndexerErrors(10, 60)
	indexer.Start()
	errorCt := 0 // use sync.atomic or something if you need
	timer := time.NewTicker(time.Minute * 3)
	go func() {
		for {
			select {
			case _ = <-timer.C:
				// Treat fewer than 2 errors per tick as noise and clear the
				// counter. (Note the reset only fires when the count is
				// already low — counts >= 2 are left untouched.)
				if errorCt < 2 {
					errorCt = 0
				}
			// XXX(j): Totally unsure what this thing thought it was doing in the
			// first place, looks like it was stealing the stop message from the
			// indexer. Don't trust this example!
			case _ = <-done:
				return
			}
		}
	}()
	go func() {
		// Count and print every error the bulk indexer reports.
		for errBuf := range indexer.ErrorChannel {
			errorCt++
			fmt.Println(errBuf.Err)
			// log to disk? db? ???? Panic
		}
	}()
	for i := 0; i < 20; i++ {
		indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`, true)
	}
	// Shutdown handshake: send a reply channel on done, wait for the
	// acknowledgment, then close done to stop the ticker goroutine.
	reply := make(chan struct{})
	done <- reply
	<-reply
	close(done)
}