Exemple #1
0
// NewESConnection creates a new ESConnection with default timeout and size.
func NewESConnection(host string, port string) *ESConnection {
	conn := goes.NewConnection(host, port)
	es := &ESConnection{
		Connection: conn,
		Timeout:    "1m", // default scroll timeout
		Size:       100,  // default batch size
	}
	return es
}
// NewElasticTest builds an ElasticTest backed by a goes connection parsed
// from a "host:port" DSN.
//
// The previous implementation indexed splitted[1] unconditionally and
// panicked with an index-out-of-range when the DSN contained no colon;
// a missing port now falls back to the elasticsearch default, 9200.
func NewElasticTest(dsn string) *ElasticTest {
	parts := strings.Split(dsn, ":")
	host := parts[0]
	port := "9200"
	if len(parts) > 1 {
		port = parts[1]
	}

	db := goes.NewConnection(host, port)

	return &ElasticTest{dbConnection: db}
}
Exemple #3
0
// NewESClient wires up both an elastigo connection and a goes batch
// connection against the configured elasticsearch host and returns them
// wrapped as a DbClient.
func NewESClient() DbClient {
	host := eshost1

	legacy := elastigo.NewConn()
	legacy.Domain = host

	client := &ESClient{
		conn:      legacy,
		batchconn: goes.NewConnection(host, "9200"),
	}
	return client
}
Exemple #4
0
// main runs the full pipeline against a local elasticsearch instance:
// create the index, convert the OSM data, then run the points-around
// query. Log messages are in Russian: "Начали" = "Started",
// "Закончили" = "Finished".
func main() {

	log.Println("Начали")

	// Package-level connection shared by the steps below — presumably
	// CreateIndex/OsmConverter/PointsAround read it; verify against their
	// definitions (not visible here).
	connection = goes.NewConnection("localhost", "9200")

	CreateIndex()

	OsmConverter()

	PointsAround()

	log.Println("Закончили")
}
// getConnection returns a goes connection to the test elasticsearch
// instance. Host and port come from the TEST_ELASTICSEARCH_HOST and
// TEST_ELASTICSEARCH_PORT environment variables, falling back to the
// package defaults ES_HOST / ES_PORT when unset.
func getConnection() (conn *goes.Connection) {
	host := os.Getenv("TEST_ELASTICSEARCH_HOST")
	if host == "" {
		host = ES_HOST
	}

	port := os.Getenv("TEST_ELASTICSEARCH_PORT")
	if port == "" {
		port = ES_PORT
	}

	return goes.NewConnection(host, port)
}
Exemple #6
0
// Init configures the logger from a JSON config string, e.g.
// {"dsn":"http://localhost:9200/","level":1}.
// The DSN must be a parseable URL with a non-empty path (the index prefix)
// and an explicit host:port. On success el.Connection is set; any failure
// is returned and el.Connection is left untouched.
func (el *esLogger) Init(jsonconfig string) error {
	if err := json.Unmarshal([]byte(jsonconfig), el); err != nil {
		return err
	}
	// Guard clauses replace the original else-if chain: every branch
	// terminated, so early returns keep the happy path left-aligned.
	if el.DSN == "" {
		return errors.New("empty dsn")
	}
	u, err := url.Parse(el.DSN)
	if err != nil {
		return err
	}
	if u.Path == "" {
		return errors.New("missing prefix")
	}
	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		return err
	}
	el.Connection = goes.NewConnection(host, port)
	return nil
}
Exemple #7
0
// main streams documents out of elasticsearch via the scan/scroll API and
// writes them to stdout either as raw JSON records (-raw) or as delimited
// columns selected with -f.
func main() {

	host := flag.String("host", "localhost", "elasticsearch host")
	port := flag.String("port", "9200", "elasticsearch port")
	indicesString := flag.String("indices", "", "indices to search (or all)")
	fieldsString := flag.String("f", "_id _index", "field or fields space separated")
	timeout := flag.String("timeout", "10m", "scroll timeout")
	size := flag.Int("size", 10000, "scroll batch size")
	nullValue := flag.String("null", "NOT_AVAILABLE", "value for empty fields")
	separator := flag.String("separator", "|", "separator to use for multiple field values")
	delimiter := flag.String("delimiter", "\t", "column delimiter")
	limit := flag.Int("limit", -1, "maximum number of docs to return (return all by default)")
	version := flag.Bool("v", false, "prints current program version")
	cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
	queryString := flag.String("query", "", "custom query to run")
	raw := flag.Bool("raw", false, "stream out the raw json records")
	header := flag.Bool("header", false, "output header row with the field names")
	singleValue := flag.Bool("1", false, "one value per line (works only with a single column in -f)")
	zeroAsNull := flag.Bool("zero-as-null", false, "treat zero length strings as null values")
	precision := flag.Int("precision", 0, "precision for numeric output")

	flag.Parse()

	// Handle -v before anything else so a bare version query never starts
	// a CPU profile (previously the profile was started first and then
	// abandoned by os.Exit).
	if *version {
		fmt.Println(estab.Version)
		os.Exit(0)
	}

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		// StartCPUProfile can fail (e.g. profiling already active); the
		// error was previously ignored.
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	var query map[string]interface{}
	if *queryString == "" {
		query = map[string]interface{}{
			"query": map[string]interface{}{
				"match_all": map[string]interface{}{},
			},
		}
	} else {
		err := json.Unmarshal([]byte(*queryString), &query)
		if err != nil {
			log.Fatal(err)
		}
	}

	indices := strings.Fields(*indicesString)
	fields := strings.Fields(*fieldsString)

	if *raw && *singleValue {
		log.Fatal("-1 xor -raw ")
	}

	if *singleValue && len(fields) > 1 {
		log.Fatalf("-1 works only with a single column, %d given: %s\n", len(fields), strings.Join(fields, " "))
	}

	// In column mode elasticsearch is asked to return only the requested
	// fields; raw mode streams whole documents.
	if !*raw {
		query["fields"] = fields
	}

	conn := goes.NewConnection(*host, *port)
	scanResponse, err := conn.Scan(query, indices, []string{""}, *timeout, *size)
	if err != nil {
		log.Fatal(err)
	}

	w := bufio.NewWriter(os.Stdout)
	defer w.Flush()
	i := 0 // documents emitted so far, compared against -limit

	if *header {
		fmt.Fprintln(w, strings.Join(fields, *delimiter))
	}

	for {
		scrollResponse, err := conn.Scroll(scanResponse.ScrollId, *timeout)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if len(scrollResponse.Hits.Hits) == 0 {
			break
		}
		for _, hit := range scrollResponse.Hits.Hits {
			if i == *limit {
				return
			}
			if *raw {
				b, err := json.Marshal(hit)
				if err != nil {
					log.Fatal(err)
				}
				fmt.Fprintln(w, string(b))
				continue
			}

			var columns []string
			for _, f := range fields {
				var c []string
				switch f {
				case "_id":
					c = append(c, hit.Id)
				case "_index":
					c = append(c, hit.Index)
				case "_type":
					c = append(c, hit.Type)
				case "_score":
					c = append(c, strconv.FormatFloat(hit.Score, 'f', 6, 64))
				default:
					switch value := hit.Fields[f].(type) {
					case nil:
						c = []string{*nullValue}
					case []interface{}:
						for _, e := range value {
							// Bind the value in the type switch instead of
							// re-asserting e.(string)/e.(float64) per case.
							switch e := e.(type) {
							case string:
								if e == "" && *zeroAsNull {
									c = append(c, *nullValue)
								} else {
									c = append(c, e)
								}
							case float64:
								c = append(c, strconv.FormatFloat(e, 'f', *precision, 64))
							}
						}
					default:
						log.Fatalf("unknown field type in response: %+v\n", hit.Fields[f])
					}
				}
				if *singleValue {
					for _, value := range c {
						fmt.Fprintln(w, value)
					}
				} else {
					columns = append(columns, strings.Join(c, *separator))
				}
			}
			if !*singleValue {
				fmt.Fprintln(w, strings.Join(columns, *delimiter))
			}
			i++
		}
	}
}
Exemple #8
0
// main runs elasticsearch more-like-this queries, either for a single
// -like text or for every row of a delimited -file, fanning the file rows
// out to a pool of workers and funneling results to stdout.
func main() {

	esHost := flag.String("host", "localhost", "elasticsearch host")
	esPort := flag.String("port", "9200", "elasticsearch port")
	likeText := flag.String("like", "", "more like this queries like-text")
	likeFile := flag.String("file", "", "input file")
	fileColumn := flag.String("columns", "1", "which column to use as like-text")
	columnDelimiter := flag.String("delimiter", "\t", "column delimiter of the input file")
	columnNull := flag.String("null", "NOT_AVAILABLE", "column value to ignore")
	indicesString := flag.String("indices", "", "index or indices to query")
	indexFields := flag.String("fields", "content.245.a content.245.b", "index fields to query")
	minTermFreq := flag.Int("min-term-freq", 1, "min term frequency")
	maxQueryTerms := flag.Int("max-query-terms", 25, "max query terms")
	size := flag.Int("size", 5, "maximum number of similar records to report")
	numWorkers := flag.Int("workers", runtime.NumCPU(), "number of workers to use")
	version := flag.Bool("v", false, "prints current program version")
	cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")

	// Unexported name for a function-local helper (was PrintUsage).
	var printUsage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}

	flag.Parse()

	if *version {
		fmt.Printf("%s\n", esmlt.Version)
		return
	}

	// One of -like or -file is required.
	if *likeText == "" && *likeFile == "" {
		printUsage()
		os.Exit(1)
	}

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	runtime.GOMAXPROCS(*numWorkers)

	conn := goes.NewConnection(*esHost, *esPort)
	fields := strings.Fields(*indexFields)
	indices := strings.Fields(*indicesString)

	if *likeFile != "" {
		if _, err := os.Stat(*likeFile); os.IsNotExist(err) {
			log.Fatalf("no such file or directory: %s\n", *likeFile)
		}

		file, err := os.Open(*likeFile)
		if err != nil {
			log.Fatal(err)
		}
		defer file.Close()

		scanner := bufio.NewScanner(file)
		projector, err := esmlt.ParseIndices(*fileColumn)
		if err != nil {
			log.Fatalf("could not parse column indices: %s\n", *fileColumn)
		}

		queue := make(chan *Work)
		results := make(chan [][]string)
		done := make(chan bool)

		writer := bufio.NewWriter(os.Stdout)
		defer writer.Flush()
		go FanInWriter(writer, results, done)

		var wg sync.WaitGroup
		for i := 0; i < *numWorkers; i++ {
			wg.Add(1)
			go Worker(queue, results, &wg)
		}

		for scanner.Scan() {
			values := strings.Split(scanner.Text(), *columnDelimiter)
			likeText, err := esmlt.ConcatenateValuesNull(values, projector, *columnNull)
			if err != nil {
				log.Fatal(err)
			}

			work := Work{
				Indices:       indices,
				Connection:    conn,
				Fields:        fields,
				NullValue:     *columnNull,
				LikeText:      likeText,
				MinTermFreq:   *minTermFreq,
				MaxQueryTerms: *maxQueryTerms,
				Size:          *size,
				Values:        values,
			}
			queue <- &work
		}

		if err := scanner.Err(); err != nil {
			log.Fatal(err)
		}

		// Drain: no more work, wait for workers, then close results so the
		// fan-in writer can finish. Give it up to a second to signal done.
		close(queue)
		wg.Wait()
		close(results)
		select {
		case <-time.After(time.Second): // was the magic constant 1e9 ns
		case <-done:
		}
		return
	}

	if *likeText != "" {
		var query = map[string]interface{}{
			"query": map[string]interface{}{
				"more_like_this": map[string]interface{}{
					"fields":          fields,
					"like_text":       *likeText,
					"min_term_freq":   *minTermFreq,
					"max_query_terms": *maxQueryTerms,
				},
			},
			"size": *size,
		}

		work := Work{
			Indices:       indices,
			Connection:    conn,
			Fields:        fields,
			NullValue:     *columnNull,
			LikeText:      *likeText,
			MinTermFreq:   *minTermFreq,
			MaxQueryTerms: *maxQueryTerms,
			Size:          *size,
			Values:        []string{},
		}
		results := QueryField(&work, &query)
		for _, result := range results {
			fmt.Println(strings.Join(result, "\t"))
		}
		return
	}
}
Exemple #9
0
package main

import (
	// "fmt"
	"net/http"
	"net/url"
	// "html"
	"encoding/json"
	"github.com/belogik/goes"
	"github.com/julienschmidt/httprouter"
	"log"
)

// conn is the shared package-level elasticsearch connection. The hostname
// "elasticsearch" is presumably resolvable in the deployment environment
// (e.g. a docker-compose service name) — TODO confirm.
var conn *goes.Connection = goes.NewConnection("elasticsearch", "9200")

// formatJson writes resp to w as JSON. A non-nil err from the upstream
// elasticsearch call is treated as fatal for this request and panics
// (net/http's serve loop recovers per-request panics).
func formatJson(w http.ResponseWriter, resp *goes.Response, err error) {
	if err != nil {
		panic(err)
	}

	// The Encode error was previously discarded; log it so broken
	// responses are at least visible.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		log.Println("encoding response:", err)
	}
}

// Index is a trivial health-check style handler that reports status 200
// as a small JSON body.
func Index(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	resp := map[string]string{
		"status": "200",
	}

	// The Encode error was previously discarded; log it.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		log.Println("encoding response:", err)
	}
}
Exemple #10
0
// main polls an LSI RAID controller via its CLI on a fixed interval,
// parses the per-slot drive status out of the -PDList output, and indexes
// one document per slot into elasticsearch.
func main() {
	flag.Parse()

	host, port, err := net.SplitHostPort(*destination)

	if err != nil {
		log.Fatalf("Error parsing destination: %v", err)
	}

	es := goes.NewConnection(host, port)

	d, err := time.ParseDuration(*interval)

	if err != nil {
		log.Fatalf("Error parsing interval: %v", err)
	}

	// Compile once, outside the polling loop — previously this regexp was
	// recompiled on every iteration of the infinite loop.
	newline := regexp.MustCompile("\r?\n")

	for {
		out, err := exec.Command(*cli_path, "-PDList", "-a0").Output()
		if err != nil {
			log.Fatalf("Running LSI command failed: %v", err)
		}

		var (
			slots []SlotStatus
			slot  SlotStatus
		)

		for _, l := range newline.Split(string(out), -1) {
			if !line_matcher.MatchString(l) {
				continue
			}

			field, value := SplitFieldValue(l)

			switch field {
			case "Slot Number":
				// A new "Slot Number" line starts the next slot; flush the
				// previous one (except before the very first slot).
				number := FormatNumber(value)

				if number > 0 {
					slots = append(slots, slot)
				}
				slot = SlotStatus{Number: number}
			case "Inquiry Data":
				slot.SplitInquiryData(l)
			case "Media Error Count":
				slot.MediaErrorCount = FormatNumber(value)
			case "Other Error Count":
				slot.OtherErrorCount = FormatNumber(value)
			case "Firmware state":
				slot.State = value
			case "Drive has flagged a S.M.A.R.T alert":
				// Anything other than an explicit "No" counts as an alert.
				slot.SmartAlert = value != "No"
			}
		}

		// Append last slot
		slots = append(slots, slot)

		for _, s := range slots {
			_, err = es.Index(s.Document(), url.Values{})

			if err != nil {
				log.Fatalf("Error indexing results: %s", err)
			}
		}

		log.Printf("Done indexing. Sleeping for %s", *interval)
		time.Sleep(d)
	}
}
Exemple #11
0
// GetConnection returns a fresh goes connection using the host and port
// reported by getElasticHostPort.
func GetConnection() *goes.Connection {
	host, port := getElasticHostPort()
	return goes.NewConnection(host, port)
}
Exemple #12
0
// GetConnection returns a new goes connection to the elasticsearch
// endpoint fixed at build time by the package constants ES_HOST and
// ES_PORT.
func GetConnection() *goes.Connection {
	return goes.NewConnection(ES_HOST, ES_PORT)
}