Example No. 1
func (mStore MetricStore) ProcessBuffer(c <-chan metricdef.IndvMetric, workerId int) {
	buf := make([]metricdef.IndvMetric, 0)

	// flush buffer every second
	t := time.NewTicker(time.Second)
	for {
		select {
		case b := <-c:
			if b.Name != "" {
				logger.Debugf("worker %d appending to buffer", workerId)
				buf = append(buf, b)
			}
		case <-t.C:
			// A possibility: it might be worth it to hack up the
			// carbon lib to allow batch submissions of metrics if
			// doing them individually proves to be too slow

			//copy contents of buffer
			currentBuf := make([]metricdef.IndvMetric, len(buf))
			copy(currentBuf, buf)
			buf = nil
			logger.Debugf("worker %d flushing %d items in buffer now", workerId, len(currentBuf))
			for _, backend := range mStore.Backends {
				if err := backend.SendMetrics(&currentBuf); err != nil {
					logger.Errorf(err.Error())
				} else {
					logger.Debugf("worker %d flushed metrics buffer to %s backend", workerId, backend.Type())
				}
			}
		}
	}
}
Example No. 2
func processMetrics(d *amqp.Delivery) error {
	metrics := make([]*metricdef.IndvMetric, 0)
	if err := json.Unmarshal(d.Body, &metrics); err != nil {
		return err
	}

	logger.Debugf("The parsed out json: %v", metrics)

	for _, m := range metrics {
		logger.Debugf("processing %s", m.Name)
		id := fmt.Sprintf("%d.%s", m.OrgId, m.Name)
		if m.Id == "" {
			m.Id = id
		}
		if err := metricDefs.CheckMetricDef(id, m); err != nil {
			return err
		}

		if err := storeMetric(m); err != nil {
			return err
		}
	}

	if err := d.Ack(false); err != nil {
		return err
	}
	return nil
}
Example No. 3
// DeleteHashes deletes all the checksum hashes given from the filestore.
func DeleteHashes(fileHashes []string) {
	if config.Config.UseMySQL {
		deleteHashesMySQL(fileHashes)
	} else if config.Config.UsePostgreSQL {
		deleteHashesPostgreSQL(fileHashes)
	} else {
		for _, ff := range fileHashes {
			delFile, err := Get(ff)
			if err != nil {
				logger.Debugf("Strange, we got an error trying to get %s to delete it.\n", ff)
				logger.Debugf(err.Error())
			} else {
				_ = delFile.Delete()
			}
			// May be able to remove this. Check that it actually deleted
			d, _ := Get(ff)
			if d != nil {
				logger.Debugf("Stranger and stranger, %s is still in the file store.\n", ff)
			}
		}
	}
	if config.Config.LocalFstoreDir != "" {
		for _, fh := range fileHashes {
			err := os.Remove(path.Join(config.Config.LocalFstoreDir, fh))
			if err != nil {
				logger.Errorf(err.Error())
			}
		}
	}
}
Example No. 4
// ProcessQueue creates a consumer queue on the given connection using the
// supplied exchange, exchange type, queue pattern, and number of workers. A
// function that satisfies the PayloadProcessor function type is also passed in
// to process the jobs off the queue, and if the payload processor should
// publish the results of its work a Publisher may be supplied. Otherwise nil
// for the Publisher is fine.
func ProcessQueue(conn *amqp.Connection, exchange, exchangeType, queueName, queuePattern string, durable, autoDelete, exclusive bool, errCh chan<- error, qprocessor PayloadProcessor, numWorkers int) error {
	if numWorkers < 1 {
		err := errors.New("numWorkers must be at least 1")
		return err
	}
	for i := 0; i < numWorkers; i++ {
		devs, err := CreateConsumer(conn, exchange, exchangeType, queueName, queuePattern, durable, autoDelete, exclusive)
		if err != nil {
			return err
		}
		logger.Infof("starting queue %s for %s", exchange, queuePattern)
		go func(i int, exchange string, devs <-chan amqp.Delivery) {
			for d := range devs {
				logger.Debugf("worker %s %d received delivery", exchange, i)
				err := qprocessor(&d)
				if err != nil {
					errCh <- err
					return
				}
			}
			errCh <- nil
		}(i, exchange, devs)
	}
	return nil
}
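
As a point of reference, here is a minimal sketch of how ProcessQueue might be wired up with a trivial handler. The handler body, queue names, and worker count are illustrative assumptions only; the real call sites appear in Example No. 29 below.

// Sketch only: assumes PayloadProcessor is defined as func(*amqp.Delivery) error,
// which matches how processMetrics is passed to ProcessQueue in these examples.
func exampleWiring(conn *amqp.Connection, errCh chan error) error {
	handler := func(d *amqp.Delivery) error {
		logger.Debugf("received %d bytes", len(d.Body))
		return d.Ack(false) // ack each delivery once it has been handled
	}
	// four workers consuming a non-durable, auto-delete, exclusive topic queue
	return ProcessQueue(conn, "metrics", "topic", "", "metrics.*", false, true, true, errCh, handler, 4)
}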
Example No. 5
func NewKairosdb(host string) (*Kairosdb, error) {
	logger.Debugf("initializing kairosdb client to %s", host)
	return &Kairosdb{
		client: &http.Client{Timeout: (10 * time.Second)},
		host:   host,
	}, nil
}
Example No. 6
// GetStreamOutput gets all ShoveyRunStream objects associated with a ShoveyRun
// of the given output type.
func (sr *ShoveyRun) GetStreamOutput(outputType string, seq int) ([]*ShoveyRunStream, util.Gerror) {
	if config.UsingDB() {
		return sr.getStreamOutSQL(outputType, seq)
	}
	var streams []*ShoveyRunStream
	ds := datastore.New()
	for i := seq; ; i++ {
		logger.Debugf("Getting %s", fmt.Sprintf("%s_%s_%s_%d", sr.ShoveyUUID, sr.NodeName, outputType, i))
		s, found := ds.Get("shovey_run_stream", fmt.Sprintf("%s_%s_%s_%d", sr.ShoveyUUID, sr.NodeName, outputType, i))
		if !found {
			break
		}
		logger.Debugf("got a stream: %v", s)
		streams = append(streams, s.(*ShoveyRunStream))
	}
	return streams, nil
}
Example No. 7
func (m *MetricDefinition) indexMetric() error {
	resp, err := es.Index("definitions", "metric", m.Id, nil, m)
	logger.Debugf("response ok? %v", resp.Ok)
	if err != nil {
		return err
	}
	return nil
}
Example No. 8
// LogEvent writes an event of the action type, performed by the given actor,
// against the given object.
func LogEvent(doer actor.Actor, obj util.GoiardiObj, action string) error {
	if !config.Config.LogEvents {
		logger.Debugf("Not logging this event")
		return nil
	}
	logger.Debugf("Logging event")

	var actorType string
	if doer.IsUser() {
		actorType = "user"
	} else {
		actorType = "client"
	}
	le := new(LogInfo)
	le.Action = action
	le.Actor = doer
	le.ActorType = actorType
	le.ObjectName = obj.GetName()
	le.ObjectType = reflect.TypeOf(obj).String()
	le.Time = time.Now()
	extInfo, err := datastore.EncodeToJSON(obj)
	if err != nil {
		return err
	}
	le.ExtendedInfo = extInfo
	actorInfo, err := datastore.EncodeToJSON(doer)
	if err != nil {
		return err
	}
	le.ActorInfo = actorInfo
	if config.Config.SerfEventAnnounce {
		qle := make(map[string]interface{}, 4)
		qle["time"] = le.Time
		qle["action"] = le.Action
		qle["object_type"] = le.ObjectType
		qle["object_name"] = le.ObjectName
		go serfin.SendEvent("log-event", qle)
	}

	if config.UsingDB() {
		return le.writeEventSQL()
	}
	return le.writeEventInMem()
}
Example No. 9
func (mdc *MetricDefCache) UpdateDefCache(mdef *MetricDefinition) error {
	mdc.m.Lock()
	defer mdc.m.Unlock()
	md, ok := mdc.mdefs[mdef.Id]

	if ok {
		md.Lock()
		defer md.Unlock()
		logger.Debugf("metric %s found", mdef.Id)
		if md.Def.LastUpdate >= mdef.LastUpdate {
			logger.Debugf("%s is already up to date", mdef.Id)
			return nil
		}
		mdc.mdefs[mdef.Id] = &MetricCacheItem{Def: mdef}
	} else {
		mdc.mdefs[mdef.Id] = &MetricCacheItem{Def: mdef}
	}
	return nil
}
Example No. 10
func NewFromMessage(m *IndvMetric) (*MetricDefinition, error) {
	logger.Debugf("incoming message: %+v", m)
	id := m.Id
	now := time.Now().Unix()

	var ka int
	switch k := m.Extra["keepAlives"].(type) {
	case float64:
		ka = int(k)
	}
	var state int8
	switch s := m.Extra["state"].(type) {
	case float64:
		state = int8(s)
	}

	// input is now validated by json unmarshal

	def := &MetricDefinition{Id: id,
		Name:       m.Name,
		OrgId:      m.OrgId,
		Metric:     m.Metric,
		TargetType: m.TargetType,
		Interval:   m.Interval,
		LastUpdate: now,
		KeepAlives: ka,
		State:      state,
		Unit:       m.Unit,
		Extra:      m.Extra,
	}

	if t, exists := m.Extra["thresholds"]; exists {
		thresh, _ := t.(map[string]interface{})
		for k, v := range thresh {
			switch k {
			case "warnMin":
				def.Thresholds.WarnMin = int(v.(float64))
			case "warnMax":
				def.Thresholds.WarnMax = int(v.(float64))
			case "critMin":
				def.Thresholds.CritMin = int(v.(float64))
			case "critMax":
				def.Thresholds.CritMax = int(v.(float64))
			}
		}
	}

	err := def.Save()
	if err != nil {
		return nil, err
	}

	return def, nil
}
Example No. 11
func (ic *IdxCollection) searchTextCollection(term string, notop bool) (map[string]Document, error) {
	results := make(map[string]Document)
	ic.m.RLock()
	defer ic.m.RUnlock()
	l := len(ic.docs)
	errCh := make(chan error, l)
	resCh := make(chan *searchRes, l)
	for k, v := range ic.docs {
		go func(k string, v *IdxDoc) {
			m, err := v.TextSearch(term)
			if err != nil {
				errCh <- err
				resCh <- nil
			} else {
				errCh <- nil
				if (m && !notop) || (!m && notop) {
					r := &searchRes{k, v}
					logger.Debugf("Adding result %s to channel", k)
					resCh <- r
				} else {
					resCh <- nil
				}
			}
		}(k, v)
	}
	for i := 0; i < l; i++ {
		e := <-errCh
		if e != nil {
			return nil, e
		}
	}
	for i := 0; i < l; i++ {
		r := <-resCh
		if r != nil {
			logger.Debugf("adding result")
			results[r.key] = Document(r.doc)
		}
	}
	rsafe := safeSearchResults(results)
	return rsafe, nil
}
Example No. 12
func (ic *IdxCollection) searchRange(field string, start string, end string, inclusive bool) (map[string]Document, error) {
	results := make(map[string]Document)
	ic.m.RLock()
	defer ic.m.RUnlock()
	l := len(ic.docs)
	errCh := make(chan error, l)
	resCh := make(chan *searchRes, l)
	for k, v := range ic.docs {
		go func(k string, v *IdxDoc) {
			m, err := v.RangeSearch(field, start, end, inclusive)
			if err != nil {
				errCh <- err
				resCh <- nil
			} else {
				errCh <- nil
				if m {
					r := &searchRes{k, v}
					logger.Debugf("Adding result %s to channel", k)
					resCh <- r
				} else {
					resCh <- nil
				}
			}
		}(k, v)
	}
	for i := 0; i < l; i++ {
		e := <-errCh
		if e != nil {
			return nil, e
		}
	}
	for i := 0; i < l; i++ {
		r := <-resCh
		if r != nil {
			logger.Debugf("adding result")
			results[r.key] = Document(r.doc)
		}
	}
	rsafe := safeSearchResults(results)
	return rsafe, nil
}
Example No. 13
// SendEvent sends a serf event out from goiardi.
func SendEvent(eventName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	err = Serfer.UserEvent(eventName, jsonPayload, true)
	if err != nil {
		logger.Debugf(err.Error())
	}
	return
}
Example No. 14
func GetMetricDefinition(id string) (*MetricDefinition, error) {
	// TODO: fetch from redis before checking elasticsearch
	if v, err := rs.Get(id).Result(); err != nil && err != redis.Nil {
		logger.Errorf("the redis client bombed: %s", err.Error())
		return nil, err
	} else if err == nil {
		logger.Debugf("json for %s found in elasticsearch: %s", id)
		def, err := DefFromJSON([]byte(v))
		if err != nil {
			return nil, err
		}
		return def, nil
	}

	logger.Debugf("getting %s from elasticsearch", id)
	res, err := es.Get("definitions", "metric", id, nil)
	logger.Debugf("res is: %+v", res)
	if err != nil {
		return nil, err
	}
	logger.Debugf("get returned %q", res.Source)
	logger.Debugf("placing %s into redis", id)
	if rerr := rs.SetEx(id, time.Duration(300)*time.Second, string(*res.Source)).Err(); rerr != nil {
		logger.Debugf("redis err: %s", rerr.Error())
	}

	def, err := DefFromJSON(*res.Source)
	if err != nil {
		return nil, err
	}

	return def, nil
}
Example No. 15
// SendQuery sends a basic, no frills query out over serf.
func SendQuery(queryName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	q := &serfclient.QueryParam{Name: queryName, Payload: jsonPayload}
	err = Serfer.Query(q)
	if err != nil {
		logger.Debugf(err.Error())
	}
	return
}
Example No. 16
func NewMetricStore() (*MetricStore, error) {
	mStore := MetricStore{}
	if setting.Config.EnableKairosdb {
		kairosdb, err := NewKairosdb(setting.Config.KairosdbUrl)
		if err != nil {
			return nil, err
		}
		logger.Debugf("Adding kairosdb to list of backends.")
		mStore.Backends = append(mStore.Backends, kairosdb)
	}

	if setting.Config.EnableCarbon {
		carbon, err := NewCarbon(setting.Config.CarbonAddr, setting.Config.CarbonPort)
		if err != nil {
			return nil, err
		}
		logger.Debugf("Adding Carbon to list of backends.")
		mStore.Backends = append(mStore.Backends, carbon)
	}

	return &mStore, nil
}
Example No. 17
// LogEvent writes an event of the action type, performed by the given actor,
// against the given object.
func LogEvent(doer actor.Actor, obj util.GoiardiObj, action string) error {
	if !config.Config.LogEvents {
		logger.Debugf("Not logging this event")
		return nil
	}
	logger.Debugf("Logging event")

	var actorType string
	if doer.IsUser() {
		actorType = "user"
	} else {
		actorType = "client"
	}
	le := new(LogInfo)
	le.Action = action
	le.Actor = doer
	le.ActorType = actorType
	le.ObjectName = obj.GetName()
	le.ObjectType = reflect.TypeOf(obj).String()
	le.Time = time.Now()
	extInfo, err := datastore.EncodeToJSON(obj)
	if err != nil {
		return err
	}
	le.ExtendedInfo = extInfo
	actorInfo, err := datastore.EncodeToJSON(doer)
	if err != nil {
		return err
	}
	le.ActorInfo = actorInfo

	if config.UsingDB() {
		return le.writeEventSQL()
	}
	return le.writeEventInMem()
}
Example No. 18
func (sq *SolrQuery) execute() (map[string]indexer.Document, error) {
	s := sq.queryChain
	curOp := OpNotAnOp
	for s != nil {
		var r map[string]indexer.Document
		var err error
		switch c := s.(type) {
		case *SubQuery:
			_ = c
			newq, nend, nerr := extractSubQuery(s)
			if nerr != nil {
				return nil, nerr
			}
			s = nend
			var d map[string]indexer.Document
			if curOp == OpBinAnd {
				d = sq.docs
			} else {
				d = make(map[string]indexer.Document)
			}
			nsq := &SolrQuery{queryChain: newq, idxName: sq.idxName, docs: d}
			r, err = nsq.execute()
		default:
			if curOp == OpBinAnd {
				r, err = s.SearchResults(sq.docs)
			} else {
				r, err = s.SearchIndex(sq.idxName)
			}
		}
		if err != nil {
			return nil, err
		}
		if len(sq.docs) == 0 || curOp == OpBinAnd { // nothing in place yet
			sq.docs = r
		} else if curOp == OpBinOr {
			for k, v := range r {
				sq.docs[k] = v
			}
		} else {
			logger.Debugf("Somehow we got to what should have been an impossible state with search")
		}

		curOp = s.Op()
		s = s.Next()
	}
	return sq.docs, nil
}
Example No. 19
func setLogEventPurgeTicker() {
	if config.Config.LogEventKeep != 0 {
		ticker := time.NewTicker(60 * time.Second)
		go func() {
			for range ticker.C {
				les, _ := loginfo.GetLogInfos(nil, 0, 1)
				if len(les) != 0 {
					p, err := loginfo.PurgeLogInfos(les[0].ID - config.Config.LogEventKeep)
					if err != nil {
						logger.Errorf(err.Error())
					}
					logger.Debugf("Purged %d events automatically", p)
				}
			}
		}()
	}
}
Example No. 20
// AllFilestores returns all file checksums and their contents, for exporting.
func AllFilestores() []*FileStore {
	var filestores []*FileStore
	if config.UsingDB() {
		filestores = allFilestoresSQL()
	} else {
		fileList := GetList()
		for _, f := range fileList {
			fl, err := Get(f)
			if err != nil {
				logger.Debugf("File checksum %s was in the list of files, but wasn't found when fetched. Continuing.", f)
				continue
			}
			filestores = append(filestores, fl)
		}
	}
	return filestores
}
Example No. 21
func (sq *SolrQuery) execute() (map[string]*indexer.IdxDoc, error) {
	s := sq.queryChain
	curOp := OpNotAnOp
	for s != nil {
		var r map[string]*indexer.IdxDoc
		var err error
		switch c := s.(type) {
		case *SubQuery:
			_ = c
			newq, nend, nerr := extractSubQuery(s)
			if nerr != nil {
				return nil, nerr
			}
			s = nend
			d := make(map[string]*indexer.IdxDoc)
			nsq := &SolrQuery{queryChain: newq, idxName: sq.idxName, docs: d}
			r, err = nsq.execute()
		default:
			r, err = s.SearchIndex(sq.idxName)
		}
		if err != nil {
			return nil, err
		}
		if len(sq.docs) == 0 { // nothing in place yet
			sq.docs = r
		} else if curOp == OpBinOr {
			for k, v := range r {
				sq.docs[k] = v
			}
		} else if curOp == OpBinAnd {
			newRes := make(map[string]*indexer.IdxDoc, len(sq.docs)+len(r))
			for k, v := range sq.docs {
				if _, found := r[k]; found {
					newRes[k] = v
				}
			}
			sq.docs = newRes
		} else {
			logger.Debugf("Somehow we got to what should have been an impossible state with search")
		}

		curOp = s.Op()
		s = s.Next()
	}
	return sq.docs, nil
}
Example No. 22
// AddStreamOutput adds a chunk of output from the job to the output list on the
// server stored in the ShoveyRunStream objects.
func (sr *ShoveyRun) AddStreamOutput(output string, outputType string, seq int, isLast bool) util.Gerror {
	if config.UsingDB() {
		return sr.addStreamOutSQL(output, outputType, seq, isLast)
	}
	stream := &ShoveyRunStream{ShoveyUUID: sr.ShoveyUUID, NodeName: sr.NodeName, Seq: seq, OutputType: outputType, Output: output, IsLast: isLast, CreatedAt: time.Now()}
	ds := datastore.New()
	streamKey := fmt.Sprintf("%s_%s_%s_%d", sr.ShoveyUUID, sr.NodeName, outputType, seq)
	logger.Debugf("Setting %s", streamKey)
	_, found := ds.Get("shovey_run_stream", streamKey)
	if found {
		err := util.Errorf("sequence %d for %s - %s already exists", seq, sr.ShoveyUUID, sr.NodeName)
		err.SetStatus(http.StatusConflict)
		return err
	}
	ds.Set("shovey_run_stream", streamKey, stream)

	return nil
}
Example No. 23
func deleteHashesPostgreSQL(fileHashes []string) {
	if len(fileHashes) == 0 {
		return // nothing to do
	}
	tx, err := datastore.Dbh.Begin()
	if err != nil {
		log.Fatal(err)
	}
	deleteQuery := "DELETE FROM goiardi.file_checksums WHERE checksum = ANY($1::varchar(32)[])"
	_, err = tx.Exec(deleteQuery, "{"+strings.Join(fileHashes, ",")+"}")
	if err != nil && err != sql.ErrNoRows {
		logger.Debugf("Error %s trying to delete hashes", err.Error())
		tx.Rollback()
		return
	}
	tx.Commit()
	return
}
Example No. 24
func (e *EventDefinition) Save() error {
	if e.Id == "" {
		u := uuid.NewRandom()
		e.Id = u.String()
	}
	if e.Timestamp == 0 {
		// looks like this expects timestamps in milliseconds
		e.Timestamp = time.Now().UnixNano() / int64(time.Millisecond)
	}
	if err := e.validate(); err != nil {
		return err
	}
	resp, err := es.Index("events", e.EventType, e.Id, nil, e)
	logger.Debugf("response ok? %v", resp.Ok)
	if err != nil {
		return err
	}

	return nil
}
Example No. 25
// AllCookbooks returns all the cookbooks that have been uploaded to this server.
func AllCookbooks() (cookbooks []*Cookbook) {
	if config.UsingDB() {
		cookbooks = allCookbooksSQL()
		for _, c := range cookbooks {
			// populate the versions hash
			c.sortedVersions()
		}
	} else {
		cookbookList := GetList()
		for _, c := range cookbookList {
			cb, err := Get(c)
			if err != nil {
				logger.Debugf("Curious. Cookbook %s was in the cookbook list, but wasn't found when fetched. Continuing.", c)
				continue
			}
			cookbooks = append(cookbooks, cb)
		}
	}
	return cookbooks
}
Example No. 26
func (s *Shovey) checkCompleted() {
	if config.UsingDB() {
		s.checkCompletedSQL()
		return
	}
	srs, err := s.GetNodeRuns()
	if err != nil {
		logger.Debugf("Something went wrong checking for job completion: %s", err.Error())
		return
	}
	c := 0
	for _, sr := range srs {
		if sr.Status == "invalid" || sr.Status == "succeeded" || sr.Status == "failed" || sr.Status == "down" || sr.Status == "nacked" || sr.Status == "cancelled" {
			c++
		}
	}
	if c == len(s.NodeNames) {
		s.Status = "complete"
		s.save()
	}
}
Example No. 27
func deleteHashesMySQL(fileHashes []string) {
	if len(fileHashes) == 0 {
		return // nothing to do
	}
	tx, err := datastore.Dbh.Begin()
	if err != nil {
		log.Fatal(err)
	}
	deleteQuery := "DELETE FROM file_checksums WHERE checksum IN(?" + strings.Repeat(",?", len(fileHashes)-1) + ")"
	delArgs := make([]interface{}, len(fileHashes))
	for i, v := range fileHashes {
		delArgs[i] = v
	}
	_, err = tx.Exec(deleteQuery, delArgs...)
	if err != nil && err != sql.ErrNoRows {
		logger.Debugf("Error %s trying to delete hashes", err.Error())
		tx.Rollback()
		return
	}
	tx.Commit()
	return
}
Example No. 28
func (mdc *MetricDefCache) CheckMetricDef(id string, m *IndvMetric) error {
	mdc.m.Lock()
	defer mdc.m.Unlock()

	_, exists := mdc.mdefs[id]
	if !exists {
		def, err := GetMetricDefinition(id)
		if err != nil {
			if err.Error() == "record not found" {
				logger.Debugf("adding %s to metric defs", id)
				def, err = NewFromMessage(m)
				if err != nil {
					return err
				}
			} else {
				return err
			}
		}
		mdc.mdefs[id] = &MetricCacheItem{Def: def}
	}

	return nil
}
Example No. 29
func main() {
	if setting.Config.ExpvarAddr != "" {
		go func() {
			err := http.ListenAndServe(setting.Config.ExpvarAddr, nil)
			if err != nil {
				fmt.Println("Error starting expvar http listener:", err.Error())
				os.Exit(1)
			}
		}()
	}

	// First fire up a queue to consume metric def events
	mdConn, err := amqp.Dial(setting.Config.RabbitMQURL)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	defer mdConn.Close()
	logger.Debugf("connected")

	done := make(chan error, 1)

	var numCPU int
	if setting.Config.NumWorkers != 0 {
		numCPU = setting.Config.NumWorkers
	} else {
		numCPU = runtime.NumCPU()
	}

	err = qproc.ProcessQueue(mdConn, "metrics", "topic", "", "metrics.*", false, true, true, done, processMetricDefEvent, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = qproc.ProcessQueue(mdConn, "metricResults", "x-consistent-hash", "", "10", false, true, true, done, processMetrics, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = initEventProcessing(mdConn, numCPU, done)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}

	// Signal handling. If SIGQUIT is received, print out the current
	// stack. Otherwise if SIGINT or SIGTERM are received clean up and exit
	// in an orderly fashion.
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGQUIT, os.Interrupt, syscall.SIGTERM)
		buf := make([]byte, 1<<20)
		for sig := range sigs {
			if sig == syscall.SIGQUIT {
				// print out the current stack on SIGQUIT
				stacklen := runtime.Stack(buf, true)
				log.Printf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end\n", buf[:stacklen])
			} else {
				// finish our existing work, clean up, and exit
				// in an orderly fashion
				logger.Infof("Closing rabbitmq connection")
				cerr := mdConn.Close()
				if cerr != nil {
					logger.Errorf("Received error closing rabbitmq connection: %s", cerr.Error())
				}
				logger.Infof("Closing processing buffer channel")
				close(bufCh)
			}
		}
	}()

	// this channel returns when one of the workers exits.
	err = <-done
	logger.Criticalf("all done!", err)
	if err != nil {
		logger.Criticalf("Had an error, aiiieeee! '%s'", err.Error())
	}
}
Example No. 30
func storeMetric(met *metricdef.IndvMetric) error {
	logger.Debugf("storing metric: %+v", met)
	bufCh <- *met
	return nil
}
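
Examples No. 1, No. 16, and No. 29/30 fit together: storeMetric feeds bufCh, and each worker drains that channel through MetricStore.ProcessBuffer. A minimal sketch of that wiring follows; the channel capacity and the placement of bufCh alongside MetricStore in one package are assumptions, not taken from the original source.

// Sketch only: capacity and worker startup are illustrative assumptions.
var bufCh = make(chan metricdef.IndvMetric, 1000)

func startBufferWorkers(mStore *MetricStore, numWorkers int) {
	// one ProcessBuffer goroutine per worker, all reading the shared channel
	for i := 0; i < numWorkers; i++ {
		go mStore.ProcessBuffer(bufCh, i)
	}
}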