Example #1
func startNodeMonitor() {
	// Don't start the node monitor if serf isn't set up.
	if !config.Config.UseSerf {
		return
	}
	go func() {
		// wait 1 minute before starting to check for nodes being up
		time.Sleep(1 * time.Minute)
		ticker := time.NewTicker(time.Minute)
		for range ticker.C {
			unseen, err := node.UnseenNodes()
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			for _, n := range unseen {
				logger.Infof("Haven't seen %s for a while, marking as down", n.Name)
				err = n.UpdateStatus("down")
				if err != nil {
					logger.Errorf(err.Error())
					continue
				}
			}
		}
	}()
}
Example #2
func (idoc *IdxDoc) update(object Indexable) {
	idoc.m.Lock()
	defer idoc.m.Unlock()
	flattened := util.Indexify(object.Flatten())
	flatText := strings.Join(flattened, "\n")
	/* recover from horrific trie errors that seem to happen with really
	 * big values. :-/ */
	defer func() {
		if e := recover(); e != nil {
			logger.Errorf("There was a problem creating the trie: %s", fmt.Sprintln(e))
		}
	}()
	trie, err := gtrie.Create(flattened)
	if err != nil {
		logger.Errorf(err.Error())
	} else {
		idoc.trie, err = compressTrie(trie)
		if err != nil {
			panic(err)
		}
		idoc.docText, err = compressText(flatText)
		if err != nil {
			panic(err)
		}
	}
}
Example #3
// TestFiltering tests the filtering of log entries.
func TestFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.SetFilter(func(level logger.LogLevel, info, msg string) bool {
		return level >= logger.LevelWarning && level <= logger.LevelError
	})

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 3)

	logger.UnsetFilter()

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)
}
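
The filtering and level tests above rely on a testLogger helper that is not shown in this section. The sketch below is a hypothetical reconstruction: it assumes the logger package accepts any backend exposing level-specific methods with (info, msg string) signatures, which may differ from the real backend interface in detail.

// testLogger is a hypothetical in-memory backend that records every entry
// so tests can assert on the number of captured log lines.
type testLogger struct {
	logs []string
}

func (tl *testLogger) log(level, info, msg string) {
	tl.logs = append(tl.logs, level+" "+info+" "+msg)
}

func (tl *testLogger) Debug(info, msg string)    { tl.log("debug", info, msg) }
func (tl *testLogger) Info(info, msg string)     { tl.log("info", info, msg) }
func (tl *testLogger) Warning(info, msg string)  { tl.log("warning", info, msg) }
func (tl *testLogger) Error(info, msg string)    { tl.log("error", info, msg) }
func (tl *testLogger) Critical(info, msg string) { tl.log("critical", info, msg) }

// Fatal is included only in case the backend interface requires it.
func (tl *testLogger) Fatal(info, msg string) { tl.log("fatal", info, msg) }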
Example #4
func main() {
	config := cachet.Config
	// TODO support log path
	logger.SetLogger(logger.NewTimeformatLogger(os.Stderr, "2006-01-02 15:04:05"))
	logger.SetLevel(logger.LevelDebug)

	logger.Infof("System: %s, API: %s", config.SystemName, config.APIUrl)
	logger.Infof("Starting %d monitors", len(config.MonitorConfigs))

	// initialize monitors
	var allMonitors []*cachet.Monitor
	for _, monconf := range config.MonitorConfigs {
		err, mon := cachet.NewMonitor(&monconf)
		if err == nil {
			err = cachet.SyncMonitor(mon)
			if err != nil {
				logger.Errorf("%v", err)
			}
			allMonitors = append(allMonitors, mon)
		} else {
			logger.Errorf("Parsing monitor error, skipping: %v", err)
		}
	}

	ticker := time.NewTicker(time.Second * time.Duration(config.CheckInterval))
	for range ticker.C {
		for _, m := range allMonitors {
			go m.Check()
		}
	}
}
Example #5
func startEventMonitor(sc *serfclient.RPCClient, errch chan<- error) {
	ch := make(chan map[string]interface{}, 10)
	sh, err := sc.Stream("*", ch)
	if err != nil {
		errch <- err
		return
	}
	errch <- nil

	defer sc.Stop(sh)
	// watch the events and queries
	for e := range ch {
		logger.Debugf("Got an event: %v", e)
		eName := e["Name"]
		switch eName {
		case "node_status":
			jsonPayload := make(map[string]string)
			payloadBytes, ok := e["Payload"].([]byte)
			if !ok {
				logger.Errorf("node_status payload %v (type %T) is not []byte", e["Payload"], e["Payload"])
				continue
			}
			err = json.Unmarshal(payloadBytes, &jsonPayload)
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			n, _ := node.Get(jsonPayload["node"])
			if n == nil {
				logger.Errorf("No node %s", jsonPayload["node"])
				continue
			}
			err = n.UpdateStatus(jsonPayload["status"])
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			r := map[string]string{"response": "ok"}
			response, _ := json.Marshal(r)
			var id uint64
			switch t := e["ID"].(type) {
			case int64:
				id = uint64(t)
			case uint64:
				id = t
			default:
				logger.Errorf("node_status ID %v type %T not int64 or uint64", e["ID"], e["ID"])
				continue
			}
			sc.Respond(id, response)
		}
	}
}
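
For reference, the "node_status" case above expects the serf event payload to be a flat JSON object with "node" and "status" string fields. A hypothetical payload, inferred from the unmarshalling code; the values are made up for illustration:

// Hypothetical node_status payload matching what the handler unmarshals
// into jsonPayload before updating the node's status.
payload := map[string]string{
	"node":   "some-node-name",
	"status": "up",
}
body, _ := json.Marshal(payload)
// body is `{"node":"some-node-name","status":"up"}`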
Example #6
// ClearIndex clears the index of all collections and documents.
func ClearIndex() {
	err := objIndex.Clear()
	if err != nil {
		logger.Errorf("Error clearing db for reindexing: %s", err.Error())
	}
}
Example #7
// DeleteHashes deletes all of the given checksum hashes from the filestore.
func DeleteHashes(fileHashes []string) {
	if config.Config.UseMySQL {
		deleteHashesMySQL(fileHashes)
	} else if config.Config.UsePostgreSQL {
		deleteHashesPostgreSQL(fileHashes)
	} else {
		for _, ff := range fileHashes {
			delFile, err := Get(ff)
			if err != nil {
				logger.Debugf("Strange, we got an error trying to get %s to delete it.\n", ff)
				logger.Debugf(err.Error())
			} else {
				_ = delFile.Delete()
			}
			// May be able to remove this. Check that it actually deleted
			d, _ := Get(ff)
			if d != nil {
				logger.Debugf("Stranger and stranger, %s is still in the file store.\n", ff)
			}
		}
	}
	if config.Config.LocalFstoreDir != "" {
		for _, fh := range fileHashes {
			err := os.Remove(path.Join(config.Config.LocalFstoreDir, fh))
			if err != nil {
				logger.Errorf(err.Error())
			}
		}
	}
}
Example #8
// ProcessEvent reads, validates and emits a configuration.
func (b *configuratorBehavior) ProcessEvent(event cells.Event) error {
	switch event.Topic() {
	case ReadConfigurationTopic:
		// Read configuration
		filename, ok := event.Payload().GetString(ConfigurationFilenamePayload)
		if !ok {
			logger.Errorf("cannot read configuration without filename payload")
			return nil
		}
		logger.Infof("reading configuration from %q", filename)
		cfg, err := etc.ReadFile(filename)
		if err != nil {
			return errors.Annotate(err, ErrCannotReadConfiguration, errorMessages)
		}
		// If wanted then validate it.
		if b.validate != nil {
			err = b.validate(cfg)
			if err != nil {
				return errors.Annotate(err, ErrCannotValidateConfiguration, errorMessages)
			}
		}
		// All done, emit it.
		pvs := cells.PayloadValues{
			ConfigurationPayload: cfg,
		}
		b.cell.EmitNewContext(ConfigurationTopic, pvs, event.Context())
	}
	return nil
}
Example #9
// logCommand logs a command and its execution status.
func logCommand(cmd string, args []interface{}, err error, log bool) {
	// Format the command for the log entry.
	formatArgs := func() string {
		if len(args) == 0 {
			return "(none)"
		}
		output := make([]string, len(args))
		for i, arg := range args {
			output[i] = string(valueToBytes(arg))
		}
		return strings.Join(output, " / ")
	}
	logOutput := func() string {
		format := "CMD %s ARGS %s %s"
		if err == nil {
			return fmt.Sprintf(format, cmd, formatArgs(), "OK")
		}
		return fmt.Sprintf(format, cmd, formatArgs(), "ERROR "+err.Error())
	}
	// Log positive commands only if wanted, errors always.
	if err != nil {
		if errors.IsError(err, ErrServerResponse) || errors.IsError(err, ErrTimeout) {
			return
		}
		logger.Errorf(logOutput())
	} else if log {
		logger.Infof(logOutput())
	}
}
Example #10
// checkRecovering checks if the cell may recover after a panic. It will
// signal an error and let the cell stop working if there have been more
// recoverings than allowed within the configured duration, or if the
// behavior's Recover() signals that it cannot handle the error.
func (c *cell) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	logger.Warningf("recovering cell %q after error: %v", c.id, rs.Last().Reason)
	// Check frequency.
	if rs.Frequency(c.recoveringNumber, c.recoveringDuration) {
		err := errors.New(ErrRecoveredTooOften, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering frequency of cell %q too high", c.id)
		return nil, err
	}
	// Try to recover.
	if err := c.behavior.Recover(rs.Last().Reason); err != nil {
		err := errors.Annotate(err, ErrEventRecovering, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering of cell %q failed: %v", c.id, err)
		return nil, err
	}
	logger.Infof("successfully recovered cell %q", c.id)
	return rs.Trim(c.recoveringNumber), nil
}
Example #11
// checkRecovering checks if the backend can be recovered.
func (c *Crontab) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	if rs.Frequency(12, time.Minute) {
		logger.Errorf("crontab cannot be recovered: %v", rs.Last().Reason)
		return nil, errors.New(ErrCrontabCannotBeRecovered, errorMessages, rs.Last().Reason)
	}
	logger.Warningf("crontab recovered: %v", rs.Last().Reason)
	return rs.Trim(12), nil
}
Example #12
// checkRecovering checks if the backend can be recovered.
func (b *stdBackend) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	if rs.Frequency(12, time.Minute) {
		logger.Errorf("standard monitor cannot be recovered: %v", rs.Last().Reason)
		return nil, errors.New(ErrMonitoringCannotBeRecovered, errorMessages, rs.Last().Reason)
	}
	logger.Warningf("standard monitor recovered: %v", rs.Last().Reason)
	return rs.Trim(12), nil
}
Example #13
// NewFilterBehavior creates a filter behavior based on the passed function.
// It emits every received event for which the filter function returns true.
func NewFilterBehavior(ff FilterFunc) cells.Behavior {
	if ff == nil {
		ff = func(id string, event cells.Event) bool {
			logger.Errorf("filter processor %q used without function to handle event %v", id, event)
			return true
		}
	}
	return &filterBehavior{nil, ff}
}
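
A hypothetical caller-side sketch: the filter function receives the cell ID and the event, and only events for which it returns true are emitted onward. The "metrics" topic below is invented for illustration; Topic() is the cells.Event method used elsewhere in these examples.

// Pass only events with a specific topic; everything else is dropped.
onlyMetrics := func(id string, event cells.Event) bool {
	return event.Topic() == "metrics"
}
behavior := NewFilterBehavior(onlyMetrics)
// behavior would then be started inside a cells environment, which is
// outside the scope of this sketch.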
Example #14
// NewSimpleProcessorBehavior creates a simple processor behavior based on the
// passed function. Instead of having its own logic and state, it uses the
// passed simple processor function for the event processing.
func NewSimpleProcessorBehavior(spf SimpleProcessorFunc) cells.Behavior {
	if spf == nil {
		spf = func(cell cells.Cell, event cells.Event) error {
			logger.Errorf("simple processor %q used without function to handle event %v", cell.ID(), event)
			return nil
		}
	}
	return &simpleBehavior{nil, spf}
}
Example #15
// stop terminates the cell.
func (c *cell) stop() error {
	c.emitTimeoutTicker.Stop()
	err := c.loop.Stop()
	if err != nil {
		logger.Errorf("cell %q terminated with error: %v", c.id, err)
	} else {
		logger.Infof("cell %q terminated", c.id)
	}
	return err
}
Example #16
func jsonErrorReport(w http.ResponseWriter, r *http.Request, errorStr string, status int) {
	logger.Infof(errorStr)
	jsonError := map[string][]string{"error": {errorStr}}
	w.WriteHeader(status)
	enc := json.NewEncoder(w)
	if err := enc.Encode(&jsonError); err != nil {
		logger.Errorf(err.Error())
	}
}
Example #17
func setSaveTicker() {
	if config.Config.FreezeData {
		ds := datastore.New()
		ticker := time.NewTicker(time.Second * time.Duration(config.Config.FreezeInterval))
		go func() {
			for range ticker.C {
				if config.Config.DataStoreFile != "" {
					uerr := ds.Save(config.Config.DataStoreFile)
					if uerr != nil {
						logger.Errorf(uerr.Error())
					}
				}
				ierr := indexer.SaveIndex()
				if ierr != nil {
					logger.Errorf(ierr.Error())
				}
			}
		}()
	}
}
Example #18
// checkRecovering checks if the cell may recover after a panic. It will
// signal an error and let the cell stop working if there have been more
// recoverings than allowed within the configured duration, or if the
// behavior's Recover() signals that it cannot handle the error.
func (c *cell) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	logger.Errorf("recovering cell %q after error: %v", c.id, rs.Last().Reason)
	// Check frequency.
	if rs.Frequency(c.recoveringNumber, c.recoveringDuration) {
		return nil, errors.New(ErrRecoveredTooOften, errorMessages, rs.Last().Reason)
	}
	// Try to recover.
	if err := c.behavior.Recover(rs.Last().Reason); err != nil {
		return nil, errors.Annotate(err, ErrEventRecovering, errorMessages, rs.Last().Reason)
	}
	return rs.Trim(c.recoveringNumber), nil
}
Example #19
// SendEvent sends a serf event out from goiardi.
func SendEvent(eventName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	err = Serfer.UserEvent(eventName, jsonPayload, true)
	if err != nil {
		logger.Debugf(err.Error())
	}
}
Example #20
// Test logging with the go logger.
func TestGoLogger(t *testing.T) {
	log.SetOutput(os.Stdout)

	logger.SetLevel(logger.LevelDebug)
	logger.SetLogger(logger.NewGoLogger())

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #21
// SendQuery sends a basic, no frills query out over serf.
func SendQuery(queryName string, payload interface{}) {
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	q := &serfclient.QueryParam{Name: queryName, Payload: jsonPayload}
	err = Serfer.Query(q)
	if err != nil {
		logger.Debugf(err.Error())
	}
}
Example #22
// do checks whether the job shall execute now and, if so, runs it in a goroutine.
func (c *Crontab) do(id string, job Job, now time.Time) {
	if job.ShallExecute(now) {
		go func() {
			cont, err := job.Execute()
			if err != nil {
				logger.Errorf("job %q removed after error: %v", id, err)
				cont = false
			}
			if !cont {
				c.Remove(id)
			}
		}()
	}
}
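
The Job value used above only needs to provide the two methods the code calls: ShallExecute(time.Time) bool and Execute() (bool, error). A hypothetical implementation, assuming nothing beyond those two methods (the real Job interface may include more):

// minuteJob is a made-up job that runs at most once per minute and keeps
// itself scheduled by returning true from Execute.
type minuteJob struct {
	last time.Time
}

func (j *minuteJob) ShallExecute(now time.Time) bool {
	return now.Sub(j.last) >= time.Minute
}

func (j *minuteJob) Execute() (bool, error) {
	j.last = time.Now()
	logger.Infof("minute job executed")
	return true, nil // true keeps the job in the crontab
}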
Example #23
// Test log level filtering.
func TestLogLevelFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelError)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 2)
}
Example #24
func handleSignals() {
	c := make(chan os.Signal, 1)
	// SIGTERM is not exactly portable, but Go provides a synthetic version
	// of it on Windows, so listening for it should not break anything when
	// running there.
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// if we receive a SIGINT or SIGTERM, do cleanup here.
	go func() {
		for sig := range c {
			if sig == os.Interrupt || sig == syscall.SIGTERM {
				logger.Infof("cleaning up...")
				if config.Config.FreezeData {
					if config.Config.DataStoreFile != "" {
						ds := datastore.New()
						if err := ds.Save(config.Config.DataStoreFile); err != nil {
							logger.Errorf(err.Error())
						}
					}
					if err := indexer.SaveIndex(); err != nil {
						logger.Errorf(err.Error())
					}
				}
				if config.UsingDB() {
					datastore.Dbh.Close()
				}
				if config.Config.UseSerf {
					serfin.Serfer.Close()
				}
				os.Exit(0)
			} else if sig == syscall.SIGHUP {
				logger.Infof("Reloading configuration...")
				config.ParseConfigOptions()
			}
		}
	}()
}
Example #25
// SendMetric sends a lag metric data point.
func SendMetric(metricID int, delay int64) {
	if metricID <= 0 {
		return
	}

	jsonBytes, _ := json.Marshal(&map[string]interface{}{
		"value": delay,
	})

	resp, _, err := MakeRequest("POST", "/metrics/"+strconv.Itoa(metricID)+"/points", jsonBytes)
	if err != nil {
		logger.Errorf("Could not log data point: %v", err)
		return
	}
	if resp.StatusCode != 200 {
		logger.Errorf("Could not log data point: status code %d", resp.StatusCode)
	}
}
Example #26
// Test logging with the syslogger.
func TestSysLogger(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	logger.SetLevel(logger.LevelDebug)

	sl, err := logger.NewSysLogger("GOAS")
	assert.Nil(err)
	logger.SetLogger(sl)

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #27
func NewMonitor(monconfp *json.RawMessage) (error, *Monitor) {
	var m Monitor
	if err := json.Unmarshal(*monconfp, &m); err != nil {
		return err, nil
	}

	if m.Name == "" {
		return errors.New("Monitor \"name\" no set"), nil
	}
	if m.Parameters == nil {
		return errors.New("Monitor \"parameters\" no set"), nil
	}

	if m.Type == "" {
		return errors.New("Monitor \"type\" no set"), nil
	} else {
		switch m.Type {
		case "http":
			var checker monitors.HTTPChecker
			err := json.Unmarshal(m.Parameters, &checker.Parameters)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			err = json.Unmarshal(m.Expect, &checker.Expect)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			m.Checker = &checker
		case "dns":
			var checker monitors.DNSChecker
			err := json.Unmarshal(m.Parameters, &checker.Parameters)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			err = json.Unmarshal(m.Expect, &checker.Expect)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			m.Checker = &checker
		case "ntp":
			var checker monitors.NTPChecker
			err := json.Unmarshal(m.Parameters, &checker.Parameters)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			err = json.Unmarshal(m.Expect, &checker.Expect)
			if err != nil {
				logger.Errorf("Unmarshal: %v", err)
			}
			m.Checker = &checker
		default:
			return errors.New("Unknown type: " + string(m.Type)), nil
		}
	}

	return nil, &m
}
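
A hypothetical raw configuration for a single monitor, matching the fields NewMonitor unmarshals above (name, type, parameters, expect). The keys and values inside "parameters" and "expect" are invented for illustration and depend on the individual checker types; the lowercase keys assume Go's case-insensitive field matching (or matching JSON tags) maps them onto Name, Type, Parameters and Expect.

rawConf := json.RawMessage(`{
	"name": "frontend",
	"type": "http",
	"parameters": {"url": "https://example.com/health"},
	"expect": {"status_code": 200}
}`)
err, mon := cachet.NewMonitor(&rawConf)
if err != nil {
	logger.Errorf("Parsing monitor error, skipping: %v", err)
}
_ = mon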
Example #28
func setLogEventPurgeTicker() {
	if config.Config.LogEventKeep != 0 {
		ticker := time.NewTicker(time.Minute)
		go func() {
			for range ticker.C {
				les, _ := loginfo.GetLogInfos(nil, 0, 1)
				if len(les) != 0 {
					p, err := loginfo.PurgeLogInfos(les[0].ID - config.Config.LogEventKeep)
					if err != nil {
						logger.Errorf(err.Error())
						continue
					}
					logger.Debugf("Purged %d events automatically", p)
				}
			}
		}()
	}
}
Example #29
func reindexAll() {
	reindexObjs := make([]indexer.Indexable, 0, 100)
	// We clear the index *before* fetching the objects to reindex; that
	// way, anything created between the fetch and the end of reindexing
	// will simply be added to the index naturally.
	indexer.ClearIndex()

	for _, v := range client.AllClients() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range node.AllNodes() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range role.AllRoles() {
		reindexObjs = append(reindexObjs, v)
	}
	for _, v := range environment.AllEnvironments() {
		reindexObjs = append(reindexObjs, v)
	}
	defaultEnv, _ := environment.Get("_default")
	reindexObjs = append(reindexObjs, defaultEnv)
	// data bags have to be done separately
	dbags := databag.GetList()
	for _, db := range dbags {
		dbag, err := databag.Get(db)
		if err != nil {
			continue
		}
		allDBItems, derr := dbag.AllDBItems()
		if derr != nil {
			logger.Errorf(derr.Error())
			continue
		}
		for _, dbi := range allDBItems {
			reindexObjs = append(reindexObjs, dbi)
		}
	}
	indexer.ReIndex(reindexObjs)
}
Example #30
// stop terminates the cell.
func (c *cell) stop() error {
	// Terminate connections to emitters and subscribers.
	c.emitters.do(func(ec *cell) error {
		ec.subscribers.remove(c.id)
		return nil
	})
	c.subscribers.do(func(sc *cell) error {
		sc.emitters.remove(c.id)
		return nil
	})
	// Stop own backend.
	c.emitTimeoutTicker.Stop()
	err := c.loop.Stop()
	if err != nil {
		logger.Errorf("cell '%s' stopped with error: %v", c.id, err)
	} else {
		logger.Infof("cell '%s' stopped", c.id)
	}
	return err
}