Example #1
// Open a DB by fileName.
func Open(fileName string) (*DB, error) {
	// Create if not exist
	_, err := os.Stat(fileName)
	if os.IsNotExist(err) {
		log.Debug("create dir %s", fileName)
		err := os.Mkdir(fileName, filemode)
		if err != nil {
			return nil, err
		}
	}
	// Admindb.
	db := new(DB)
	db.Admin, err = admindb.Open(path.Join(fileName, admindbFileName))
	if err != nil {
		return nil, err
	}
	// Indexdb.
	db.Index, err = indexdb.Open(path.Join(fileName, indexdbFileName))
	if err != nil {
		return nil, err
	}
	// Metricdb.
	db.Metric, err = metricdb.Open(path.Join(fileName, metricdbFileName))
	if err != nil {
		return nil, err
	}
	log.Debug("storage is opened successfully")
	return db, nil
}
Example #2
// Match a metric with rules, and return matched rules.
//
//	If no rules matched, return false.
//	If any black patterns matched, return false.
//	Else, return true and matched rules.
//
func (d *Detector) match(m *models.Metric) (bool, []*models.Rule) {
	// Check rules.
	timer := util.NewTimer()
	rules := d.flt.MatchedRules(m)
	elapsed := timer.Elapsed()
	health.AddFilterCost(elapsed)
	if len(rules) == 0 {
		// Hit no rules.
		return false, rules
	}
	// Check blacklist.
	for _, p := range d.cfg.Detector.BlackList {
		ok, err := filepath.Match(p, m.Name)
		if err != nil {
			// Invalid black pattern.
			log.Error("invalid black pattern: %s, %v", p, err)
			continue
		}
		if ok {
			// Hit black pattern.
			log.Debug("%s hit black pattern %s", m.Name, p)
			return false, rules
		}
	}
	// Ok
	return true, rules
}
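A caller can use the boolean to short-circuit detection and the rules slice to drive per-rule checks. A minimal caller sketch, assuming a hypothetical per-rule detection helper (detectWith is illustrative, not part of the project):
// process is a sketch of a caller of match; detectWith is hypothetical.
func (d *Detector) process(m *models.Metric) {
	ok, rules := d.match(m)
	if !ok {
		// No rule matched, or a black pattern matched: skip detection.
		return
	}
	for _, rule := range rules {
		d.detectWith(m, rule) // hypothetical per-rule detection step
	}
}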
Example #3
// migrate migrates the database schema.
func (db *DB) migrate() error {
	log.Debug("migrate sql schemas..")
	rule := &models.Rule{}
	user := &models.User{}
	proj := &models.Project{}
	return db.db.AutoMigrate(rule, user, proj).Error
}
Example #4
// Test whether a metric matches the rules.
func (d *Detector) match(m *models.Metric) bool {
	// Check rules.
	rules := d.filter.MatchedRules(m)
	if len(rules) == 0 {
		log.Debug("%s hit no rules", m.Name)
		return false
	}
	// Check blacklist.
	for _, pattern := range d.cfg.Detector.BlackList {
		ok, err := filepath.Match(pattern, m.Name)
		if err == nil && ok {
			log.Debug("%s hit black pattern %s", m.Name, pattern)
			return false
		}
	}
	return true
}
Example #5
func initLog() {
	log.SetName("banshee")
	if *debug {
		log.SetLevel(log.DEBUG)
	}
	goVs := runtime.Version()
	nCPU := runtime.GOMAXPROCS(-1)
	vers := version.Version
	log.Debug("banshee%s %s %d cpu", vers, goVs, nCPU)
}
Example #6
// UpdateWithJSONFile updates the config from a JSON file.
func (c *Config) UpdateWithJSONFile(fileName string) error {
	log.Debug("read config from %s..", fileName)
	b, err := ioutil.ReadFile(fileName)
	if err != nil {
		return err
	}
	err = json.Unmarshal(b, c)
	if err != nil {
		return err
	}
	return nil
}
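Typical usage is to build the defaults with config.New() and then overlay a JSON file on top, as this minimal sketch (the fileName flag mirrors Example #7):
cfg := config.New()
if *fileName != "" {
	if err := cfg.UpdateWithJSONFile(*fileName); err != nil {
		log.Fatal("failed to load %s, %s", *fileName, err)
	}
}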
Example #7
func main() {
	// Arguments
	fileName := flag.String("c", "config.json", "config file")
	debug := flag.Bool("d", false, "debug mode")
	vers := flag.Bool("v", false, "version")
	flag.Parse()
	// Version
	if *vers {
		fmt.Fprintln(os.Stdout, version.Version)
		os.Exit(0)
	}
	// Logging
	log.SetName("banshee")
	if *debug {
		log.SetLevel(log.DEBUG)
	}
	log.Debug("using %s, max %d cpu", runtime.Version(), runtime.GOMAXPROCS(-1))
	// Config
	cfg := config.New()
	if flag.NFlag() == 0 || (flag.NFlag() == 1 && *debug) {
		log.Warn("no config file specified, using default..")
	} else {
		err := cfg.UpdateWithJSONFile(*fileName)
		if err != nil {
			log.Fatal("failed to load %s, %s", *fileName, err)
		}
	}
	// Storage
	options := &storage.Options{
		NumGrid: cfg.Period[0],
		GridLen: cfg.Period[1],
	}
	db, err := storage.Open(cfg.Storage.Path, options)
	if err != nil {
		log.Fatal("failed to open %s: %v", cfg.Storage.Path, err)
	}
	// Cleaner
	cleaner := cleaner.New(db, cfg.Period[0]*cfg.Period[1])
	go cleaner.Start()
	// Filter
	filter := filter.New()
	filter.Init(db)
	// Alerter
	alerter := alerter.New(cfg, db, filter)
	alerter.Start()
	// Webapp
	go webapp.Start(cfg, db)
	// Detector
	detector := detector.New(cfg, db, filter)
	detector.Out(alerter.In)
	detector.Start()
}
Example #8
// Init from db.
func (f *Filter) Init(db *storage.DB) {
	log.Debug("init filter's rules from cache..")
	// Listen for rule changes.
	db.Admin.RulesCache.OnAdd(f.addRuleCh)
	db.Admin.RulesCache.OnDel(f.delRuleCh)
	go f.addRules()
	go f.delRules()
	// Add rules from cache
	rules := db.Admin.RulesCache.All()
	for _, rule := range rules {
		f.addRule(rule)
	}
}
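The two goroutines started here presumably drain the add/del channels registered with the cache. A minimal sketch of the add side, assuming addRuleCh is a channel of *models.Rule; only the names come from the example above, the body is illustrative:
// addRules is a sketch: it feeds every rule arriving on the channel
// into addRule. The channel's element type is an assumption here.
func (f *Filter) addRules() {
	for rule := range f.addRuleCh {
		f.addRule(rule)
	}
}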
Example #9
// Open a DB by fileName and options.
func Open(fileName string, options *Options) (*DB, error) {
	// Create if not exist
	_, err := os.Stat(fileName)
	if os.IsNotExist(err) {
		log.Debug("create dir %s", fileName)
		err := os.Mkdir(fileName, filemode)
		if err != nil {
			return nil, err
		}
	}
	// Admindb.
	db := new(DB)
	db.Admin, err = admindb.Open(path.Join(fileName, admindbFileName))
	if err != nil {
		return nil, err
	}
	// Indexdb.
	db.Index, err = indexdb.Open(path.Join(fileName, indexdbFileName))
	if err != nil {
		return nil, err
	}
	// Metricdb.
	db.Metric, err = metricdb.Open(path.Join(fileName, metricdbFileName))
	if err != nil {
		return nil, err
	}
	// Statedb.
	name := fmt.Sprintf("%s-%dx%d", statedbFileName, options.NumGrid, options.GridLen)
	opts := &statedb.Options{NumGrid: options.NumGrid, GridLen: options.GridLen}
	db.State, err = statedb.Open(path.Join(fileName, name), opts)
	if err != nil {
		return nil, err
	}
	log.Debug("storage is opened successfully")
	return db, nil
}
Example #10
// Init cache from db.
func (c *rulesCache) Init(db *gorm.DB) error {
	log.Debug("init rules from admindb..")
	// Query
	var rules []models.Rule
	err := db.Find(&rules).Error
	if err != nil {
		return err
	}
	// Load
	for _, rule := range rules {
		rule := rule // copy the loop variable so each entry keeps its own pointer
		// Share
		r := &rule
		r.Share()
		c.rules.Set(rule.ID, r)
	}
	return nil
}
Example #11
// Handle a connection: filter the metrics by rules and detect whether
// the metrics are anomalies.
func (d *Detector) handle(conn net.Conn) {
	// New conn
	addr := conn.RemoteAddr()
	defer func() {
		conn.Close()
		log.Info("conn %s disconnected", addr)
	}()
	log.Info("conn %s established", addr)
	// Scan line by line.
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		startAt := time.Now()
		// Parse
		line := scanner.Text()
		m, err := parseMetric(line)
		if err != nil {
			if len(line) > 10 {
				line = line[:10]
			}
			log.Error("parse '%s': %v, skipping..", line, err)
			continue
		}
		// Filter
		if d.match(m) {
			// Detect
			err = d.detect(m)
			if err != nil {
				log.Error("failed to detect: %v, skipping..", err)
				continue
			}
			elapsed := time.Since(startAt)
			log.Debug("%dμs %s %.3f", elapsed.Nanoseconds()/1000, m.Name, m.Score)
			// Output
			d.output(m)
			// Store
			if err := d.store(m); err != nil {
				log.Error("store metric %s: %v, skiping..", m.Name, err)
			}
		}
	}
	if err := scanner.Err(); err != nil {
		// Surface read errors once the scan stops.
		log.Info("read conn: %v, closing it..", err)
	}
}
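For context, each connection carries newline-delimited metric lines that parseMetric decodes. A minimal client sketch; the "name timestamp value" line layout and the address are assumptions for illustration, not a confirmed wire format:
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Assumed detector address; the listen port is configurable.
	conn, err := net.Dial("tcp", "127.0.0.1:2015")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Assumed layout: one "name timestamp value" record per line.
	fmt.Fprintf(conn, "counter.api.200 %d %.3f\n", time.Now().Unix(), 13.0)
}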
Example #12
// load indexes from db to cache.
func (db *DB) load() {
	log.Debug("init index from indexdb..")
	// Scan values to memory.
	iter := db.db.NewIterator(nil, nil)
	for iter.Next() {
		// Decode
		key := iter.Key()
		value := iter.Value()
		idx := &models.Index{}
		idx.Name = string(key)
		err := decode(value, idx)
		if err != nil {
			// Skip corrupted values
			continue
		}
		db.m.Set(idx.Name, idx)
	}
	// Release the iterator once the scan is done.
	iter.Release()
}
Example #13
// clean checks all indexes and cleans up outdated data.
func (c *Cleaner) clean() {
	idxs := c.db.Index.All()
	// Use local server time; uint32 is enough for roughly another 90 years.
	now := uint32(time.Now().Unix())
	for _, idx := range idxs {
		if idx.Stamp+c.cfg.Cleaner.Threshold < now {
			// Long time no data, clean all.
			c.db.Index.Delete(idx.Name)
			c.db.Metric.DeleteTo(idx.Name, idx.Stamp+1) // DeleteTo is right closed
			log.Info("%s fully cleaned", idx.Name)
		} else {
			// Clean outdated metrics.
			n, _ := c.db.Metric.DeleteTo(idx.Name, now-c.cfg.Expiration)
			if n > 0 {
				log.Debug("%s %d outdated metrics cleaned", idx.Name, n)
			}
		}
	}
}
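clean is a single pass; Example #7 starts the cleaner with go cleaner.Start(), which presumably invokes clean on an interval. A minimal sketch of such a loop; the interval field is an assumption, not the project's actual field name:
// Start runs clean periodically until the process exits.
// c.interval is assumed for this sketch.
func (c *Cleaner) Start() {
	ticker := time.NewTicker(c.interval)
	defer ticker.Stop()
	for range ticker.C {
		c.clean()
	}
}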