Example #1
// update applies the given option key/value pairs to the route's config.
// A change to prefix, sub or regex rebuilds (and thereby validates) the
// matcher before the new config snapshot is stored.
func (route *baseRoute) update(opts map[string]string, extendConfig baseCfgExtender) error {
	route.Lock()
	defer route.Unlock()
	conf := route.config.Load().(Config)
	match := conf.Matcher()
	prefix := match.Prefix
	sub := match.Sub
	regex := match.Regex
	updateMatcher := false

	for name, val := range opts {
		switch name {
		case "prefix":
			prefix = val
			updateMatcher = true
		case "sub":
			sub = val
			updateMatcher = true
		case "regex":
			regex = val
			updateMatcher = true
		default:
			return fmt.Errorf("no such option '%s'", name)
		}
	}
	if updateMatcher {
		match, err := matcher.New(prefix, sub, regex)
		if err != nil {
			return err
		}
		conf = extendConfig(baseConfig{*match, conf.Dests()})
	}
	route.config.Store(conf)
	return nil
}
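
Every example in this collection funnels its pattern arguments through matcher.New(prefix, sub, regex). Below is a minimal standalone sketch of that shared call; the import path is an assumption based on the package name, and the pattern values are purely illustrative.

package main

import (
	"log"

	"github.com/graphite-ng/carbon-relay-ng/matcher" // import path is an assumption
)

func main() {
	// As in the examples above, unused pattern kinds are passed as empty strings.
	m, err := matcher.New("collectd.", "", "")
	if err != nil {
		log.Fatal(err) // e.g. the regex pattern failed to compile
	}
	log.Printf("prefix=%q sub=%q regex=%q", m.Prefix, m.Sub, m.Regex)
}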
Example #2
// NewSendFirstMatch creates a sendFirstMatch route.
// We will automatically run the route and the given destinations
func NewSendFirstMatch(key, prefix, sub, regex string, destinations []*dest.Destination) (Route, error) {
	m, err := matcher.New(prefix, sub, regex)
	if err != nil {
		return nil, err
	}
	r := &SendFirstMatch{baseRoute{sync.Mutex{}, atomic.Value{}, key}}
	r.config.Store(baseConfig{*m, destinations})
	r.run()
	return r, nil
}
Example #3
// NewConsistentHashing creates a consistentHashing route.
// We will automatically run the route and the given destinations
func NewConsistentHashing(key, prefix, sub, regex string, destinations []*dest.Destination) (Route, error) {
	m, err := matcher.New(prefix, sub, regex)
	if err != nil {
		return nil, err
	}
	r := &ConsistentHashing{baseRoute{sync.Mutex{}, atomic.Value{}, key}}
	hasher := NewConsistentHasher(destinations)
	r.config.Store(consistentHashingConfig{baseConfig{*m, destinations},
		&hasher})
	r.run()
	return r, nil
}
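
The route constructors above follow the same shape: validate the patterns via matcher.New, store the initial config in the atomic.Value, then start the route. A hedged sketch of one call site, assuming it sits in the same package as NewSendFirstMatch (which already imports dest) and that the caller has built its destinations elsewhere; the function name and pattern are illustrative.

func exampleSendFirstMatch(destinations []*dest.Destination) (Route, error) {
	// Route metric names starting with "stats." (the other two patterns are
	// unused); per the doc comment, the route and its destinations start
	// running as soon as the constructor returns.
	return NewSendFirstMatch("stats-route", "stats.", "", "", destinations)
}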
Example #4
// NewGrafanaNet creates a special route that writes to a grafana.net datastore
// We will automatically run the route and the destination
// ignores spool for now
func NewGrafanaNet(key, prefix, sub, regex, addr, apiKey, schemasFile string, spool, sslVerify bool, bufSize, flushMaxNum, flushMaxWait, timeout int) (Route, error) {
	m, err := matcher.New(prefix, sub, regex)
	if err != nil {
		return nil, err
	}
	schemas, err := persister.ReadWhisperSchemas(schemasFile)
	if err != nil {
		return nil, err
	}
	var defaultFound bool
	for _, schema := range schemas {
		if schema.Pattern.String() == ".*" {
			defaultFound = true
		}
		if len(schema.Retentions) == 0 {
			return nil, fmt.Errorf("retention setting cannot be empty")
		}
	}
	if !defaultFound {
		// for good graphite health: not sure what graphite does if there's no '.*' pattern,
		// but we definitely need to always be able to determine which interval to use
		return nil, fmt.Errorf("storage-conf does not have a default '.*' pattern")
	}

	cleanAddr := util.AddrToPath(addr)

	r := &GrafanaNet{
		baseRoute: baseRoute{sync.Mutex{}, atomic.Value{}, key},
		addr:      addr,
		apiKey:    apiKey,
		buf:       make(chan []byte, bufSize), // takes about 228MB on 64bit
		schemas:   schemas,

		bufSize:      bufSize,
		flushMaxNum:  flushMaxNum,
		flushMaxWait: time.Duration(flushMaxWait) * time.Millisecond,
		timeout:      time.Duration(timeout) * time.Millisecond,
		sslVerify:    sslVerify,

		numErrFlush:       stats.Counter("dest=" + cleanAddr + ".unit=Err.type=flush"),
		numOut:            stats.Counter("dest=" + cleanAddr + ".unit=Metric.direction=out"),
		durationTickFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=ticker"),
		durationManuFlush: stats.Timer("dest=" + cleanAddr + ".what=durationFlush.type=manual"),
		tickFlushSize:     stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=ticker"),
		manuFlushSize:     stats.Histogram("dest=" + cleanAddr + ".unit=B.what=FlushSize.type=manual"),
		numBuffered:       stats.Gauge("dest=" + cleanAddr + ".unit=Metric.what=numBuffered"),
	}

	r.config.Store(baseConfig{*m, make([]*dest.Destination, 0)})
	go r.run()
	return r, nil
}
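
NewGrafanaNet takes a long positional parameter list, so a call site reads best with one argument per line. A hedged sketch, assumed to live in the same package as the constructor; every concrete value below is illustrative and none of them come from the source.

func exampleGrafanaNetRoute() (Route, error) {
	return NewGrafanaNet(
		"grafanaNet", // key
		"", "", "",   // prefix, sub, regex: match everything
		"https://tsdb.example.org/metrics",          // addr
		"my-api-key",                                // apiKey
		"/etc/carbon-relay-ng/storage-schemas.conf", // schemasFile (must contain a '.*' default)
		false,  // spool (ignored for now, per the doc comment)
		true,   // sslVerify
		100000, // bufSize
		10000,  // flushMaxNum
		100,    // flushMaxWait in milliseconds
		2000,   // timeout in milliseconds
	)
}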
// New creates a destination object. Note that it still needs to be told to run via Run().
func New(prefix, sub, regex, addr, spoolDir string, spool, pickle bool, periodFlush, periodReConn time.Duration) (*Destination, error) {
	m, err := matcher.New(prefix, sub, regex)
	if err != nil {
		return nil, err
	}
	addr, instance := addrInstanceSplit(addr)
	cleanAddr := util.AddrToPath(addr)
	dest := &Destination{
		Matcher:      *m,
		Addr:         addr,
		Instance:     instance,
		SpoolDir:     spoolDir,
		Spool:        spool,
		Pickle:       pickle,
		cleanAddr:    cleanAddr,
		periodFlush:  periodFlush,
		periodReConn: periodReConn,
	}
	dest.setMetrics()
	return dest, nil
}
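
A hedged sketch of calling this constructor, assumed to sit in the same package (with the usual time import); the address, spool directory and periods are illustrative. As the doc comment says, the returned destination stays idle until Run() is called on it.

func exampleDestination() (*Destination, error) {
	// Match everything, send to a local carbon listener over the plain-text
	// protocol (pickle=false), enable spooling to the given spool directory,
	// with a 1s flush period and a 10s reconnect period.
	return New("", "", "", "localhost:2003", "/var/spool/carbon-relay-ng",
		true, false, time.Second, 10*time.Second)
}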
// Update applies runtime option changes to the destination.
// can't be changed yet: pickle, spool, flush, reconn
func (dest *Destination) Update(opts map[string]string) error {
	match := dest.GetMatcher()
	prefix := match.Prefix
	sub := match.Sub
	regex := match.Regex
	updateMatcher := false
	addr := ""

	for name, val := range opts {
		switch name {
		case "addr":
			addr = val
		case "prefix":
			prefix = val
			updateMatcher = true
		case "sub":
			sub = val
			updateMatcher = true
		case "regex":
			regex = val
			updateMatcher = true
		default:
			return errors.New("no such option: " + name)
		}
	}
	if addr != "" {
		dest.updateConn(addr)
	}
	if updateMatcher {
		match, err := matcher.New(prefix, sub, regex)
		if err != nil {
			return err
		}
		dest.UpdateMatcher(*match)
	}
	return nil
}
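
A hedged sketch of driving Update at runtime, assumed to be in the same package; the function name and values are illustrative. Only the keys handled above (addr, prefix, sub, regex) are accepted, anything else returns the "no such option" error, and pattern changes are validated by matcher.New before they take effect.

func retargetDestination(d *Destination, newAddr string) error {
	// Point the destination at a new address and narrow it to metrics
	// whose names start with "stats.".
	return d.Update(map[string]string{
		"addr":   newAddr,
		"prefix": "stats.",
	})
}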
// readAddBlack parses a blacklist directive: a method word (prefix, sub or
// regex) followed by a single pattern, which is compiled into a matcher and
// added to the table's blacklist.
func readAddBlack(s *toki.Scanner, table *tbl.Table) error {
	prefixPat := ""
	subPat := ""
	regexPat := ""
	t := s.Next()
	if t.Token != word {
		return errFmtAddBlack
	}
	method := string(t.Value)
	switch method {
	case "prefix":
		if t = s.Next(); t.Token != word {
			return errFmtAddBlack
		}
		prefixPat = string(t.Value)
	case "sub":
		if t = s.Next(); t.Token != word {
			return errFmtAddBlack
		}
		subPat = string(t.Value)
	case "regex":
		if t = s.Next(); t.Token != word {
			return errFmtAddBlack
		}
		regexPat = string(t.Value)
	default:
		return errFmtAddBlack
	}

	m, err := matcher.New(prefixPat, subPat, regexPat)
	if err != nil {
		return err
	}
	table.AddBlacklist(m)
	return nil
}
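
For reference, the parser above consumes exactly one method word (prefix, sub or regex) followed by a single pattern token; the other two pattern strings stay empty, and matcher.New validates whichever one was set (for regex this presumably means the pattern must compile) before the matcher is added to the table's blacklist.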