Example 1
// NewThrottler creates a new client throttler that blocks spammy clients.
// UPDATED 2015-01-17: clients now have to specify the limits. Use 10 and
// 1000 if you want to use the old default values.
func NewThrottler(maxPerMinute int, maxHosts int64) *ClientThrottle {
	r := ClientThrottle{
		maxPerMinute: maxPerMinute,
		c:            cache.NewLRUCache(maxHosts),
		blocked:      cache.NewLRUCache(maxHosts),
		stop:         make(chan bool),
	}
	go r.cleanup()
	return &r
}
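
Per the comment above, callers that previously relied on the defaults now pass the limits explicitly. A minimal construction sketch using the old default values mentioned in the comment (10 requests per minute, up to 1000 tracked hosts); the helper name newDefaultThrottler is hypothetical and the throttler's other methods are not shown in this snippet:

// newDefaultThrottler re-creates the pre-change behaviour by passing the
// old default limits explicitly.
func newDefaultThrottler() *ClientThrottle {
	return NewThrottler(10, 1000)
}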
Example 2
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, sensitiveMode bool) *SchemaInfo {
	si := &SchemaInfo{
		queryCacheSize: queryCacheSize,
		queries:        cache.NewLRUCache(int64(queryCacheSize)),
		rules:          NewQueryRules(),
		connPool:       NewConnectionPool("", 2, idleTimeout),
		reloadTime:     reloadTime,
		ticks:          timer.NewTimer(reloadTime),
		sensitiveMode:  sensitiveMode,
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(func() time.Duration {
		return si.reloadTime
	}))
	stats.Publish("TableStats", stats.NewMatrixFunc("Table", "Stats", si.getTableStats))
	stats.Publish("TableInvalidations", stats.CountersFunc(si.getTableInvalidations))
	stats.Publish("QueryCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryCount))
	stats.Publish("QueryTimesNs", stats.NewMatrixFunc("Table", "Plan", si.getQueryTime))
	stats.Publish("QueryRowCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryRowCount))
	stats.Publish("QueryErrorCounts", stats.NewMatrixFunc("Table", "Plan", si.getQueryErrorCount))
	// query_plans cannot be shown in sensitive mode
	if !si.sensitiveMode {
		http.Handle("/debug/query_plans", si)
	}
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
Example 3
// NewSchemaInfo creates a new SchemaInfo.
func NewSchemaInfo(
	queryCacheSize int,
	statsPrefix string,
	endpoints map[string]string,
	reloadTime time.Duration,
	idleTimeout time.Duration,
	enablePublishStats bool,
	queryServiceStats *QueryServiceStats) *SchemaInfo {
	si := &SchemaInfo{
		queries:    cache.NewLRUCache(int64(queryCacheSize)),
		connPool:   NewConnPool("", 2, idleTimeout, enablePublishStats, queryServiceStats),
		ticks:      timer.NewTimer(reloadTime),
		endpoints:  endpoints,
		reloadTime: reloadTime,
	}
	if enablePublishStats {
		stats.Publish(statsPrefix+"QueryCacheLength", stats.IntFunc(si.queries.Length))
		stats.Publish(statsPrefix+"QueryCacheSize", stats.IntFunc(si.queries.Size))
		stats.Publish(statsPrefix+"QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
		stats.Publish(statsPrefix+"QueryCacheOldest", stats.StringFunc(func() string {
			return fmt.Sprintf("%v", si.queries.Oldest())
		}))
		stats.Publish(statsPrefix+"SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheStats", []string{"Table", "Stats"}, si.getRowcacheStats)
		_ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheInvalidations", []string{"Table"}, si.getRowcacheInvalidations)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
		_ = stats.NewMultiCountersFunc(statsPrefix+"QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	}
	for _, ep := range endpoints {
		http.Handle(ep, si)
	}
	return si
}
Example 4
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration) *SchemaInfo {
	si := &SchemaInfo{
		queries:  cache.NewLRUCache(int64(queryCacheSize)),
		rules:    NewQueryRules(),
		connPool: dbconnpool.NewConnectionPool("", 2, idleTimeout),
		ticks:    timer.NewTimer(reloadTime),
	}
	stats.Publish("QueryCacheLength", stats.IntFunc(si.queries.Length))
	stats.Publish("QueryCacheSize", stats.IntFunc(si.queries.Size))
	stats.Publish("QueryCacheCapacity", stats.IntFunc(si.queries.Capacity))
	stats.Publish("QueryCacheOldest", stats.StringFunc(func() string {
		return fmt.Sprintf("%v", si.queries.Oldest())
	}))
	stats.Publish("SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
	_ = stats.NewMultiCountersFunc("TableStats", []string{"Table", "Stats"}, si.getTableStats)
	_ = stats.NewMultiCountersFunc("TableInvalidations", []string{"Table"}, si.getTableInvalidations)
	_ = stats.NewMultiCountersFunc("QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
	_ = stats.NewMultiCountersFunc("QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
	_ = stats.NewMultiCountersFunc("QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
	_ = stats.NewMultiCountersFunc("QueryErrorCounts", []string{"Table", "Plan"}, si.getQueryErrorCount)
	http.Handle("/debug/query_plans", si)
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	http.Handle("/debug/schema", si)
	return si
}
Example 5
func TestSet(t *testing.T) {
	clientID := "clientID"
	duration := time.Hour
	lruCache := lru.NewLRUCache(10)
	cache := NewLRUCache(clientID, duration, lruCache)

	time := time.Unix(1408281677, 0)
	url := "http://example.com/fantasy"
	expectedContent := createLeagueList(League{LeagueKey: "123"})
	cache.Set(url, time, expectedContent)

	cacheKey := cache.getKey(url, time)
	value, ok := lruCache.Get(cacheKey)
	if !ok {
		t.Fatal("Content not set in LRU cache correctly")
	}

	lruCacheValue, ok := value.(*LRUCacheValue)
	if !ok {
		t.Fatalf("Incorrect type used in LRU cache: %T", value)
	}

	if lruCacheValue.content != expectedContent {
		t.Fatalf("Unepxected content in cache\n\texpected: %+v\n\t"+
			"actual: %+v",
			expectedContent,
			lruCacheValue.content)
	}
}
Example 6
func TestGetWithContent(t *testing.T) {
	clientID := "clientID"
	duration := time.Hour
	lruCache := lru.NewLRUCache(10)
	cache := NewLRUCache(clientID, duration, lruCache)

	time := time.Unix(1408281677, 0)
	url := "http://example.com/fantasy"

	cacheKey := cache.getKey(url, time)
	expectedContent := createLeagueList(League{LeagueKey: "123"})
	lruCache.Set(cacheKey, &LRUCacheValue{content: expectedContent})

	content, ok := cache.Get(url, time)
	if !ok {
		t.Fatal("Cache did not return content")
	}

	if content != expectedContent {
		t.Fatalf("Cache did not return expected content\n\texpected: %+v"+
			"\n\tactual: %+v",
			expectedContent,
			content)
	}
}
Example 7
func NewPeerCache(size int64, listLimit int) *PeerCache {
	c := &PeerCache{
		lru:       cache.NewLRUCache(size),
		listLimit: listLimit,
	}

	return c
}
Example 8
func NewPlanner(schema *planbuilder.Schema, cacheSize int) *Planner {
	plr := &Planner{
		schema: schema,
		plans:  cache.NewLRUCache(int64(cacheSize)),
	}
	// TODO(sougou): Uncomment after making Planner testable.
	//http.Handle("/debug/query_plans", plr)
	//http.Handle("/debug/schema", plr)
	return plr
}
Example 9
// NewPlanner creates a new planner for VTGate.
// It will watch the vschema in the topology until the ctx is closed.
func NewPlanner(ctx context.Context, serv topo.SrvTopoServer, cell string, cacheSize int) *Planner {
	plr := &Planner{
		serv:  serv,
		cell:  cell,
		plans: cache.NewLRUCache(int64(cacheSize)),
	}
	plr.WatchSrvVSchema(ctx, cell)
	plannerOnce.Do(func() {
		http.Handle("/debug/query_plans", plr)
		http.Handle("/debug/vschema", plr)
	})
	return plr
}
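
The plannerOnce guard referenced above lies outside this snippet; it is presumably a package-level sync.Once so that repeated NewPlanner calls register the /debug handlers only once (http.Handle panics on a duplicate pattern). A declaration consistent with that usage, shown here as an assumption:

// Assumed package-level guard (not part of the snippet above): ensures the
// /debug/query_plans and /debug/vschema handlers are registered only once.
var plannerOnce sync.Once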
Example 10
func NewSchemaInfo(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration) *SchemaInfo {
	si := &SchemaInfo{
		queryCacheSize: queryCacheSize,
		queries:        cache.NewLRUCache(uint64(queryCacheSize)),
		rules:          NewQueryRules(),
		connPool:       NewConnectionPool(2, idleTimeout),
		reloadTime:     reloadTime,
		ticks:          timer.NewTimer(reloadTime),
	}
	http.Handle("/debug/query_plans", si)
	http.Handle("/debug/query_stats", si)
	http.Handle("/debug/table_stats", si)
	return si
}
Example 11
// NewEstimator initializes an Estimator object with given capacity and EWMA weightingFactor
func NewEstimator(ca int64, wf float64) *Estimator {
	if ca < 1 {
		log.Infof("Invalid capacity value: %v, falling back to default(%v)", ca, DefaultCapacity)
		ca = DefaultCapacity
	}
	if wf < 0 || wf > 1 {
		log.Infof("Invalid weighting factor: %v, falling back to default(%v)", wf, ewma.DefaultWeightingFactor)
		wf = ewma.DefaultWeightingFactor
	}
	return &Estimator{
		records:         cache.NewLRUCache(ca),
		weightingFactor: wf,
	}
}
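
Since NewEstimator validates both arguments, out-of-range values fall back to the package defaults instead of failing. A small illustration; newEstimatorWithBadInput is a hypothetical helper and the argument values are arbitrary:

// Both arguments are out of range here (capacity < 1, weighting factor > 1),
// so NewEstimator logs the fallback and uses DefaultCapacity and
// ewma.DefaultWeightingFactor instead.
func newEstimatorWithBadInput() *Estimator {
	return NewEstimator(0, 1.5)
}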
Example 12
func TestGetNoContent(t *testing.T) {
	clientID := "clientID"
	duration := time.Hour
	lruCache := lru.NewLRUCache(10)
	cache := NewLRUCache(clientID, duration, lruCache)

	time := time.Unix(1408281677, 0)
	content, ok := cache.Get("http://example.com/fantasy", time)

	if ok {
		t.Fatalf("Cache returned content when it should not have been cached"+
			"content: %+v",
			content)
	}
}
Example 13
func TestGetContentOfWrongType(t *testing.T) {
	clientID := "clientID"
	duration := time.Hour
	lruCache := lru.NewLRUCache(10)
	cache := NewLRUCache(clientID, duration, lruCache)

	time := time.Unix(1408281677, 0)
	url := "http://example.com/fantasy"

	cacheKey := cache.getKey(url, time)
	lruCache.Set(cacheKey, mockedValue{})

	content, ok := cache.Get(url, time)
	if ok {
		t.Fatalf("Cache returned content when it the wrong type had been cached"+
			"content: %+v",
			content)
	}
}
Example 14
// NewConsolidator creates a new Consolidator
func NewConsolidator() *Consolidator {
	return &Consolidator{queries: make(map[string]*Result), consolidations: cache.NewLRUCache(1000)}
}
Example 15
package lookup

import (
	"time"

	"github.com/youtube/vitess/go/cache"
)

const (
	cacheCapacity = 1024 * 1024 // 1MB
	cacheTTL      = 1           // 1 second
)

var (
	resolveCache = cache.NewLRUCache(cacheCapacity)
)

type cacheValue struct {
	Value     []string
	CreatedAt int64
}

func (cv *cacheValue) Size() int {
	var size int
	for _, s := range cv.Value {
		size += len(s)
	}
	return size
}

// Expired reports whether the cached entry has outlived cacheTTL (in seconds).
func (cv *cacheValue) Expired() bool {
	return time.Now().Unix()-cv.CreatedAt > cacheTTL
}
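
Building on Expired, a sketch of how resolveCache might be consulted on the read path, assuming the same Get/Set API used in the test examples earlier; cachedResolve is a hypothetical helper name:

// cachedResolve returns the cached values for key only if an entry exists,
// has the expected type, and has not outlived cacheTTL.
func cachedResolve(key string) ([]string, bool) {
	v, ok := resolveCache.Get(key)
	if !ok {
		return nil, false
	}
	cv, ok := v.(*cacheValue)
	if !ok || cv.Expired() {
		return nil, false
	}
	return cv.Value, true
}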
Example 16
// NewConsolidator creates a new Consolidator
func NewConsolidator() *Consolidator {
	co := &Consolidator{queries: make(map[string]*Result), consolidations: cache.NewLRUCache(1000)}
	http.Handle("/debug/consolidations", co)
	return co
}