Example #1
// NewBranchTileBuilder returns an instance of BranchTileBuilder that allows
// creating tiles from the given VCS or code review system by querying db.
//
// TODO(stephana): The EventBus is used to update the internal cache as commits are updated.
func NewBranchTileBuilder(db DB, git *gitinfo.GitInfo, reviewURL string, evt *eventbus.EventBus) BranchTileBuilder {
	return &tileBuilder{
		db:        db,
		vcs:       git,
		review:    rietveld.New(reviewURL, util.NewTimeoutClient()),
		reviewURL: reviewURL,
		cache:     lru.New(MAX_ISSUE_CACHE_SIZE),
		tcache:    lru.New(MAX_TILE_CACHE_SIZE),
	}
}
Example #2
func New(oncecap, twicecap int) *Cache {
	c := &Cache{
		once:  lru.New(oncecap),
		twice: lru.New(twicecap),
	}
	// make sure keys evicted from twice make it to the head of once
	c.twice.OnEvicted = func(k lru.Key, v interface{}) {
		c.once.Add(k, v)
	}
	return c
}
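The OnEvicted hook used above is what chains the two tiers together; the signatures match github.com/golang/groupcache/lru, so the following minimal, self-contained sketch assumes that package (the capacities and keys are illustrative, not taken from the project):
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	once := lru.New(2)
	twice := lru.New(2)

	// Keys pushed out of the second tier fall back into the first tier.
	twice.OnEvicted = func(k lru.Key, v interface{}) {
		once.Add(k, v)
	}

	twice.Add("a", 1)
	twice.Add("b", 2)
	twice.Add("c", 3) // evicts "a" from twice, which re-adds it to once

	_, ok := once.Get("a")
	fmt.Println(ok) // true
}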
Example #3
// NewTraceServiceServer creates a new DB that stores the data in BoltDB format at
// the given filename location.
func NewTraceServiceServer(filename string) (*TraceServiceImpl, error) {
	d, err := bolt.Open(filename, 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return nil, fmt.Errorf("Failed to open BoltDB at %s: %s", filename, err)
	}
	createBuckets := func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(COMMIT_BUCKET_NAME))
		if err != nil {
			return fmt.Errorf("Failed to create bucket %s: %s", COMMIT_BUCKET_NAME, err)
		}
		_, err = tx.CreateBucketIfNotExists([]byte(TRACE_BUCKET_NAME))
		if err != nil {
			return fmt.Errorf("Failed to create bucket %s: %s", TRACE_BUCKET_NAME, err)
		}
		_, err = tx.CreateBucketIfNotExists([]byte(TRACEID_BUCKET_NAME))
		if err != nil {
			return fmt.Errorf("Failed to create bucket %s: %s", TRACEID_BUCKET_NAME, err)
		}
		return nil
	}
	if err := d.Update(createBuckets); err != nil {
		return nil, fmt.Errorf("Failed to create buckets: %s", err)
	}
	return &TraceServiceImpl{
		db:    d,
		cache: lru.New(MAX_INT64_ID_CACHED),
	}, nil
}
Example #4
func newPeerStore(maxInfoHashes, maxInfoHashPeers int) *peerStore {
	return &peerStore{
		values:           lru.New(maxInfoHashes),
		maxInfoHashes:    maxInfoHashes,
		maxInfoHashPeers: maxInfoHashPeers,
	}
}
Example #5
func BenchmarkSetGPLRU(b *testing.B) {
	plru := lru.New(lruSize)
	// add b.N entries to the LRU
	for n := 0; n < b.N; n++ {
		plru.Add("A"+strconv.FormatInt(int64(n), 10), "A")
	}
}
Example #6
func (ac *AdminController) SaveBlogEditCtr(c *gin.Context) {
	session := sessions.Default(c)
	username := session.Get("username")
	if username == nil {
		(&umsg{"You have no permission", "/"}).ShowMessage(c)
		return
	}
	var BI EditBlogItem
	c.BindWith(&BI, binding.Form)
	if BI.Aid == "" {
		(&umsg{"Can not find the blog been edit", "/"}).ShowMessage(c)
		return
	}
	if BI.Title == "" {
		(&umsg{"Title can not empty", "/"}).ShowMessage(c)
		return
	}
	if BI.Content == "" {
		(&umsg{"Content can not empty", "/"}).ShowMessage(c)
		return
	}
	_, err := DB.Exec("update top_article set title=?, content=? where aid = ?", BI.Title, BI.Content, BI.Aid)
	if err == nil {
		Cache = lru.New(8192)
		(&umsg{"Success", "/"}).ShowMessage(c)
	} else {
		(&umsg{"Failed to save blog", "/"}).ShowMessage(c)
	}

}
Example #7
// NewTraceServiceDB creates a new DB that stores the data in the BoltDB backed
// gRPC accessible traceservice.
func NewTraceServiceDB(conn *grpc.ClientConn, traceBuilder tiling.TraceBuilder) (*TsDB, error) {
	ret := &TsDB{
		conn:         conn,
		traceService: traceservice.NewTraceServiceClient(conn),
		traceBuilder: traceBuilder,
		cache:        lru.New(MAX_ID_CACHED),
		ctx:          context.Background(),
	}

	// This ping causes the client to try and reach the backend. If the backend
	// is down, it will keep trying until it's up.
	if err := ret.ping(); err != nil {
		return nil, err
	}

	go func() {
		liveness := metrics.NewLiveness("tracedb-ping")
		for range time.Tick(time.Minute) {
			if ret.ping() == nil {
				liveness.Update()
			}
		}
	}()
	return ret, nil
}
Example #8
func (ac *AdminController) SaveBlogAddCtr(c *gin.Context) {
	session := sessions.Default(c)
	username := session.Get("username")
	if username == nil {
		(&umsg{"You have no permission", "/"}).ShowMessage(c)
		return
	}
	var BI BlogItem
	c.BindWith(&BI, binding.Form)
	if BI.Title == "" {
		(&umsg{"Title can not empty", "/"}).ShowMessage(c)
		return
	}
	if BI.Content == "" {
		(&umsg{"Content can not empty", "/"}).ShowMessage(c)
		return
	}
	_, err := DB.Exec(
		"insert into top_article (title, content, publish_time, publish_status) values (?, ?, ?, 1)",
		BI.Title, BI.Content, time.Now().Format("2006-01-02 15:04:05"))
	if err == nil {
		Cache = lru.New(8192)
		(&umsg{"Success", "/"}).ShowMessage(c)
	} else {
		(&umsg{"Failed to save blog", "/"}).ShowMessage(c)
	}

}
Example #9
func initStmtsLRU(max int) {
	if stmtsLRU.lru != nil {
		stmtsLRU.Max(max)
	} else {
		stmtsLRU.lru = lru.New(max)
	}
}
Example #10
// Start initializes the limiter for execution.
func (il *intervalVaryByLimiter) Start() {
	if il.bursts < 0 {
		il.bursts = 0
	}
	il.keys = lru.New(il.maxKeys)
	il.keys.OnEvicted = il.stopProcess
}
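Tying OnEvicted to a cleanup routine, as stopProcess is here, releases whatever background state a key owns once the LRU drops it. A rough sketch of that pattern, again assuming github.com/golang/groupcache/lru and an illustrative per-key ticker (stopProcess itself is not shown above, so the names below are assumptions):
package main

import (
	"fmt"
	"time"

	"github.com/golang/groupcache/lru"
)

func main() {
	keys := lru.New(2) // illustrative capacity

	// Stop a key's ticker when the key is evicted, mirroring
	// il.keys.OnEvicted = il.stopProcess above.
	keys.OnEvicted = func(k lru.Key, v interface{}) {
		v.(*time.Ticker).Stop()
		fmt.Println("stopped background work for", k)
	}

	for _, k := range []string{"a", "b", "c"} {
		keys.Add(k, time.NewTicker(time.Second)) // adding "c" evicts "a"
	}
}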
Example #11
File: db.go Project: CowLeo/GoRedis
func New(rdb *gorocksdb.DB) *DB {
	db := &DB{rdb: rdb}
	db.wo = gorocksdb.NewDefaultWriteOptions()
	db.ro = gorocksdb.NewDefaultReadOptions()
	db.caches = lru.New(1000)
	db.RawSet([]byte{MAXBYTE}, nil) // for Enumerator seek to last
	return db
}
Example #12
func newPeerStore(maxInfoHashes, maxInfoHashPeers int) *peerStore {
	return &peerStore{
		infoHashPeers:        lru.New(maxInfoHashes),
		localActiveDownloads: make(map[InfoHash]bool),
		maxInfoHashes:        maxInfoHashes,
		maxInfoHashPeers:     maxInfoHashPeers,
	}
}
Example #13
// NewSession creates a new Session using the given ConnectionPool and ClusterConfig.
func NewSession(p ConnectionPool, c ClusterConfig) *Session {
	session := &Session{Pool: p, cons: c.Consistency, prefetch: 0.25, cfg: c}

	// create the query info cache
	session.routingKeyInfoCache.lru = lru.New(c.MaxRoutingKeyInfo)

	return session
}
Example #14
func newStore() *Store {
	return &Store{
		Created:   time.Now().UnixNano(),
		Streams:   lru.New(maxStreams),
		Updates:   make(chan *Object, 100),
		streams:   make(map[string]int64),
		metadatas: make(map[string]*Metadata),
	}
}
Example #15
func NewFileHandler(notFound http.Handler) *FileHandler {
	reaper := &Reaper{make(chan Life)}
	go reaper.Run()
	lockChan := make(chan bool, 1)
	lockChan <- true

	return &FileHandler{
		make(chan string), notFound, reaper, lru.New(32), lockChan}
}
Example #16
// NewSession establishes a Session for the cluster described by cfg.
func NewSession(cfg ClusterConfig) (*Session, error) {
	// Check that the hosts in the ClusterConfig are not empty
	if len(cfg.Hosts) < 1 {
		return nil, ErrNoHosts
	}

	maxStreams := 128
	if cfg.ProtoVersion > protoVersion2 {
		maxStreams = 32768
	}

	if cfg.NumStreams <= 0 || cfg.NumStreams > maxStreams {
		cfg.NumStreams = maxStreams
	}

	pool, err := cfg.ConnPoolType(&cfg)
	if err != nil {
		return nil, err
	}

	//Adjust the size of the prepared statements cache to match the latest configuration
	stmtsLRU.Lock()
	initStmtsLRU(cfg.MaxPreparedStmts)
	stmtsLRU.Unlock()

	s := &Session{
		Pool:     pool,
		cons:     cfg.Consistency,
		prefetch: 0.25,
		cfg:      cfg,
	}

	//See if there are any connections in the pool
	if pool.Size() > 0 {
		s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo)

		s.SetConsistency(cfg.Consistency)
		s.SetPageSize(cfg.PageSize)

		if cfg.DiscoverHosts {
			s.hostSource = &ringDescriber{
				session:    s,
				dcFilter:   cfg.Discovery.DcFilter,
				rackFilter: cfg.Discovery.RackFilter,
				closeChan:  make(chan bool),
			}

			go s.hostSource.run(cfg.Discovery.Sleep)
		}

		return s, nil
	}

	s.Close()

	return nil, ErrNoConnectionsStarted
}
Example #17
func NewHandler() *GODNSHandler {

	var (
		resolver *Resolver
		Cache    *MemoryCache
	)
	resolver = &Resolver{}
	Cache = &MemoryCache{lru.New(MAX_CACHES), time.Duration(EXPIRE_SECONDS) * time.Second, MAX_CACHES}
	return &GODNSHandler{resolver, Cache}
}
Example #18
func BenchmarkNotSequentialGPLRU(b *testing.B) {
	plru := lru.New(lruSize)
	for i := 0; i < lruSize; i++ {
		plru.Add("A"+strconv.FormatInt(int64(i), 10), "A")
	}
	// look up b.N entries from the warmed LRU
	for n := 1; n <= b.N; n++ {
		plru.Get("A" + strconv.FormatInt(int64(lruSize%n), 10))
	}
}
Example #19
func NewHandler() *GODNSHandler {

	var (
		resolver *Resolver
		Cache    *lru.Cache
	)
	resolver = &Resolver{}
	Cache = lru.New(MAX_CACHES)
	return &GODNSHandler{resolver, Cache}
}
Example #20
// NewEventAggregator returns a new instance of an EventAggregator
func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
	maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
	return &EventAggregator{
		cache:                lru.New(lruCacheSize),
		keyFunc:              keyFunc,
		messageFunc:          messageFunc,
		maxEvents:            maxEvents,
		maxIntervalInSeconds: maxIntervalInSeconds,
		clock:                clock,
	}
}
Example #21
func NewDomains(cacheSize int) *Domains {
	d := Domains{
		cacheSize:     cacheSize,
		cache:         lru.New(cacheSize),
		baseDomains:   make(map[string][]UserData),
		suffixDomains: make(map[string][]UserData),
		panDomains:    make(map[string]*domainRecord),
		regexDomains:  make(map[string]*domainRecord),
	}
	return &d
}
Example #22
func NewMemLRUCache(maxEntries int) *MemLRUCache {
	ret := &MemLRUCache{
		cache: lru.New(maxEntries),
		keys:  map[string]bool{},
	}

	ret.cache.OnEvicted = func(key lru.Key, value interface{}) {
		delete(ret.keys, getStringKey(key))
	}

	return ret
}
Example #23
// BuildNavigation builds the Navigation for the DocSet.
func (d *DocSet) BuildNavigation() {
	// Walk the directory tree to build the navigation menu.
	root := filepath.Join(d.repoDir, config.REPO_SUBDIR)
	node, _ := walk(root, root)
	addDepth(node, 1)
	printnode(node, 0)
	s := buildNavString(node)
	d.mutex.Lock()
	defer d.mutex.Unlock()
	d.cache = lru.New(MARKDOWN_CACHE_SIZE)
	d.navigation = s
}
Example #24
File: favicon.go Project: heyLu/lp
func main() {
	flag.Parse()

	faviconCache = lru.New(*cacheSize)
	imageCache = lru.New(*cacheSize)
	imageHashes = lru.New(*cacheSize)

	http.HandleFunc("/favicon", HandleGetFavicon)
	http.HandleFunc("/favicon_proxy", HandleProxy)
	if p := os.Getenv("PORT"); p != "" {
		flag.Set("p", p)
	}

	addr := fmt.Sprintf("localhost:%d", *port)
	fmt.Printf("listening on %s\n", addr)
	err := http.ListenAndServe(addr, nil)
	if err != nil {
		fmt.Println("error: ", err)
		os.Exit(1)
	}
}
Example #25
func NewRedisDriver(server string) RedisDriver {
	parts := strings.SplitN(server, "://", 2)
	pool := redis.NewPool(func() (conn redis.Conn, err error) {
		conn, err = redis.Dial("tcp", parts[1])
		return
	}, 3)
	cache := lru.New(1000)
	var RWLocker = new(sync.Mutex)

	return RedisDriver{pool: pool, cache: cache, RWLocker: RWLocker}
}
Example #26
// RemoveType removes domains of the given type.
// The filter function f identifies the entries to delete; returning true means the entry should be removed.
// Base, suffix, and pan (wildcard) domains are stored in lower case, which f needs to take into account.
func (d *Domains) RemoveType(domainType DomainType, f func(domain string, domainType DomainType, userdata UserData) bool) {
	d.rwm.Lock()
	defer d.rwm.Unlock()

	removeBase := func(domainType DomainType, domains map[string][]UserData) *map[string][]UserData {
		newDomains := make(map[string][]UserData)
		for domain, userdatas := range domains {
			newUserdatas := make([]UserData, 0)
			for _, userdata := range userdatas {
				if f(domain, domainType, userdata) == false {
					newUserdatas = append(newUserdatas, userdata)
				}
			}
			if len(newUserdatas) != 0 {
				newDomains[domain] = newUserdatas
			}
		}
		return &newDomains
	}

	removeRegex := func(domainType DomainType, domains *map[string]*domainRecord) {
		delDomains := make([]string, 0)
		for domain, record := range *domains {
			newUserdatas := make([]UserData, 0)
			for _, userdata := range record.userdatas {
				if f(domain, domainType, userdata) == false {
					newUserdatas = append(newUserdatas, userdata)
				}
			}
			record.userdatas = newUserdatas
			if len(newUserdatas) == 0 {
				delDomains = append(delDomains, domain)
			}
		}
		for _, delDomain := range delDomains {
			delete(*domains, delDomain)
		}
	}

	switch domainType {
	case Base:
		d.baseDomains = *removeBase(domainType, d.baseDomains)
	case Suffix:
		d.suffixDomains = *removeBase(domainType, d.suffixDomains)
	case Pan:
		removeRegex(Pan, &d.panDomains)
	case Regex:
		removeRegex(Regex, &d.regexDomains)
	}

	d.cache = lru.New(d.cacheSize)
}
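A brief usage note: the filter only has to return true for the entries that should go away, and it receives the (lower-cased) domain together with its user data. For example, assuming d is a *Domains populated elsewhere and the domain string below is made up:
d.RemoveType(Base, func(domain string, domainType DomainType, userdata UserData) bool {
	return domain == "ads.example.com" // true: drop this entry
})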
Example #27
func NewFileDns(fpath string) (*FileDNS, error) {
	fileDns := FileDNS{}
	fileDns.domains = make(map[string][]*IpRecord)
	fileDns.SpreadRecord = make(map[string]*SpreadRecordDomain)
	fileDns.cache = lru.New(500)

	f, err := os.Open(fpath)
	if err != nil {
		return nil, err
	}

	dec := json.NewDecoder(f)

	var r FileRecord
	for {
		if err := dec.Decode(&r); err == io.EOF {
			break
		} else if err != nil {
			glog.Warning(fmt.Sprintf("error parsing DNS record file: %v\r\n", err))
			continue
		}
		ip := IpRecord{r.Ip, r.Ping, DefaultCredit}

		for _, d := range r.Domain {
			d = strings.ToLower(d)
			if strings.ContainsAny(d, "*?") {
				// wildcard (pan-resolution) record
				v, ok := fileDns.SpreadRecord[d]
				if ok == false {
					quoteDomain := regexp.QuoteMeta(d)
					regexpDomain := strings.Replace(quoteDomain, `\*`, `[^.]+`, -1)
					regexpDomain = strings.Replace(regexpDomain, `\?`, `[^.]`, -1)
					regexpDomain = fmt.Sprint(`^`, regexpDomain, `$`)

					r, err := regexp.Compile(regexpDomain)
					if err != nil {
						return nil, err
					}

					v = &SpreadRecordDomain{r, make([]*IpRecord, 0, 1)}
					fileDns.SpreadRecord[d] = v
				}
				v.ips = append(v.ips, &ip)
			} else {
				// ordinary record
				fileDns.domains[d] = append(fileDns.domains[d], &ip)
			}
		}
	}

	return &fileDns, nil
}
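The wildcard handling above quotes the pattern and then turns "*" into one or more non-dot characters and "?" into exactly one, so a wildcard never crosses a label boundary. The conversion is easy to exercise on its own; a small sketch (the helper name is made up for illustration):
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// wildcardToRegexp mirrors the conversion in NewFileDns: "*" matches one or
// more characters within a single label and "?" matches exactly one.
func wildcardToRegexp(pattern string) (*regexp.Regexp, error) {
	quoted := regexp.QuoteMeta(pattern)
	quoted = strings.Replace(quoted, `\*`, `[^.]+`, -1)
	quoted = strings.Replace(quoted, `\?`, `[^.]`, -1)
	return regexp.Compile("^" + quoted + "$")
}

func main() {
	re, _ := wildcardToRegexp("*.example.com")
	fmt.Println(re.MatchString("www.example.com")) // true
	fmt.Println(re.MatchString("a.b.example.com")) // false: "*" does not cross dots
}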
Example #28
func NewServer(bind string, maxEntries int, serverURL string) (*http.Server, error) {
	parsedURL, err := url.ParseRequestURI(serverURL)
	if err != nil {
		return nil, err
	}
	return &http.Server{
		Addr: bind,
		Handler: &Handler{
			cache:     lru.New(maxEntries),
			serverURL: parsedURL,
		},
	}, nil
}
Example #29
// NewMemStore creates a new MemStore. If maxKeys > 0, the number of different keys
// is restricted to the specified amount. In this case, it uses an LRU algorithm to
// evict older keys to make room for newer ones. If a request is made for a key that
// has been evicted, it will be processed as if its count was 0, possibly allowing requests
// that should be denied.
//
// If maxKeys <= 0, there is no limit on the number of keys, which may use an unbounded amount of
// memory depending on the server's load.
//
// The MemStore is only for single-process rate-limiting. To share the rate limit state
// among multiple instances of the web server, use a database- or key-value-based
// store.
//
func NewMemStore(maxKeys int) throttled.Store {
	var m *memStore
	if maxKeys > 0 {
		m = &memStore{
			keys: lru.New(maxKeys),
		}
	} else {
		m = &memStore{
			m: make(map[string]*counter),
		}
	}
	return m
}
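The eviction caveat in the comment (a dropped key starts over at a count of 0) is a property of the underlying LRU rather than of this package. A small illustrative sketch, assuming github.com/golang/groupcache/lru and a deliberately tiny capacity; the bump helper is made up for the demonstration:
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

// bump increments a per-key counter, treating a missing (possibly evicted)
// key as zero.
func bump(c *lru.Cache, key string) int {
	n := 0
	if v, ok := c.Get(key); ok {
		n = v.(int)
	}
	n++
	c.Add(key, n)
	return n
}

func main() {
	keys := lru.New(1) // room for a single key, to force eviction
	fmt.Println(bump(keys, "client-a")) // 1
	fmt.Println(bump(keys, "client-a")) // 2
	bump(keys, "client-b")              // evicts "client-a"
	fmt.Println(bump(keys, "client-a")) // 1: the previous count is gone
}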
Example #30
func (s *querysrv) Serve() {
	s.limiter = &safeCache{
		Cache: lru.New(lruSize),
	}

	if useHTTP {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			log.Println("Listen:", err)
			return
		}
		s.listener = listener
	} else {
		tlsCfg := &tls.Config{
			Certificates:           []tls.Certificate{s.cert},
			ClientAuth:             tls.RequestClientCert,
			SessionTicketsDisabled: true,
			MinVersion:             tls.VersionTLS12,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			},
		}

		tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
		if err != nil {
			log.Println("Listen:", err)
			return
		}
		s.listener = tlsListener
	}

	http.HandleFunc("/v2/", s.handler)
	http.HandleFunc("/ping", handlePing)

	srv := &http.Server{
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 10,
	}

	if err := srv.Serve(s.listener); err != nil {
		log.Println("Serve:", err)
	}
}