func newSession( ctx scope.Context, server *Server, conn *websocket.Conn, roomName string, room proto.Room, client *proto.Client, agentKey *security.ManagedKey) *session { nextID := atomic.AddUint64(&sessionIDCounter, 1) sessionCount.WithLabelValues(roomName).Set(float64(nextID)) sessionID := fmt.Sprintf("%x-%08x", client.Agent.IDString(), nextID) ctx = LoggingContext(ctx, fmt.Sprintf("[%s] ", sessionID)) session := &session{ id: sessionID, ctx: ctx, server: server, conn: conn, identity: newMemIdentity(client.UserID(), server.ID, server.Era), client: client, agentKey: agentKey, serverID: server.ID, serverEra: server.Era, roomName: roomName, room: room, backend: server.b, kms: server.kms, incoming: make(chan *proto.Packet), outgoing: make(chan *proto.Packet, 100), floodLimiter: ratelimit.NewBucketWithQuantum(time.Second, 50, 10), } return session }
// Run executes the fetcher, starting as many parallel reads as specified by
// the MaxParallel option and returns when the read has finished, failed, or
// been stopped.
func (f *Fetcher) Run() error {
	// Buffered to MaxParallel so every worker can report its final error
	// without blocking, even after Run has stopped draining.
	errChan := make(chan error, f.MaxParallel)
	// Buffer of 2 lets both an external Stop and the internal error path
	// request a stop without blocking — presumably Stop() sends here too;
	// TODO confirm against the Stop implementation (not in view).
	f.stopRequest = make(chan struct{}, 2)
	f.stopNotify = make(chan struct{})
	f.limitCalc = newLimitCalc(limitCalcSize)
	if f.ReadCapacity > 0 {
		// Token bucket: capacity and refill rate both equal ReadCapacity
		// per second, matching DynamoDB provisioned read units.
		f.rateLimit = ratelimit.NewBucketWithQuantum(time.Second, int64(f.ReadCapacity), int64(f.ReadCapacity))
	}
	// Fan out a single stop request to all workers by closing stopNotify
	// exactly once (a second send on stopRequest is absorbed by the buffer).
	// NOTE(review): if Run completes with no error and Stop is never called,
	// this goroutine stays blocked for the life of the process — verify that
	// is acceptable or that Stop is always invoked.
	go func() {
		<-f.stopRequest
		close(f.stopNotify) // fanout
	}()
	// One worker per segment index.
	for i := int64(0); i < int64(f.MaxParallel); i++ {
		go f.processSegment(i, errChan)
	}
	var err error
	// wait for all workers to shutdown
	for i := 0; i < f.MaxParallel; i++ {
		if werr := <-errChan; werr != nil {
			// Keep only the first error; trigger the stop fan-out so the
			// remaining workers wind down. Later errors are discarded.
			if err == nil {
				err = werr
				f.stopRequest <- struct{}{}
			}
		}
	}
	return err
}
// retrieve a token bucket from the cache, keyed by ip address func getRateLimit(key string) *ratelimit.Bucket { // get or create the token bucket for this ip address cache.ContainsOrAdd(key, ratelimit.NewBucketWithQuantum(time.Second, burst, packetsPerSecond)) if bkt, ok := cache.Get(key); ok { return bkt.(*ratelimit.Bucket) } // if for some reasons the bucket gets evicted between adding and reading // just try again ... return getRateLimit(key) }
// Run executes the loader, starting goroutines to execute parallel puts
// as required. Returns when the load has finished, failed or been stopped.
func (ld *Loader) Run() error {
	// Buffered to MaxParallel so every writer can report its final error
	// without blocking.
	errChan := make(chan error, ld.MaxParallel)
	// Unbuffered hand-off from the single reader to the writer pool.
	itemsChan := make(chan map[string]*dynamodb.AttributeValue)
	readDone := make(chan error)
	// Buffer of 2 allows both the first-error path below and an external
	// Stop to request a stop without blocking.
	ld.stopRequest = make(chan struct{}, 2)
	ld.stopNotify = make(chan struct{})
	if ld.WriteCapacity > 0 {
		// Token bucket sized to the provisioned write capacity; the waiter
		// also observes stopNotify so a rate-limited wait can be aborted.
		ld.rateLimit = &rateLimitWaiter{
			Bucket:     ratelimit.NewBucketWithQuantum(time.Second, int64(ld.WriteCapacity), int64(ld.WriteCapacity)),
			stopNotify: ld.stopNotify,
		}
	}
	// Fan out a single stop request to everyone by closing stopNotify once.
	go func() {
		<-ld.stopRequest
		close(ld.stopNotify) // fanout
	}()
	// Reader goroutine: pull items from the source until EOF, an error,
	// MaxItems is reached, or a stop is signaled.
	go func() {
		var rc int64 // items read so far
		for {
			select {
			case <-ld.stopNotify:
				readDone <- nil
				return
			default:
				// NOTE(review): stopNotify is only checked at the top of
				// each iteration; if this goroutine is blocked sending on
				// itemsChan after all writers have exited, it could leak —
				// confirm the writers drain itemsChan until stopNotify.
				item, err := ld.Source.ReadItem()
				if err == io.EOF {
					readDone <- nil
					return
				} else if err != nil {
					readDone <- err
					return
				}
				itemsChan <- item
				rc++
				if rc == ld.MaxItems {
					readDone <- nil
					return
				}
			}
		}
	}()
	// Writer pool.
	for i := int64(0); i < int64(ld.MaxParallel); i++ {
		go ld.load(itemsChan, errChan)
	}
	// wait for either the reader or a writer to finish or fail
	rem := ld.MaxParallel // writers still owed a slot on errChan
	var err error
	select {
	case err = <-readDone:
		// reader exited
		ld.Stop()
	case err = <-errChan:
		// a writer finished first; one fewer result to drain below
		rem--
		ld.Stop()
	}
	// wait for all workers to shutdown
	for i := 0; i < rem; i++ {
		if werr := <-errChan; werr != nil {
			// Keep the first error seen; nudge the stop fan-out again
			// (absorbed by the stopRequest buffer if already stopped).
			if err == nil {
				err = werr
				ld.stopRequest <- struct{}{}
			}
		}
	}
	return err
}
// newFunc builds a fresh token bucket from the registry's configuration:
// capacity and per-period refill are both r.rate, refilled every r.per.
func (r *Registry) newFunc() *ratelimit.Bucket {
	n := int64(r.rate)
	return ratelimit.NewBucketWithQuantum(r.per, n, n)
}