// reaper closes idle connections to the cluster.
func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
	ticker := time.NewTicker(idleCheckFrequency)
	defer ticker.Stop()

	for range ticker.C {
		nodes := c.getNodes()
		if nodes == nil {
			break
		}

		var n int
		for _, node := range nodes {
			nn, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
			if err != nil {
				internal.Logf("ReapStaleConns failed: %s", err)
			} else {
				n += nn
			}
		}

		s := c.PoolStats()
		internal.Logf(
			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
		)
	}
}
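// How the reaper is assumed to be wired up: started once, as a
// background goroutine, when the client is created. startClusterReaper
// is an illustrative sketch, not part of the library.
func startClusterReaper(c *ClusterClient, idleCheckFrequency time.Duration) {
	if idleCheckFrequency > 0 {
		// The goroutine exits on its own once getNodes returns nil,
		// i.e. after the client has been closed.
		go c.reaper(idleCheckFrequency)
	}
}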
// resubscribe re-issues the channel and pattern subscriptions, e.g.
// after the underlying connection has been re-established.
func (c *PubSub) resubscribe() {
	if c.base.closed() {
		return
	}
	if len(c.channels) > 0 {
		if err := c.Subscribe(c.channels...); err != nil {
			internal.Logf("Subscribe failed: %s", err)
		}
	}
	if len(c.patterns) > 0 {
		if err := c.PSubscribe(c.patterns...); err != nil {
			internal.Logf("PSubscribe failed: %s", err)
		}
	}
}
// cmdSlotAndNode returns the hash slot of the command's first key and
// the node the command should be routed to.
func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) {
	cmdInfo := c.cmdInfo(cmd.arg(0))
	if cmdInfo == nil {
		internal.Logf("info for cmd=%s not found", cmd.arg(0))
		node, err := c.randomNode()
		return 0, node, err
	}
	if cmdInfo.FirstKeyPos == -1 {
		node, err := c.randomNode()
		return 0, node, err
	}

	firstKey := cmd.arg(int(cmdInfo.FirstKeyPos))
	slot := hashtag.Slot(firstKey)

	if cmdInfo.ReadOnly && c.opt.ReadOnly {
		if c.opt.RouteByLatency {
			node, err := c.slotClosestNode(slot)
			return slot, node, err
		}
		node, err := c.slotSlaveNode(slot)
		return slot, node, err
	}

	node, err := c.slotMasterNode(slot)
	return slot, node, err
}
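// Routing in cmdSlotAndNode hinges on hashtag.Slot: keys sharing a
// {hash tag} hash to the same slot and therefore to the same node.
// sameSlotExample is an illustrative sketch of that property.
func sameSlotExample() {
	// Only the substring between the first "{" and the following "}"
	// is hashed, so both keys map to one slot and can safely be used
	// together in a single multi-key command.
	a := hashtag.Slot("{user1000}.following")
	b := hashtag.Slot("{user1000}.followers")
	fmt.Println(a == b) // true
}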
// closeOldConns closes connections to the old master after a failover switch.
func (d *sentinelFailover) closeOldConns(newMaster string) {
	// Good connections that should be put back to the pool. They
	// can't be put immediately, because pool.PopFree would return them
	// again on the next iteration.
	cnsToPut := make([]*pool.Conn, 0)

	for {
		cn := d.pool.PopFree()
		if cn == nil {
			break
		}
		if cn.RemoteAddr().String() != newMaster {
			err := fmt.Errorf(
				"sentinel: closing connection to the old master %s",
				cn.RemoteAddr(),
			)
			internal.Logf(err.Error())
			d.pool.Remove(cn, err)
		} else {
			cnsToPut = append(cnsToPut, cn)
		}
	}

	for _, cn := range cnsToPut {
		d.pool.Put(cn)
	}
}
// heartbeat monitors the state of each shard in the ring.
func (c *Ring) heartbeat() {
	ticker := time.NewTicker(c.opt.HeartbeatFrequency)
	defer ticker.Stop()

	for range ticker.C {
		var rebalance bool

		c.mu.RLock()
		if c.closed {
			c.mu.RUnlock()
			break
		}
		for _, shard := range c.shards {
			err := shard.Client.Ping().Err()
			if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
				internal.Logf("ring shard state changed: %s", shard)
				rebalance = true
			}
		}
		c.mu.RUnlock()

		if rebalance {
			c.rebalance()
		}
	}
}
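// What Vote is assumed to do, sketched as a standalone type: flip a
// shard's up/down state only after several consecutive votes in one
// direction, and report true when the state actually changes so that
// heartbeat knows to rebalance. The threshold and fields below are
// illustrative, not the library's implementation.
type shardVoteSketch struct {
	failures int  // consecutive failed pings
	up       bool // current shard state
}

func (s *shardVoteSketch) Vote(healthy bool) bool {
	const threshold = 3
	if healthy {
		changed := !s.up
		s.failures = 0
		s.up = true
		return changed
	}
	s.failures++
	if s.up && s.failures >= threshold {
		s.up = false
		return true // shard is now considered down
	}
	return false
}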
// MasterAddr resolves the current master address for the configured
// master name, trying the last working sentinel first and then the
// remaining sentinels in order.
func (d *sentinelFailover) MasterAddr() (string, error) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Try the last working sentinel.
	if d.sentinel != nil {
		addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
		if err != nil {
			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
			d._resetSentinel()
		} else {
			addr := net.JoinHostPort(addr[0], addr[1])
			internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
			return addr, nil
		}
	}

	for i, sentinelAddr := range d.sentinelAddrs {
		sentinel := newSentinel(&Options{
			Addr: sentinelAddr,

			DialTimeout:  d.opt.DialTimeout,
			ReadTimeout:  d.opt.ReadTimeout,
			WriteTimeout: d.opt.WriteTimeout,

			PoolSize:    d.opt.PoolSize,
			PoolTimeout: d.opt.PoolTimeout,
			IdleTimeout: d.opt.IdleTimeout,
		})
		masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
		if err != nil {
			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
			sentinel.Close()
			continue
		}

		// Push the working sentinel to the top.
		d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]

		d.setSentinel(sentinel)
		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
		return addr, nil
	}

	return "", errors.New("redis: all sentinels are unreachable")
}
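// Typical client-side usage that exercises MasterAddr under the hood;
// the addresses are placeholders. Every new connection is dialed
// against whatever address MasterAddr currently resolves.
func failoverClientExample() *Client {
	return NewFailoverClient(&FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"sentinel1:26379", "sentinel2:26379"},
	})
}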
func (c *Ring) cmdFirstKey(cmd Cmder) string {
	cmdInfo := c.cmdInfo(cmd.arg(0))
	if cmdInfo == nil {
		internal.Logf("info for cmd=%s not found", cmd.arg(0))
		return ""
	}
	return cmd.arg(int(cmdInfo.FirstKeyPos))
}
// close closes the transaction, releasing any open resources.
func (c *Tx) close() error {
	if c.closed {
		return nil
	}
	c.closed = true
	if err := c.Unwatch().Err(); err != nil {
		internal.Logf("Unwatch failed: %s", err)
	}
	return c.baseClient.Close()
}
// reaper periodically removes stale connections from the pool until the
// pool is closed.
func (p *ConnPool) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for range ticker.C {
		if p.Closed() {
			break
		}
		n, err := p.ReapStaleConns()
		if err != nil {
			internal.Logf("ReapStaleConns failed: %s", err)
			continue
		}
		s := p.Stats()
		internal.Logf(
			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
		)
	}
}
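// The staleness test ReapStaleConns is assumed to apply, extracted as a
// standalone predicate; connIsStale and its parameters are illustrative,
// not the pool's actual internals.
func connIsStale(lastUsed time.Time, idleTimeout time.Duration) bool {
	// A connection is stale once it has sat unused for idleTimeout;
	// a zero timeout disables reaping.
	return idleTimeout > 0 && time.Since(lastUsed) >= idleTimeout
}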
// Put returns a connection to the pool. Connections with unread
// buffered data are removed instead, because reusing them would
// desynchronize the reply stream.
func (p *ConnPool) Put(cn *Conn) error {
	if data := cn.Rd.PeekBuffered(); data != nil {
		err := fmt.Errorf("connection has unread data: %q", data)
		internal.Logf(err.Error())
		return p.Remove(cn, err)
	}
	p.freeConnsMu.Lock()
	p.freeConns = append(p.freeConns, cn)
	p.freeConnsMu.Unlock()
	<-p.queue // release a slot in the request queue
	return nil
}
// listen subscribes to sentinel's +switch-master channel and closes
// connections to the old master whenever a failover is announced.
func (d *sentinelFailover) listen(sentinel *sentinelClient) {
	var pubsub *PubSub
	for {
		if pubsub == nil {
			pubsub = sentinel.PubSub()
			if err := pubsub.Subscribe("+switch-master"); err != nil {
				internal.Logf("sentinel: Subscribe failed: %s", err)
				d.resetSentinel()
				return
			}
		}

		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			internal.Logf("sentinel: ReceiveMessage failed: %s", err)
			pubsub.Close()
			d.resetSentinel()
			return
		}

		switch msg.Channel {
		case "+switch-master":
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != d.masterName {
				internal.Logf("sentinel: ignore new %s addr", parts[0])
				continue
			}
			addr := net.JoinHostPort(parts[3], parts[4])
			internal.Logf(
				"sentinel: new %q addr is %s",
				d.masterName, addr,
			)
			d.closeOldConns(addr)
		}
	}
}
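// Why parts[3] and parts[4] above: the documented +switch-master payload
// is "<master-name> <old-ip> <old-port> <new-ip> <new-port>". A concrete
// parse with an illustrative payload:
func switchMasterExample() {
	payload := "mymaster 10.0.0.1 6379 10.0.0.2 6379"
	parts := strings.Split(payload, " ")
	newAddr := net.JoinHostPort(parts[3], parts[4])
	fmt.Println(newAddr) // 10.0.0.2:6379
}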
// discoverSentinels asks a known sentinel for its peers monitoring the
// same master and records any addresses not seen before.
func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
	sentinels, err := sentinel.Sentinels(d.masterName).Result()
	if err != nil {
		internal.Logf("sentinel: Sentinels %q failed: %s", d.masterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			if key == "name" {
				sentinelAddr := vals[i+1].(string)
				if !contains(d.sentinelAddrs, sentinelAddr) {
					internal.Logf(
						"sentinel: discovered new %q sentinel: %s",
						d.masterName, sentinelAddr,
					)
					d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
				}
			}
		}
	}
}
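// Shape of one entry in the Sentinels reply, as the loop above assumes:
// a flat list of alternating field names and values, where a sentinel's
// "name" field carries its "ip:port" address. The values below are
// illustrative.
func sentinelsReplyExample() {
	vals := []interface{}{
		"name", "10.0.0.3:26379",
		"ip", "10.0.0.3",
		"port", "26379",
	}
	for i := 0; i < len(vals); i += 2 {
		fmt.Printf("%s=%v\n", vals[i], vals[i+1])
	}
}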
// reloadSlots refreshes the cluster slot map from a random node.
func (c *ClusterClient) reloadSlots() {
	defer atomic.StoreUint32(&c.reloading, 0)

	node, err := c.randomNode()
	if err != nil {
		return
	}

	slots, err := node.Client.ClusterSlots().Result()
	if err != nil {
		internal.Logf("ClusterSlots on addr=%q failed: %s", node.Client.getAddr(), err)
		return
	}
	c.setSlots(slots)

	if c.opt.RouteByLatency {
		c.setNodesLatency()
	}
}
// receiveMessage reads a single message, transparently retrying
// transient network errors: after a timeout it pings the connection,
// and after 3 consecutive errors it backs off before retrying.
func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
	var errNum uint
	for {
		msgi, err := c.ReceiveTimeout(timeout)
		if err != nil {
			// Retry network errors (including a dropped connection);
			// return everything else to the caller.
			if _, ok := err.(net.Error); !ok && err != io.EOF {
				return nil, err
			}
			errNum++
			if errNum < 3 {
				if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
					err := c.Ping("")
					if err != nil {
						internal.Logf("PubSub.Ping failed: %s", err)
					}
				}
			} else {
				// 3 consecutive errors - the connection is bad and/or the
				// Redis server is down. Sleep so we don't exceed the max
				// number of open connections.
				time.Sleep(time.Second)
			}
			continue
		}

		// Reset the error counter, because we received a message.
		errNum = 0

		switch msg := msgi.(type) {
		case *Subscription:
			// Ignore.
		case *Pong:
			// Ignore.
		case *Message:
			return msg, nil
		default:
			return nil, fmt.Errorf("redis: unknown message: %T", msgi)
		}
	}
}
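// A consumer loop over receiveMessage's behavior: transient network
// errors are retried inside ReceiveMessage itself, so any error that
// surfaces here is treated as fatal for the subscription. How the
// *PubSub is obtained is left out; consume is an illustrative sketch.
func consume(pubsub *PubSub) {
	for {
		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			internal.Logf("ReceiveMessage failed: %s", err)
			return
		}
		fmt.Println(msg.Channel, msg.Payload)
	}
}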