Example #1
// reaper closes idle connections to the cluster.
func (c *ClusterClient) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for range ticker.C {
		if c.closed() {
			break
		}

		var n int
		for _, client := range c.getClients() {
			nn, err := client.connPool.(*pool.ConnPool).ReapStaleConns()
			if err != nil {
				internal.Logf("ReapStaleConns failed: %s", err)
			} else {
				n += nn
			}
		}

		s := c.PoolStats()
		internal.Logf(
			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
		)
	}
}
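Every example on this page logs through internal.Logf. For orientation, here is a minimal sketch of such a helper, assuming it is just a thin wrapper around a package-level *log.Logger; the variable name and defaults below are illustrative, not the library's actual definition.

package internal

import (
	"fmt"
	"log"
	"os"
)

// Logger receives diagnostic messages; setting it to nil disables logging.
var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags)

// Logf formats a message and writes it to Logger, if one is configured.
func Logf(format string, args ...interface{}) {
	if Logger == nil {
		return
	}
	Logger.Output(2, fmt.Sprintf(format, args...))
}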
Example #2
// resubscribe re-issues any stored channel and pattern subscriptions.
func (c *PubSub) resubscribe() {
	if c.base.closed() {
		return
	}
	if len(c.channels) > 0 {
		if err := c.Subscribe(c.channels...); err != nil {
			internal.Logf("Subscribe failed: %s", err)
		}
	}
	if len(c.patterns) > 0 {
		if err := c.PSubscribe(c.patterns...); err != nil {
			internal.Logf("PSubscribe failed: %s", err)
		}
	}
}
Example #3
// closeOldConns closes connections to the old master after failover switch.
func (d *sentinelFailover) closeOldConns(newMaster string) {
	// Good connections that should be put back to the pool. They
	// can't be put back immediately, because pool.PopFree would return
	// them again on the next iteration.
	cnsToPut := make([]*pool.Conn, 0)

	for {
		cn := d.pool.PopFree()
		if cn == nil {
			break
		}
		if cn.RemoteAddr().String() != newMaster {
			err := fmt.Errorf(
				"sentinel: closing connection to the old master %s",
				cn.RemoteAddr(),
			)
			internal.Logf(err.Error())
			d.pool.Remove(cn, err)
		} else {
			cnsToPut = append(cnsToPut, cn)
		}
	}

	for _, cn := range cnsToPut {
		d.pool.Put(cn)
	}
}
Example #4
File: ring.go Project: otsimo/watch
// heartbeat monitors state of each shard in the ring.
func (ring *Ring) heartbeat() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for range ticker.C {
		var rebalance bool

		ring.mx.RLock()

		if ring.closed {
			ring.mx.RUnlock()
			break
		}

		for _, shard := range ring.shards {
			err := shard.Client.Ping().Err()
			if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
				internal.Logf("ring shard state changed: %s", shard)
				rebalance = true
			}
		}

		ring.mx.RUnlock()

		if rebalance {
			ring.rebalance()
		}
	}
}
Example #5
// reloadSlots refreshes the cluster slot map using a randomly chosen node.
func (c *ClusterClient) reloadSlots() {
	defer atomic.StoreUint32(&c.reloading, 0)

	client, err := c.randomClient()
	if err != nil {
		internal.Logf("randomClient failed: %s", err)
		return
	}

	slots, err := client.ClusterSlots().Result()
	if err != nil {
		internal.Logf("ClusterSlots failed: %s", err)
		return
	}
	c.setSlots(slots)
}
Example #6
// Close closes the client, releasing any open resources.
func (c *Multi) Close() error {
	c.closed = true
	if err := c.Unwatch().Err(); err != nil {
		internal.Logf("Unwatch failed: %s", err)
	}
	return c.base.Close()
}
Example #7
// MasterAddr returns the address of the current master, trying the last known
// working sentinel first and falling back to the configured sentinel addresses.
func (d *sentinelFailover) MasterAddr() (string, error) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Try last working sentinel.
	if d.sentinel != nil {
		addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
		if err != nil {
			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
			d._resetSentinel()
		} else {
			addr := net.JoinHostPort(addr[0], addr[1])
			internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
			return addr, nil
		}
	}

	for i, sentinelAddr := range d.sentinelAddrs {
		sentinel := newSentinel(&Options{
			Addr: sentinelAddr,

			DialTimeout:  d.opt.DialTimeout,
			ReadTimeout:  d.opt.ReadTimeout,
			WriteTimeout: d.opt.WriteTimeout,

			PoolSize:    d.opt.PoolSize,
			PoolTimeout: d.opt.PoolTimeout,
			IdleTimeout: d.opt.IdleTimeout,
		})
		masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
		if err != nil {
			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
			sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]

		d.setSentinel(sentinel)
		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
		return addr, nil
	}

	return "", errors.New("redis: all sentinels are unreachable")
}
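For context, a hedged sketch of how MasterAddr is typically consumed: a failover-aware dialer resolves the current master immediately before each connection attempt. The method name and fields below are assumptions for illustration, not the library's actual code.

// Hypothetical dialer: always dial whatever the sentinels currently report
// as the master address.
func (d *sentinelFailover) dial() (net.Conn, error) {
	addr, err := d.MasterAddr()
	if err != nil {
		return nil, err
	}
	return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
}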
Example #8
// formatSec formats a duration as a whole number of seconds, logging a
// warning for non-zero durations below one second.
func formatSec(dur time.Duration) string {
	if dur > 0 && dur < time.Second {
		internal.Logf(
			"specified duration is %s, but minimal supported value is %s",
			dur, time.Second,
		)
	}
	return formatInt(int64(dur / time.Second))
}
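A brief illustration of the truncation behavior (a hypothetical snippet, not part of the source): the duration is divided by time.Second with integer division, so anything under a second triggers the warning above and serializes as "0".

// Illustrative only: formatSec truncates to whole seconds.
func demoFormatSec() {
	fmt.Println(formatSec(5 * time.Second))         // 5
	fmt.Println(formatSec(1500 * time.Millisecond)) // 1 (fraction dropped)
	fmt.Println(formatSec(10 * time.Millisecond))   // 0, after logging a warning
}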
Example #9
File: pool.go Project: otsimo/watch
// reaper periodically removes stale connections from the pool.
func (p *ConnPool) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for range ticker.C {
		if p.Closed() {
			break
		}
		n, err := p.ReapStaleConns()
		if err != nil {
			internal.Logf("ReapStaleConns failed: %s", err)
			continue
		}
		s := p.Stats()
		internal.Logf(
			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
		)
	}
}
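A sketch of how such a reaper is commonly wired up, assuming the pool constructor starts it as a background goroutine when idle checking is enabled; the option names and constructor below are illustrative, not the library's actual wiring.

// Hypothetical constructor wiring: run the reaper in the background.
func NewConnPool(opt *Options) *ConnPool {
	p := &ConnPool{
		// ... pool fields ...
	}
	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
		go p.reaper(opt.IdleCheckFrequency)
	}
	return p
}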
Example #10
File: pool.go Project: otsimo/watch
// Put returns a connection to the free list, or removes it from the pool if
// it still has unread buffered data.
func (p *ConnPool) Put(cn *Conn) error {
	if cn.Rd.Buffered() != 0 {
		b, _ := cn.Rd.Peek(cn.Rd.Buffered())
		err := fmt.Errorf("connection has unread data: %q", b)
		internal.Logf(err.Error())
		return p.Remove(cn, err)
	}
	p.freeConnsMu.Lock()
	p.freeConns = append(p.freeConns, cn)
	p.freeConnsMu.Unlock()
	p.queue <- struct{}{}
	return nil
}
Example #11
// listen waits for +switch-master notifications from the sentinel and closes
// connections to the old master when a failover happens.
func (d *sentinelFailover) listen(sentinel *sentinelClient) {
	var pubsub *PubSub
	for {
		if pubsub == nil {
			pubsub = sentinel.PubSub()
			if err := pubsub.Subscribe("+switch-master"); err != nil {
				internal.Logf("sentinel: Subscribe failed: %s", err)
				d.resetSentinel()
				return
			}
		}

		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			internal.Logf("sentinel: ReceiveMessage failed: %s", err)
			pubsub.Close()
			d.resetSentinel()
			return
		}

		switch msg.Channel {
		case "+switch-master":
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != d.masterName {
				internal.Logf("sentinel: ignore new %s addr", parts[0])
				continue
			}

			addr := net.JoinHostPort(parts[3], parts[4])
			internal.Logf(
				"sentinel: new %q addr is %s",
				d.masterName, addr,
			)

			d.closeOldConns(addr)
		}
	}
}
Example #12
// discoverSentinels asks the given sentinel for other sentinels monitoring
// the master and records any previously unknown addresses.
func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
	sentinels, err := sentinel.Sentinels(d.masterName).Result()
	if err != nil {
		internal.Logf("sentinel: Sentinels %q failed: %s", d.masterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			if key == "name" {
				sentinelAddr := vals[i+1].(string)
				if !contains(d.sentinelAddrs, sentinelAddr) {
					internal.Logf(
						"sentinel: discovered new %q sentinel: %s",
						d.masterName, sentinelAddr,
					)
					d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
				}
			}
		}
	}
}
Example #13
// receiveMessage returns the next message, retrying network errors and
// pinging the server after timeouts.
func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
	var errNum uint
	for {
		msgi, err := c.ReceiveTimeout(timeout)
		if err != nil {
			if !isNetworkError(err) {
				return nil, err
			}

			errNum++
			if errNum < 3 {
				if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
					err := c.Ping("")
					if err != nil {
						internal.Logf("PubSub.Ping failed: %s", err)
					}
				}
			} else {
				// Three consecutive errors - the connection is bad
				// and/or the Redis server is down.
				// Sleep so we don't exceed the max number of open connections.
				time.Sleep(time.Second)
			}
			continue
		}

		// Reset error number, because we received a message.
		errNum = 0

		switch msg := msgi.(type) {
		case *Subscription:
			// Ignore.
		case *Pong:
			// Ignore.
		case *Message:
			return msg, nil
		case *PMessage:
			return &Message{
				Channel: msg.Channel,
				Pattern: msg.Pattern,
				Payload: msg.Payload,
			}, nil
		default:
			return nil, fmt.Errorf("redis: unknown message: %T", msgi)
		}
	}
}
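Finally, a hedged sketch of how a caller might sit on top of this retry loop, mirroring the PubSub/Subscribe/ReceiveMessage calls used in the examples above and assuming the ordinary client exposes the same PubSub constructor as the sentinel client in Example #11.

// Illustrative consumer: blocks on ReceiveMessage, which retries network
// errors internally as shown above.
func consume(client *Client, channel string) {
	pubsub := client.PubSub()
	if err := pubsub.Subscribe(channel); err != nil {
		internal.Logf("Subscribe failed: %s", err)
		return
	}
	defer pubsub.Close()

	for {
		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			internal.Logf("ReceiveMessage failed: %s", err)
			return
		}
		fmt.Println(msg.Channel, msg.Payload)
	}
}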