Example #1
// NewClient creates a sentinel client. Connects to the given sentinel instance,
// pulls the information for the masters of the given names, and creates an
// initial pool of connections for each master. The client will automatically
// replace the pool for any master should sentinel decide to fail the master
// over. The returned error is a *ClientError.
func NewClient(
	network, address string, poolSize int, names ...string,
) (
	*Client, error,
) {

	// We use this to fetch initial details about masters before we upgrade it
	// to a pubsub client
	client, err := redis.Dial(network, address)
	if err != nil {
		return nil, &ClientError{err: err}
	}

	masterPools := map[string]*pool.Pool{}
	for _, name := range names {
		r := client.Cmd("SENTINEL", "MASTER", name)
		l, err := r.List()
		if err != nil {
			return nil, &ClientError{err: err, SentinelErr: true}
		}
		// The reply to SENTINEL MASTER <name> is a flat list of field names
		// and values; l[3] is the master's ip and l[5] its port.
		addr := l[3] + ":" + l[5]
		pool, err := pool.New("tcp", addr, poolSize)
		if err != nil {
			return nil, &ClientError{err: err}
		}
		masterPools[name] = pool
	}

	subClient := pubsub.NewSubClient(client)
	subSendErr := subClient.Subscribe("+switch-master")
	if subSendErr != nil {
		return nil, &ClientError{err: subSendErr, SentinelErr: true}
	}
	r := client.ReadResp()
	if r.Err != nil {
		return nil, &ClientError{err: r.Err, SentinelErr: true}
	}

	c := &Client{
		poolSize:       poolSize,
		masterPools:    masterPools,
		subClient:      subClient,
		getCh:          make(chan *getReq),
		putCh:          make(chan *putReq),
		closeCh:        make(chan struct{}),
		alwaysErrCh:    make(chan *ClientError),
		switchMasterCh: make(chan *switchMaster),
	}

	go c.subSpin()
	go c.spin()
	return c, nil
}
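
A minimal usage sketch of the constructor above follows. The import path, the master name, and the GetMaster/PutMaster calls are assumptions about the surrounding package (they line up with the getCh/putCh plumbing in the constructor) and are not shown in the example itself.

package main

import (
	"log"

	// Assumed import path for the package shown above.
	"github.com/mediocregopher/radix.v2/sentinel"
)

func main() {
	// One pool of 10 connections per named master, watching a local sentinel.
	// "mymaster" is a placeholder master name.
	client, err := sentinel.NewClient("tcp", "127.0.0.1:26379", 10, "mymaster")
	if err != nil {
		log.Fatal(err)
	}

	// GetMaster/PutMaster are assumptions about how the getCh/putCh requests
	// are exposed to callers.
	conn, err := client.GetMaster("mymaster")
	if err != nil {
		log.Fatal(err)
	}
	defer client.PutMaster("mymaster", conn)

	if err := conn.Cmd("SET", "foo", "bar").Err; err != nil {
		log.Fatal(err)
	}
}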
Example #2
// spin is the client's main event loop. It is the only goroutine that touches
// masterPools: connection gets and puts, a sticky error that fails all later
// gets, master switchovers, and shutdown all arrive as channel messages and
// are handled serially here.
func (c *Client) spin() {
	for {
		select {
		case req := <-c.getCh:
			if c.alwaysErr != nil {
				req.retCh <- &getReqRet{nil, c.alwaysErr}
				continue
			}
			pool, ok := c.masterPools[req.name]
			if !ok {
				err := errors.New("unknown name: " + req.name)
				req.retCh <- &getReqRet{nil, &ClientError{err: err}}
				continue
			}
			conn, err := pool.Get()
			if err != nil {
				req.retCh <- &getReqRet{nil, &ClientError{err: err}}
				continue
			}
			req.retCh <- &getReqRet{conn, nil}

		case req := <-c.putCh:
			if pool, ok := c.masterPools[req.name]; ok {
				pool.Put(req.conn)
			}

		case err := <-c.alwaysErrCh:
			c.alwaysErr = err

		case sm := <-c.switchMasterCh:
			if p, ok := c.masterPools[sm.name]; ok {
				p.Empty()
				p, _ = pool.New("tcp", sm.addr, c.poolSize)
				c.masterPools[sm.name] = p
			}

		case <-c.closeCh:
			for name := range c.masterPools {
				c.masterPools[name].Empty()
			}
			c.subClient.Client.Close()
			close(c.getCh)
			close(c.putCh)
			return
		}
	}
}
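
The spin loop above is the only goroutine that touches masterPools; callers reach it through the getCh/putCh channels. Below is a hedged sketch of what the caller side of the getCh handshake might look like. The getReq/getReqRet field names mirror how spin() uses them, but the method name and exact field layout are assumptions, and shutdown races (sending on getCh after the client is closed) are ignored.

// GetMaster is a hypothetical caller-side sketch of the getCh handshake:
// ask spin() for a connection to the named master and wait for its reply.
func (c *Client) GetMaster(name string) (*redis.Client, error) {
	req := getReq{name: name, retCh: make(chan *getReqRet)}
	c.getCh <- &req
	ret := <-req.retCh
	if ret.err != nil {
		return nil, ret.err
	}
	return ret.conn, nil
}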
Example #3
func TestReset(t *T) {
	// Simply initializing a cluster proves Reset works to some degree, since
	// NewCluster calls Reset
	cluster := getCluster(t)
	old7000Pool := cluster.pools["127.0.0.1:7000"]
	old7001Pool := cluster.pools["127.0.0.1:7001"]

	// We make a bogus pool and add it to the cluster to prove that it gets
	// removed by Reset, since its address isn't part of the cluster
	p, err := pool.New("tcp", "127.0.0.1:6379", 10)
	assert.Nil(t, err)
	cluster.pools["127.0.0.1:6379"] = p

	// We use resetInnerUsingPool so we can explicitly choose which pool is
	// used and don't accidentally reset through the 6379 one (which doesn't
	// support CLUSTER commands)
	respCh := make(chan bool)
	cluster.callCh <- func(c *Cluster) {
		err := cluster.resetInnerUsingPool(old7000Pool)
		assert.Nil(t, err)
		respCh <- true
	}
	<-respCh

	// Prove that the bogus pool was removed
	_, ok := cluster.pools["127.0.0.1:6379"]
	assert.Equal(t, false, ok)

	// Prove that the remaining two addresses still have their original pools
	// (they were not reconnected) and still work
	assert.Equal(t, 2, len(cluster.pools))
	assert.Equal(t, old7000Pool, cluster.pools["127.0.0.1:7000"])
	assert.Equal(t, old7001Pool, cluster.pools["127.0.0.1:7001"])
	assert.Nil(t, cluster.Cmd("GET", "foo").Err)
	assert.Nil(t, cluster.Cmd("GET", "bar").Err)
}
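
The callCh handshake in this test (ship a func(*Cluster) into the cluster's own goroutine, then wait on respCh) is the same single-owner pattern the sentinel client's spin() uses. A hypothetical convenience wrapper for it could look like the following; callCh and its func(*Cluster) element type are taken from the test, while the helper name is an assumption.

// callSync is a hypothetical wrapper around the callCh pattern used in
// TestReset: it runs fn inside the cluster's event loop goroutine and
// blocks until fn has returned.
func (c *Cluster) callSync(fn func(*Cluster)) {
	done := make(chan struct{})
	c.callCh <- func(inner *Cluster) {
		fn(inner)
		close(done)
	}
	<-done
}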