Example 1
File: redisstore.go Project: knq/kv
// New creates a RedisStore and a redis connection pool for the supplied
// redis URL. Data written to or read from the store is keyed using the
// DefaultKeyPrefix, or the provided key prefix if one is supplied.
func New(redisURL string, keyPrefix ...string) (*RedisStore, error) {
	// determine key prefix
	keyPrefixVal := ""
	if len(keyPrefix) > 0 {
		keyPrefixVal = keyPrefix[0]
	}

	// parse url
	u, err := url.Parse(redisURL)
	if err != nil {
		return nil, err
	}

	// ensure that it's a redis:// URL
	if u.Scheme != "redis" {
		return nil, ErrInvalidScheme
	}

	// create pool
	p, err := pool.NewPool("tcp", u.Host, 1)
	if err != nil {
		return nil, err
	}

	return &RedisStore{
		KeyPrefix: keyPrefixVal,
		Pool:      p,
	}, nil
}
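
For reference, a minimal usage sketch of this constructor; the kv import path is inferred from the project name above, and the redis address and key prefix are placeholders:

package main

import (
	"log"

	"github.com/knq/kv" // import path inferred from the project name above
)

func main() {
	// connect to a local redis instance and prefix all keys with "myapp:"
	store, err := kv.New("redis://127.0.0.1:6379", "myapp:")
	if err != nil {
		log.Fatal(err)
	}
	_ = store // read and write through the store from here
}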
Example 2
// NewClient creates a sentinel client. It connects to the given sentinel
// instance, pulls the information for the masters with the given names, and
// creates an initial pool of connections for each master. The client will
// automatically replace the pool for any master should sentinel decide to
// fail the master over. The returned error is a *ClientError.
func NewClient(
	network, address string, poolSize int, names ...string,
) (
	*Client, error,
) {
	// We use this to fetch initial details about masters before we upgrade it
	// to a pubsub client
	client, err := redis.Dial(network, address)
	if err != nil {
		return nil, &ClientError{err: err}
	}

	masterPools := map[string]*pool.Pool{}
	for _, name := range names {
		r := client.Cmd("SENTINEL", "MASTER", name)
		l, err := r.List()
		if err != nil {
			return nil, &ClientError{err: err, SentinelErr: true}
		}
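		// SENTINEL MASTER replies with a flat list of field/value pairs;
		// index 3 holds the master's ip and index 5 its port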
		addr := l[3] + ":" + l[5]
		pool, err := pool.NewPool("tcp", addr, poolSize)
		if err != nil {
			return nil, &ClientError{err: err}
		}
		masterPools[name] = pool
	}

	subClient := pubsub.NewSubClient(client)
	r := subClient.Subscribe("+switch-master")
	if r.Err != nil {
		return nil, &ClientError{err: r.Err, SentinelErr: true}
	}

	c := &Client{
		poolSize:       poolSize,
		masterPools:    masterPools,
		subClient:      subClient,
		getCh:          make(chan *getReq),
		putCh:          make(chan *putReq),
		closeCh:        make(chan struct{}),
		alwaysErrCh:    make(chan *ClientError),
		switchMasterCh: make(chan *switchMaster),
	}

	go c.subSpin()
	go c.spin()
	return c, nil
}
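
A usage sketch for the constructor above; the import path is an assumption (this snippet closely matches the radix.v2 sentinel client), and the sentinel address and master name are placeholders:

package main

import (
	"log"

	"github.com/mediocregopher/radix.v2/sentinel" // assumed import path
)

func main() {
	// track the master named "mymaster" through a local sentinel, keeping a
	// pool of 10 connections for it
	client, err := sentinel.NewClient("tcp", "127.0.0.1:26379", 10, "mymaster")
	if err != nil {
		log.Fatal(err)
	}
	_ = client // check connections out of and back into the master pools here
}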
Example 3
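// NewRedisDataAccess creates a RedisDataAccess backed by a new connection
// pool for the given address; it panics if the pool cannot be created.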
func NewRedisDataAccess(network string, addr string, appDomain string, poolSize int) NoDbDataAccess {
	da := new(RedisDataAccess)
	da.network = network
	da.addr = addr
	da.appDomain = appDomain

	da.zunionOutputKey = da.withDomain("unionoutput")
	p, err := pool.NewPool(network, addr, poolSize)
	if err != nil {
		panic(err)
	}
	da.pool = p
	return da
}
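
A short usage sketch; the package name below is hypothetical, since the snippet doesn't show its import path. Unlike Examples 1 and 2, this constructor panics on failure instead of returning an error, so callers cannot handle a bad connection gracefully:

package main

import "example.com/nodb" // hypothetical import path, not shown in the snippet

func main() {
	// panics if the pool cannot be created, so there is no error to handle
	da := nodb.NewRedisDataAccess("tcp", "127.0.0.1:6379", "myapp.example.com", 10)
	_ = da
}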
Example 4
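// createRedisPool initializes the package-level _pool from a JSON options
// string, falling back to the defaults for missing or invalid options.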
// options sample:
//   `{  "addr": "127.0.0.1:6389",
//       "network":"tcp",
//       "db": 0,
//       "password": "",
//       "pools": 5
//    }`
func createRedisPool(options string) {
	var (
		err    error
		config struct {
			Addr     string
			Db       int
			Network  string
			Password string
			Pools    int
		}
	)

	if options != "" {
		err = json.Unmarshal([]byte(options), &config)
		if err != nil {
			// fall back to the defaults if the options can't be parsed
			config.Addr = defaultAddr
			config.Network = defaultNetwork
			config.Pools = defaultPoolSize
		}
	}

	if config.Pools <= 0 {
		config.Pools = defaultPoolSize
	}
	if config.Addr == "" {
		config.Addr = defaultAddr
	}
	if config.Network == "" {
		config.Network = "tcp"
	}

	_pool, err = pool.NewPool(config.Network, config.Addr, config.Pools)
	if err != nil {
		panic(err.Error())
	}
}
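
Since createRedisPool is unexported, it is called from within its own package; a minimal sketch using the documented options format. Note that the Db and Password fields are parsed but unused by the pool.NewPool call shown here:

func init() {
	// configure the shared pool; omitted fields fall back to the defaults
	createRedisPool(`{"addr": "127.0.0.1:6389", "network": "tcp", "pools": 5}`)
}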
Example 5
func TestReset(t *T) {
	// Simply initializing a cluster proves Reset works to some degree, since
	// NewCluster calls Reset
	cluster := getCluster(t)
	old7000Pool := cluster.pools["127.0.0.1:7000"]
	old7001Pool := cluster.pools["127.0.0.1:7001"]

	// We make a bogus pool and add it to the cluster to prove that it gets
	// removed, since it's not needed
	p, err := pool.NewPool("tcp", "127.0.0.1:6379", 10)
	assert.Nil(t, err)
	cluster.pools["127.0.0.1:6379"] = p

	// We use resetInnerUsingPool so that we can specify exactly which pool is
	// used, and don't accidentally use the 6379 one (which doesn't have
	// CLUSTER commands)
	respCh := make(chan bool)
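	// callCh hands the closure to the cluster's internal goroutine; respCh
	// lets the test block until the reset has finished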
	cluster.callCh <- func(c *Cluster) {
		err := cluster.resetInnerUsingPool("127.0.0.1:7000", old7000Pool)
		assert.Nil(t, err)
		respCh <- true
	}
	<-respCh

	// Prove that the bogus pool is closed
	_, ok := cluster.pools["127.0.0.1:6379"]
	assert.Equal(t, false, ok)

	// Prove that the remaining two addresses are still in the pool map, were
	// not reconnected, and still work
	assert.Equal(t, 2, len(cluster.pools))
	assert.Equal(t, old7000Pool, cluster.pools["127.0.0.1:7000"])
	assert.Equal(t, old7001Pool, cluster.pools["127.0.0.1:7001"])
	assert.Nil(t, cluster.Cmd("GET", "foo").Err)
	assert.Nil(t, cluster.Cmd("GET", "bar").Err)
}