Example #1
// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
	if httpPoolMade {
		panic("groupcache: NewHTTPPool must be called only once")
	}
	httpPoolMade = true

	opts := HTTPPoolOptions{}
	if o != nil {
		opts = *o
	}
	if opts.BasePath == "" {
		opts.BasePath = defaultBasePath
	}
	if opts.Replicas == 0 {
		opts.Replicas = defaultReplicas
	}

	p := &HTTPPool{
		basePath: opts.BasePath,
		self:     self,
		peers:    consistenthash.New(opts.Replicas, opts.HashFn),
	}
	RegisterPeerPicker(func() PeerPicker { return p })
	return p
}
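For context, a minimal, hedged usage sketch of NewHTTPPoolOpts as exposed by github.com/golang/groupcache: the returned pool is not auto-registered, so it is hooked up with http.Handle before the server starts. The listen address, base path, and peer URLs below are assumptions for illustration, not part of the original example.

package main

import (
	"log"
	"net/http"

	"github.com/golang/groupcache"
)

func main() {
	// Assumed self URL and options; adjust to your deployment.
	pool := groupcache.NewHTTPPoolOpts("http://localhost:8000", &groupcache.HTTPPoolOptions{
		BasePath: "/_groupcache/",
		Replicas: 50,
	})
	pool.Set("http://localhost:8000", "http://localhost:8001")

	// NewHTTPPoolOpts does not register the pool as a handler, so do it here.
	http.Handle("/_groupcache/", pool)
	log.Fatal(http.ListenAndServe(":8000", nil))
}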
Example #2
func (gp *GRPCPool) Set(peers ...string) {
	gp.mu.Lock()
	defer gp.mu.Unlock()
	gp.peers = consistenthash.New(gp.opts.Replicas, gp.opts.HashFn)
	tempGetters := make(map[string]*grpcGetter, len(peers))
	for _, peer := range peers {
		if getter, exists := gp.grpcGetters[peer]; exists {
			// Reuse the existing connection and keep the peer in the new ring.
			tempGetters[peer] = getter
			gp.peers.Add(peer)
			delete(gp.grpcGetters, peer)
		} else {
			getter, err := newGRPCGetter(peer, gp.opts.PeerDialOptions...)
			if err != nil {
				log.WithError(err).Warnf("Failed to open connection to [%s]", peer)
			} else {
				tempGetters[peer] = getter
				gp.peers.Add(peer)
			}
		}
	}

	for p, g := range gp.grpcGetters {
		g.close()
		delete(gp.grpcGetters, p)
	}

	gp.grpcGetters = tempGetters
}
Example #3
func NewGRPCPoolOptions(self string, server *grpc.Server, opts *GRPCPoolOptions) *GRPCPool {
	if grpcPoolCreated {
		panic("NewGRPCPool must be called only once")
	}

	grpcPoolCreated = true

	pool := &GRPCPool{
		self:        self,
		grpcGetters: make(map[string]*grpcGetter),
	}

	if opts != nil {
		pool.opts = *opts
	}

	if pool.opts.Replicas == 0 {
		pool.opts.Replicas = defaultReplicas
	}

	if pool.opts.PeerDialOptions == nil {
		pool.opts.PeerDialOptions = []grpc.DialOption{grpc.WithInsecure()}
	}

	pool.peers = consistenthash.New(pool.opts.Replicas, pool.opts.HashFn)
	groupcache.RegisterPeerPicker(func() groupcache.PeerPicker { return pool })
	gcgrpc.RegisterPeerServer(server, pool)
	return pool
}
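A hedged usage sketch of the gRPC pool from Examples #2 and #3: the import path of the pool package (written here as grpcpool), the listen address, and the peer addresses are all assumptions for illustration.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	grpcpool "example.com/your/grpcpool" // assumed import path for the pool package above
)

func main() {
	lis, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}
	server := grpc.NewServer()

	// Registers the pool as the groupcache PeerPicker and as a peer service on server.
	pool := grpcpool.NewGRPCPoolOptions("10.0.0.1:8080", server, &grpcpool.GRPCPoolOptions{
		Replicas: 50,
	})

	// Initial peer list, e.g. from service discovery (addresses assumed).
	// Later calls to Set reuse existing connections for peers that stay in the
	// list and close the connections of peers that were removed (see Example #2).
	pool.Set("10.0.0.1:8080", "10.0.0.2:8080")

	log.Fatal(server.Serve(lis))
}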
Example #4
// NewPeersPool initializes an HTTP pool of peers.
// It registers itself as a PeerPicker and as an HTTP handler with the
// http.DefaultServeMux.
// The self argument should be a valid base URL that points to the current server,
// for example "http://example.net:8000".
func NewPeersPool(self string) *PeersPool {
	if httpPoolMade {
		panic("groupcache: NewPeersPool must be called only once")
	}
	httpPoolMade = true
	p := &PeersPool{basePath: defaultBasePath, self: self, peers: consistenthash.New(defaultReplicas, nil)}
	RegisterPeerPicker(func() PeerPicker { return p })
	http.Handle(defaultBasePath, p)
	return p
}
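Because NewPeersPool registers the pool on http.DefaultServeMux itself, serving the peer endpoint only needs the default server. A short sketch in the same package as NewPeersPool (imports "log" and "net/http" assumed; the self URL, peer URLs, and the presence of the usual Set method on PeersPool are assumptions):

func startPeersExample() {
	p := NewPeersPool("http://10.0.0.1:8000") // assumed base URL of this server
	// Assumed: this fork keeps the usual Set(peers ...string) method.
	p.Set("http://10.0.0.1:8000", "http://10.0.0.2:8000")

	// The pool is already registered on http.DefaultServeMux, so the default
	// handler set is enough here.
	log.Fatal(http.ListenAndServe(":8000", nil))
}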
Example #5
// Set updates the pool's list of peers.
// Each peer value should be a valid base URL,
// for example "http://example.net:8000".
func (p *HTTPPool) Set(peers ...string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.peers = consistenthash.New(defaultReplicas, nil)
	p.peers.Add(peers...)
	p.httpGetters = make(map[string]*httpGetter, len(peers))
	for _, peer := range peers {
		p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.basePath}
	}
}
Example #6
// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
	if httpPoolMade {
		panic("groupcache: NewHTTPPool must be called only once")
	}
	httpPoolMade = true

	p := &HTTPPool{
		self:        self,
		httpGetters: make(map[string]*httpGetter),
	}
	if o != nil {
		p.opts = *o
	}
	if p.opts.BasePath == "" {
		p.opts.BasePath = defaultBasePath
	}
	if p.opts.Replicas == 0 {
		p.opts.Replicas = defaultReplicas
	}
	p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)

	RegisterPeerPicker(func() PeerPicker { return p })
	return p
}
Example #7
// Set updates the pool's list of peers.
// Each peer value should be a valid base URL,
// for example "http://example.net:8000".
func (p *HTTPPool) Set(peers ...string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.peers = consistenthash.New(defaultReplicas, nil)
	p.peers.Add(peers...)
}
Example #8
func newConsistentHashRes(
	em ephemeral.Ephemeral,
	root string,
	ipport string,
	replica int,
	timeout time.Duration,
	counter stats.Client,
) (*ConsistentHashRes, error) {
	c := &ConsistentHashRes{
		ephemeral: em,
		root:      root,
		cmap:      consistenthash.New(replica, murmur3.Sum32),
		done:      make(chan struct{}),
		ipport:    ipport,
		ctr:       counter,
	}

	// ensure root path
	if err := ephemeral.EnsurePath(em, root); err != nil {
		return nil, err
	}

	// If this instance is a server, register it.
	if ipport != "" {
		if _, _, err := net.SplitHostPort(ipport); err != nil {
			log.Errorf("incoming hostport %s isn't in host:port format, err %v", ipport, err)
			return nil, err
		}
		node := makeNode(root, ipport)
		createNode(em, node)
	}

	ready := make(chan struct{})
	var retErr error

	// Listen to server events.
	go func() {
		readySent := false
		receiver := c.ephemeral.List(context.Background(), c.root, true)

	zkloop:
		// TODO: add maxRetries
		for {
			var resp *ephemeral.ListResponse
			select {
			case <-c.done: // signal done received
				c.ctr.BumpSum("loop.done", 1)
				break zkloop
			case resp = <-receiver:
				if resp.Err == ephemeral.ErrPathNotFound {
					log.Fatalf("[shard] root directory<%s> not found", c.root)
				}
				if resp.Err != nil {
					// TODO: handle conn.close
					// When the connection is closed, we will get an ErrSessionExpired
					// (see vendor/src/github.com/samuel/go-zookeeper/shard/zk_test.go,
					// line 370).
					c.ctr.BumpSum("loop.setwatch.err", 1)
					log.Errorf("[shard] fail to watch %v err: %v", c.root, resp.Err)
					retErr = resp.Err
					time.Sleep(timeWait)
					// Re-assign receiver.
					receiver = c.ephemeral.List(context.Background(), c.root, true)
					continue
				}
				c.ctr.BumpSum("loop.zkchange", 1)
			}

			log.Infof("[shard] in consistentres, root:%s, children: %v", c.root, resp.Children)

			cmap := consistenthash.New(replica, murmur3.Sum32)
			keys := make(map[string]struct{})

			// The list will be reallocated by append() if size is not enough
			hosts := make([]string, 0, maxHosts)
			for _, child := range resp.Children {
				ipport, err := ExtractIPPort(child)
				if err != nil {
					c.ctr.BumpSum("loop.parse.err", 1)
					log.Errorf("[shard] parse error root %v, node %v err %v",
						c.root, child, err)
					continue
				}
				if _, ok := keys[ipport]; ok {
					c.ctr.BumpSum("loop.dupkey.warn", 1)
					log.Infof("[shard] duplicated shard info %v %v", c.root, ipport)
					continue
				}
				keys[ipport] = struct{}{}
				hosts = append(hosts, ipport)
			}
			cmap.Add(hosts...)

			//replace the old cmap
			c.setCmap(cmap, keys)

			//signal ready
			if !readySent {
				// if ready, clear previous err
				retErr = nil
				c.ctr.BumpSum("loop.ready", 1)
				ready <- struct{}{}
				readySent = true
			}
		}
		close(ready)
	}()

	// wait till ready
	select {
	case <-ready:
		if retErr != nil {
			c.ctr.BumpSum("newhash.init.err", 1)
			return nil, retErr
		}
		c.ctr.BumpSum("newhash.ready", 1)
	case <-time.After(timeout):
		c.ctr.BumpSum("newhash.timeout.err", 1)
		log.Errorf("[shard] consistent hash init timeout %v", c.root)
		return nil, ErrConnTimedOut
	}

	return c, nil
}
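The heart of the loop above is that every membership change builds a brand-new consistenthash.Map from the current children and swaps it in (via setCmap) rather than mutating the old one. Below is a self-contained sketch of that rebuild-and-swap pattern; the type and method names are made up for illustration, and the murmur3 import path is an assumption.

package main

import (
	"fmt"
	"sync"

	"github.com/golang/groupcache/consistenthash"
	"github.com/spaolacci/murmur3" // assumed murmur3 package providing Sum32
)

type ring struct {
	mu   sync.RWMutex
	cmap *consistenthash.Map
}

// rebuild constructs a fresh map from the current member list and swaps it in,
// mirroring the "new cmap per change" pattern in the watch loop above.
func (r *ring) rebuild(replicas int, members []string) {
	cmap := consistenthash.New(replicas, murmur3.Sum32)
	cmap.Add(members...)

	r.mu.Lock()
	r.cmap = cmap
	r.mu.Unlock()
}

// owner returns the member responsible for key under the current ring.
func (r *ring) owner(key string) string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cmap.Get(key)
}

func main() {
	r := &ring{}
	r.rebuild(3, []string{"10.0.0.1:8080", "10.0.0.2:8080"})
	fmt.Println(r.owner("uid1"))
}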
Example #9
func (c *CHash) SetBuckets(buckets []string) error {
	c.m = consistenthash.New(160, leveldbHash)
	c.s = buckets
	c.m.Add(buckets...)
	return nil
}
Example #10
func (c *ConnHashTestSuite) TestConsistentHashRes() {
	// Create two servers, 127.0.0.1:8080 and 192.168.0.1:81, and test keys
	// uid[1-10], comparing the results against a direct hash calculation
	// from consistenthash.Map.

	// First, connect server 1 and server 2.

	// Consistent hash server 1.
	t := c.T()
	em1, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)

	if err != nil {
		t.Fatalf("Connect to zk error for server1: %s", err)
	}
	conn1, err := NewConsistentHashResServer(em1, testEmRoot, svr1,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 1 %s create failed:%s", svr1, err)
	}
	assert.Equal(t, conn1.HostPort(), svr1)

	// Wait for the membership change to stabilize.
	time.Sleep(1 * time.Second)
	assert.True(t, conn1.IsMyKey("any keys"), "should always be true since there is only one server")

	//consistent server 2
	em2, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	if err != nil {
		t.Fatalf("Connect to zk error for server2: %s", err)
	}
	conn2, err := NewConsistentHashResServer(em2, testEmRoot, svr2,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 2 %s create failed:%s", svr2, err)
	}
	assert.Equal(t, conn2.HostPort(), svr2)

	//client
	emClient, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	assert.NoError(t, err)
	client, err := NewConsistentHashResClient(emClient, testEmRoot,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent client create failed:%s", err)
	}
	assert.Equal(t, client.HostPort(), "")

	//add server 1 and 2
	cmap := consistenthash.New(ConsistentHashMapReplicaNum, murmur3.Sum32)
	cmap.Add(svr1, svr2)

	//verify hashes are the same across all instances
	verifyAnswer(t, cmap, conn1, conn2, client)
	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2})
	// verify shard assignment distribution
	verifyShardDist(t, client, 2, 1000)

	//add another server
	em3, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	if err != nil {
		t.Fatalf("Connect to zk error for server3: %s", err)
	}
	conn3, err := NewConsistentHashResServer(em3, testEmRoot, svr3,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 3 %s create failed:%s", svr3, err)
	}
	assert.Equal(t, conn3.HostPort(), svr3)

	cmap.Add(svr3)

	//verify hashes are the same across all instances
	verifyAnswer(t, cmap, conn1, conn2, conn3, client)
	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2, svr3})
	// verify shard assignment distribution
	verifyShardDist(t, client, 3, 1000)

	// When zk is unreachable for roughly 20 seconds, all znodes expire
	// because the client session is expired by zk.

	// When the zk connection comes back, sharding should still work.
	c.stopForward <- struct{}{}
	time.Sleep(10 * time.Second)

	// make conn alive
	c.stopForward, _ = c.forwarder()
	time.Sleep(time.Second) // wait one second for ready

	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2, svr3})
	// verify shard assignment distribution
	verifyShardDist(t, client, 3, 1000)

	conn1.Close()
	conn2.Close()
	conn3.Close()
	client.Close()
	emClient.Close()
	em1.Close()
	em2.Close()
	em3.Close()
}