Example #1
// NewPublisherDetailed is the same as NewPublisher, but allows users to provide
// an explicit lookup refresh ticker instead of a TTL, and specify the function
// used to perform lookups instead of using net.LookupSRV.
func NewPublisherDetailed(
	name string,
	refreshTicker *time.Ticker,
	lookupSRV func(service, proto, name string) (cname string, addrs []*net.SRV, err error),
	factory loadbalancer.Factory,
	logger log.Logger,
) *Publisher {
	p := &Publisher{
		name:   name,
		cache:  loadbalancer.NewEndpointCache(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.resolve(lookupSRV)
	if err == nil {
		logger.Log("name", name, "instances", len(instances))
	} else {
		logger.Log("name", name, "err", err)
	}
	p.cache.Replace(instances)

	go p.loop(refreshTicker, lookupSRV)
	return p
}
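A minimal usage sketch for the detailed constructor above. The import paths (including the context package), the SRV name, and the nopFactory helper are assumptions for illustration; the lookup function shown simply delegates to net.LookupSRV, mirroring what the plain NewPublisher would use by default.
package main

import (
	"io"
	"net"
	"time"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/loadbalancer/dnssrv" // assumed import path for the package above
	"github.com/go-kit/kit/log"
	"golang.org/x/net/context"
)

// nopFactory is a stand-in loadbalancer.Factory: every resolved instance maps
// to a no-op endpoint and requires no cleanup.
func nopFactory(instance string) (endpoint.Endpoint, io.Closer, error) {
	return func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, nil, nil
}

func main() {
	// Refresh the SRV record every 30 seconds via an explicit ticker.
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	// Delegate lookups to the standard resolver; a test could stub this out.
	lookup := func(service, proto, name string) (string, []*net.SRV, error) {
		return net.LookupSRV(service, proto, name)
	}

	p := dnssrv.NewPublisherDetailed("_http._tcp.example.internal", ticker, lookup, nopFactory, log.NewNopLogger())
	_ = p // hand the publisher to a load balancer in real code
}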
Example #2
// NewPublisher returns a ZooKeeper publisher. ZooKeeper will start watching the
// given path for changes and update the Publisher endpoints.
func NewPublisher(c Client, path string, f loadbalancer.Factory, logger log.Logger) (*Publisher, error) {
	p := &Publisher{
		client: c,
		path:   path,
		cache:  loadbalancer.NewEndpointCache(f, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	err := p.client.CreateParentNodes(p.path)
	if err != nil {
		return nil, err
	}

	// initial node retrieval and cache fill
	instances, eventc, err := p.client.GetEntries(p.path)
	if err != nil {
		logger.Log("path", p.path, "msg", "failed to retrieve entries", "err", err)
		return nil, err
	}
	logger.Log("path", p.path, "instances", len(instances))
	p.cache.Replace(instances)

	// handle incoming path updates
	go p.loop(eventc)

	return p, nil
}
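A small wiring sketch for the ZooKeeper publisher above. The zk package name, the service path, and the helper are assumptions; constructing the Client itself is out of scope here. Unlike the DNS and etcd examples, failures during construction are returned rather than merely logged.
import (
	"github.com/go-kit/kit/loadbalancer"
	"github.com/go-kit/kit/loadbalancer/zk" // assumed import path for the package above
	"github.com/go-kit/kit/log"
)

// buildZKPublisher is a hypothetical helper: the client is assumed to have
// been constructed elsewhere from a list of ZooKeeper servers. NewPublisher
// creates the parent znodes if needed, does the initial GetEntries, and starts
// the watch loop; any error here is fatal.
func buildZKPublisher(client zk.Client, factory loadbalancer.Factory, logger log.Logger) (*zk.Publisher, error) {
	return zk.NewPublisher(client, "/services/addsvc", factory, logger)
}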
Example #3
// NewPublisher returns a Consul publisher which returns Endpoints for the
// requested service. It only returns instances for which all of the passed
// tags are present.
func NewPublisher(
	client Client,
	factory loadbalancer.Factory,
	logger log.Logger,
	service string,
	tags ...string,
) (*Publisher, error) {
	p := &Publisher{
		cache:   loadbalancer.NewEndpointCache(factory, logger),
		client:  client,
		logger:  logger,
		service: service,
		tags:    tags,
		quitc:   make(chan struct{}),
	}

	instances, index, err := p.getInstances(defaultIndex)
	if err == nil {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "instances", len(instances))
	} else {
		logger.Log("service", service, "tags", strings.Join(tags, ", "), "err", err)
	}
	p.cache.Replace(instances)

	go p.loop(index)

	return p, nil
}
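A sketch of how the tag filtering might be used. The consul package path, service name, and tags are illustrative assumptions, and the Client is assumed to be constructed elsewhere (typically by wrapping a Hashicorp Consul API client).
import (
	"github.com/go-kit/kit/loadbalancer"
	"github.com/go-kit/kit/loadbalancer/consul" // assumed import path for the package above
	"github.com/go-kit/kit/log"
)

// buildSearchPublisher is a hypothetical helper: it asks for instances of the
// "search" service that carry both the "prod" and "v1" tags. Instances missing
// either tag are never surfaced as endpoints.
func buildSearchPublisher(client consul.Client, factory loadbalancer.Factory, logger log.Logger) (*consul.Publisher, error) {
	return consul.NewPublisher(client, factory, logger, "search", "prod", "v1")
}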
Example #4
func TestEndpointCache(t *testing.T) {
	var (
		e  = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		ca = make(closer)
		cb = make(closer)
		c  = map[string]io.Closer{"a": ca, "b": cb}
		f  = func(s string) (endpoint.Endpoint, io.Closer, error) { return e, c[s], nil }
		ec = loadbalancer.NewEndpointCache(f, log.NewNopLogger())
	)

	// Populate
	ec.Replace([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}

	// Duplicate, should be no-op
	ec.Replace([]string{"a", "b"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Errorf("endpoint b closed, not good")
	case <-time.After(time.Millisecond):
		t.Logf("no closures yet, good")
	}

	// Delete b
	go ec.Replace([]string{"a"})
	select {
	case <-ca:
		t.Errorf("endpoint a closed, not good")
	case <-cb:
		t.Logf("endpoint b closed, good")
	case <-time.After(time.Millisecond):
		t.Errorf("didn't close the deleted instance in time")
	}

	// Delete a
	go ec.Replace([]string{})
	select {
	// case <-cb: will succeed, as it's closed
	case <-ca:
		t.Logf("endpoint a closed, good")
	case <-time.After(time.Millisecond):
		t.Errorf("didn't close the deleted instance in time")
	}
}
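The test above (and the benchmark further below) builds ca and cb with make(closer), so closer must be a channel type whose Close method closes the channel. The exact definition is not shown in the excerpt; a minimal sketch consistent with that usage would be:
// closer is a channel-backed io.Closer: Close closes the channel, which is
// what lets the test observe closure via <-ca and <-cb.
type closer chan struct{}

func (c closer) Close() error {
	close(c)
	return nil
}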
Example #5
// NewPublisher returns an etcd publisher. etcd will start watching the given
// prefix for changes and update the Publisher endpoints.
func NewPublisher(c Client, prefix string, f loadbalancer.Factory, logger log.Logger) (*Publisher, error) {
	p := &Publisher{
		client: c,
		prefix: prefix,
		cache:  loadbalancer.NewEndpointCache(f, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.client.GetEntries(p.prefix)
	if err == nil {
		logger.Log("prefix", p.prefix, "instances", len(instances))
	} else {
		logger.Log("prefix", p.prefix, "msg", "failed to retrieve entries", "err", err)
	}
	p.cache.Replace(instances)

	go p.loop()
	return p, nil
}
Example #6
func BenchmarkEndpoints(b *testing.B) {
	var (
		e  = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		ca = make(closer)
		cb = make(closer)
		c  = map[string]io.Closer{"a": ca, "b": cb}
		f  = func(s string) (endpoint.Endpoint, io.Closer, error) { return e, c[s], nil }
		ec = loadbalancer.NewEndpointCache(f, log.NewNopLogger())
	)

	b.ReportAllocs()

	ec.Replace([]string{"a", "b"})

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ec.Endpoints()
		}
	})
}
Example #7
// NewPublisher returns a DNS SRV publisher. The name is resolved
// synchronously as part of construction; if that initial resolution fails,
// the error is logged and the publisher starts with an empty endpoint set.
// The name is re-resolved every ttl. The factory is used to convert a
// host:port to a usable endpoint. The logger is used to report DNS and
// factory errors.
func NewPublisher(name string, ttl time.Duration, factory loadbalancer.Factory, logger log.Logger) *Publisher {
	p := &Publisher{
		name:   name,
		ttl:    ttl,
		cache:  loadbalancer.NewEndpointCache(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	instances, err := p.resolve()
	if err == nil {
		logger.Log("name", name, "instances", len(instances))
	} else {
		logger.Log("name", name, "err", err)
	}
	p.cache.Replace(instances)

	go p.loop()
	return p
}
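A usage sketch for the TTL-based constructor, focusing on the factory's role of turning a resolved host:port into a usable endpoint. The dialing factory, the SRV name, and the import paths (including the context package) are illustrative assumptions.
package main

import (
	"io"
	"net"
	"os"
	"time"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/loadbalancer/dnssrv" // assumed import path for the package above
	"github.com/go-kit/kit/log"
	"golang.org/x/net/context"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// The factory turns each resolved host:port into an endpoint. Here it
	// opens a TCP connection; the returned net.Conn doubles as the io.Closer
	// that the endpoint cache closes when the instance disappears from DNS.
	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
		conn, err := net.Dial("tcp", instance)
		if err != nil {
			return nil, nil, err
		}
		e := func(ctx context.Context, request interface{}) (interface{}, error) {
			// Real code would encode the request onto conn and decode a response.
			return struct{}{}, nil
		}
		return e, conn, nil
	}

	// Re-resolve the SRV record every 5 seconds.
	p := dnssrv.NewPublisher("_http._tcp.example.internal", 5*time.Second, factory, logger)
	_ = p // hand the publisher to a load balancer in real code
}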