コード例 #1
0
ファイル: health.go プロジェクト: charshy/registry
// newHealthChecker builds a healthChecker with an empty per-client health
// table, the package default disconnect threshold, and a module-scoped logger.
func newHealthChecker() *healthChecker {
	checker := &healthChecker{
		clients:               make(map[cluster.MemberID]*clientHealth),
		disconnectedThreshold: defaultDisconnectedThreshold,
		logger:                logging.GetLogger(module),
	}
	return checker
}
コード例 #2
0
// newInMemoryRegistry builds the in-memory service registry. A nil conf
// falls back to the package DefaultConfig. When a replication component is
// provided, the registry first runs a blocking one-shot synchronization
// with remote peers and then starts consuming incoming replication events
// in the background.
func newInMemoryRegistry(conf *Config, rep replication.Replication) Registry {
	if conf == nil {
		conf = DefaultConfig
	}

	registry := &inMemoryRegistry{
		namespaces: make(map[auth.Namespace]Catalog),
		rep:        rep,
		logger:     logging.GetLogger(module),
		conf:       conf,
	}

	if rep == nil {
		return registry
	}

	// Blocking, executes once: sync state from remote peers before serving.
	registry.synchronize()

	// Once synchronization completes, accept incoming replication events
	// from remote peers and apply them to the local catalog.
	go registry.replicate()

	return registry
}
コード例 #3
0
ファイル: accesslog.go プロジェクト: charshy/registry
// MiddlewareFunc makes AccessLogApacheMiddleware implement the Middleware interface.
func (mw *AccessLog) MiddlewareFunc(h rest.HandlerFunc) rest.HandlerFunc {
	mw.logger = logging.GetLogger(module).WithField("apptype", "service-discovery")

	return func(w rest.ResponseWriter, r *rest.Request) {
		// The access line is emitted from a deferred function so it is
		// written even when a handler further down the chain panics.
		defer func() {
			reqID := "Unknown"
			if id, ok := r.Env[SdRequestID].(string); ok {
				reqID = id
			}

			fields := log.Fields{
				"sd-request-id": reqID,
				"namespace":     mw.namespace(r),
				"method":        mw.method(r),
				"protocol":      mw.protocol(r),
				"returncode":    mw.statusCode(r),
				"byteswritten":  mw.bytesWritten(r),
				"elapsedtime":   mw.elapsedTime(r),
			}
			entry := mw.logger.WithFields(fields)

			// Header capture is opt-in via the whitelist.
			if len(headersWhitelist) > 0 {
				entry = entry.WithField("headers", mw.headers(r))
			}
			entry.Infof("%s %s %s %s", mw.remoteAddr(r), r.Method, r.RequestURI, r.Proto)
		}()

		// call the handler
		h(w, r)
	}
}
コード例 #4
0
ファイル: health.go プロジェクト: charshy/registry
// newHealthChecker builds a healthChecker bound to the given cluster
// membership. The cluster size starts at the threshold so the checker
// begins life "healthy"; it records an initial size sample and registers
// itself as a membership listener before returning.
func newHealthChecker(membership Membership, threshold int) *healthChecker {
	checker := &healthChecker{
		membership:         membership,
		threshold:          threshold,
		clusterSize:        threshold, // initialize "healthy"
		subsizeTimestamp:   time.Now(),
		subsizeGracePeriod: defaultSubsizeThreshold,
		logger:             logging.GetLogger(module),
	}

	checker.RecordSize()
	membership.RegisterListener(checker)

	return checker
}
コード例 #5
0
ファイル: registrator.go プロジェクト: charshy/registry
// newRegistrator builds a registrator that keeps the given member
// registered in the backend at the supplied interval, wiring up its
// rejoin/done signaling channels and an ongoing-membership listener.
func newRegistrator(backend backend, member Member, membership Membership, interval time.Duration) *registrator {
	r := &registrator{
		backend:    backend,
		member:     member,
		membership: membership,
		interval:   interval,
		rejoin:     make(chan struct{}),
		done:       make(chan struct{}),
		logger:     logging.GetLogger(module),
	}
	r.listener = &ongoingListener{reg: r}
	return r
}
コード例 #6
0
ファイル: membership.go プロジェクト: charshy/registry
// newMembership builds a membership view over the given backend with an
// empty member cache, registering size and churn metrics in the default
// go-metrics registry. The size gauge is primed to zero.
func newMembership(backend backend, ttl, interval time.Duration) *membership {
	mem := &membership{
		backend:     backend,
		cache:       make(map[MemberID]*member),
		ttl:         ttl,
		interval:    interval,
		sizeMetric:  metrics.NewRegisteredGauge(membershipSizeMetricName, metrics.DefaultRegistry),
		churnMetric: metrics.NewRegisteredMeter(membershipChurnMetricName, metrics.DefaultRegistry),
		logger:      logging.GetLogger(module),
	}
	mem.sizeMetric.Update(0)
	return mem
}
コード例 #7
0
// newReplicatedCatalog builds a catalog for the given namespace. A nil
// conf falls back to DefaultConfig. When no replicator is supplied the
// plain in-memory catalog is returned; otherwise it is wrapped in a
// replicatedCatalog whose message loop runs in a background goroutine.
func newReplicatedCatalog(namespace auth.Namespace, conf *Config, replicator replication.Replicator) Catalog {
	if conf == nil {
		conf = DefaultConfig
	}

	local := newInMemoryCatalog(conf)
	if replicator == nil {
		// No replication configured: the local catalog is the catalog.
		return local
	}

	rc := &replicatedCatalog{
		local:         local,
		replicator:    replicator,
		notifyChannel: channels.NewChannelTimeout(256),
		logger:        logging.GetLogger(module).WithFields(log.Fields{"namespace": namespace}),
	}
	go rc.handleMsgs()

	rc.logger.Infof("Replicated-Catalog creation done")
	return rc
}
コード例 #8
0
import (
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/amalgam8/registry/utils/logging"
	gometrics "github.com/rcrowley/go-metrics"
)

const (
	// dumpInterval is how often the metrics registry is dumped to the log.
	dumpInterval = 10 * time.Minute

	// moduleName is the module tag used in logging.
	moduleName = "METRICS"
)

// logger is the package-level logger for metrics registry dumps.
var logger = logging.GetLogger(moduleName)

// DumpPeriodically logs the values of the entire go-metrics registry, periodically.
// This function blocks, so should be called within a separate goroutine.
func DumpPeriodically() {
	dumpPeriodically(dumpInterval, gometrics.DefaultRegistry)
}

// dumpPeriodically dumps the given registry once per interval, forever.
// It blocks the calling goroutine and never returns.
func dumpPeriodically(interval time.Duration, registry gometrics.Registry) {
	// time.NewTicker (rather than time.Tick) so the ticker is releasable;
	// the Stop is unreachable today but keeps the function correct if it
	// ever gains an exit path.
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		dumpRegistry(registry)
	}
}

func dumpRegistry(registry gometrics.Registry) {
	logger.Info("Dumping metrics registry")
コード例 #9
0
ファイル: routes.go プロジェクト: charshy/registry
// New creates a new eureka Server instance
func New(registry store.Registry) *Routes {
	return &Routes{
		registry: registry,
		logger:   logging.GetLogger(module),
	}
}
コード例 #10
0
ファイル: server.go プロジェクト: charshy/registry
// New - Create a new replication instance
func New(conf *Config) (Replication, error) {
	var lentry = logging.GetLogger(module)

	if conf == nil {
		err := fmt.Errorf("Nil conf")
		lentry.WithFields(log.Fields{
			"error": err,
		}).Error("Failed to create replication server")

		return nil, err
	}

	if conf.Membership == nil || conf.Registrator == nil {
		err := fmt.Errorf("Nil cluster membership and/or registrator")
		lentry.WithFields(log.Fields{
			"error": err,
		}).Error("Failed to create replication server")

		return nil, err
	}

	// Make sure that the listening port is free
	address := fmt.Sprintf("%s:%d", conf.Registrator.Self().IP(), conf.Registrator.Self().Port())
	listener, err := net.Listen("tcp", address)
	if err != nil {
		lentry.WithFields(log.Fields{
			"error": err,
		}).Error("Failed to create replication server")

		return nil, err
	}

	tr := &http.Transport{MaxIdleConnsPerHost: 1}
	hc := &http.Client{Transport: tr}
	logger := lentry.WithFields(log.Fields{"Member-ID": conf.Registrator.Self().ID()})

	// Instantiate a server
	s := &server{
		listener:       listener,
		httpclient:     hc,
		transport:      tr,
		broadcast:      channels.NewChannelTimeout(512),
		repair:         channels.NewChannelTimeout(512),
		newPeers:       make(chan *peer),
		closingPeers:   make(chan *peer, 8),
		peers:          make(map[cluster.MemberID]*peer),
		mux:            http.NewServeMux(),
		replicators:    make(map[auth.Namespace]*replicator),
		clients:        make(map[cluster.MemberID]*client),
		selfID:         conf.Registrator.Self().ID(),
		membership:     conf.Membership,
		registrator:    conf.Registrator,
		notifyChannel:  make(chan *InMessage, 512),
		syncReqChannel: make(chan chan []byte, 8),
		health:         newHealthChecker(),
		done:           make(chan struct{}),
		logger:         logger,
	}

	health.Register(module, s.health)

	logger.Info("Replication server created")
	return s, nil
}