Example #1
func NewGossipNetwork(netConf Config, executor UpdateExecutor) *Network {
	conf := memberlist.DefaultLocalConfig()
	network := &Network{
		executor: executor,
		version:  netConf.LocalVersion,
	}

	conf.BindPort = netConf.LocalPort
	conf.Name = netConf.Name
	conf.Delegate = network

	list, err := memberlist.Create(conf)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}

	// Retry the join a few times before giving up.
	n := 0
	for i := 0; i < 3; i++ {
		n, err = list.Join(netConf.RootNodes)
		if n > 0 {
			break
		}
	}

	if n == 0 {
		panic(fmt.Sprintf("Can't connect to any of the root nodes: %v", err))
	}

	network.members = list

	return network
}
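Since conf.Delegate is set to network, the Network type must satisfy memberlist's Delegate interface. A minimal sketch of those five methods, with illustrative stub bodies (the real implementation presumably routes NotifyMsg to the UpdateExecutor):

func (n *Network) NodeMeta(limit int) []byte { return nil }

// NotifyMsg receives user-level gossip payloads; this is the natural
// place to hand an update to the executor.
func (n *Network) NotifyMsg(msg []byte) {}

func (n *Network) GetBroadcasts(overhead, limit int) [][]byte { return nil }
func (n *Network) LocalState(join bool) []byte                { return nil }
func (n *Network) MergeRemoteState(buf []byte, join bool)     {}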
Example #2
//New returns a new instance of the cluster, created using the given Config.
func New(config *Config) (*Cluster, error) {
	c := &Cluster{
		Config: config,
		name:   fmt.Sprintf("%d", config.ID),
	}

	memberlistConfig := memberlist.DefaultLANConfig()
	memberlistConfig.Name = c.name
	memberlistConfig.BindAddr = config.Host
	memberlistConfig.BindPort = config.Port

	// TODO (Cosmin): temporarily disabling any logging from memberlist; we might want to enable it again using logrus.
	memberlistConfig.LogOutput = ioutil.Discard

	// The delegates must be wired up before Create, otherwise the
	// memberlist starts without them.
	memberlistConfig.Delegate = c
	memberlistConfig.Conflict = c
	memberlistConfig.Events = c

	ml, err := memberlist.Create(memberlistConfig)
	if err != nil {
		logger.WithField("error", err).Error("Error when creating the internal memberlist of the cluster")
		return nil, err
	}
	c.memberlist = ml

	return c, nil
}
Example #3
func (nDB *NetworkDB) clusterInit() error {
	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeName
	config.BindAddr = nDB.config.BindAddr

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersionMax
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	config.LogOutput = &logWriter{}

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(nDB.nodes)
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist
	nDB.mConfig = config

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapInterval, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
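nDB.triggerFunc is defined elsewhere in NetworkDB; from the call site it drains a ticker channel and runs the callback until the stop channel closes. A minimal sketch of that pattern (the real libnetwork version also staggers the first run with a random delay):

func (nDB *NetworkDB) triggerFunc(interval time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}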
Example #4
func benchmarkCluster(b *testing.B, num int, timeoutForAllJoins time.Duration, lowestPort int) {
	startTime := time.Now()

	var nodes []*memberlist.Memberlist
	eventC := make(chan memberlist.NodeEvent, num)
	addr := "127.0.0.1"
	var firstMemberName string
	for i := 0; i < num; i++ {
		c := memberlist.DefaultLANConfig()
		port := lowestPort + i
		c.Name = fmt.Sprintf("%s:%d", addr, port)
		c.BindAddr = addr
		c.BindPort = port
		c.ProbeInterval = 20 * time.Millisecond
		c.ProbeTimeout = 100 * time.Millisecond
		c.GossipInterval = 20 * time.Millisecond
		c.PushPullInterval = 200 * time.Millisecond

		c.LogOutput = ioutil.Discard

		if i == 0 {
			c.Events = &memberlist.ChannelEventDelegate{eventC}
			firstMemberName = c.Name
		}

		newMember, err := memberlist.Create(c)
		if err != nil {
			log.WithField("error", err).Fatal("Unexpected error when creating the memberlist")
		}
		nodes = append(nodes, newMember)
		defer newMember.Shutdown()

		if i > 0 {
			numContacted, err := newMember.Join([]string{firstMemberName})
			if numContacted == 0 || err != nil {
				log.WithField("error", err).Fatal("Unexpected fatal error when node wanted to join the cluster")
			}
		}
	}

	if convergence(nodes, num, eventC, timeoutForAllJoins) {
		endTime := time.Now()
		log.WithField("durationSeconds", endTime.Sub(startTime).Seconds()).Info("Cluster convergence reached")
	}

	b.ResetTimer() // exclude cluster setup from the measured time
	sendMessagesInCluster(nodes, b.N)
	b.StopTimer()
}
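convergence and sendMessagesInCluster are defined elsewhere; Example #16 inlines essentially the same logic. A condensed sketch of convergence under that assumption (count joins on the event channel, then poll until every node sees the full cluster):

func convergence(nodes []*memberlist.Memberlist, num int, eventC chan memberlist.NodeEvent, timeout time.Duration) bool {
	deadline := time.After(timeout)
	joins := 0
	for joins < num {
		select {
		case e := <-eventC:
			if e.Event == memberlist.NodeJoin {
				joins++
			}
		case <-deadline:
			return false
		}
	}
	// Poll until every node reports the full member count.
	for _, node := range nodes {
		for node.NumMembers() != num {
			time.Sleep(10 * time.Millisecond)
		}
	}
	return true
}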
Example #5
func New(cfg Config) *MemberlistTransport {
	sd := &SyncerDelegate{}
	mlCfg := cfg.Config
	mlCfg.Delegate = sd
	ml, err := memberlist.Create(mlCfg)
	if err != nil {
		panic(err.Error())
	}

	n := &MemberlistTransport{
		Memberlist:  ml,
		subscribers: make(map[libsyncer.MessageType][]Callback),
	}
	sd.Callback = n.receiveMessage // messages arriving before this assignment would see a nil callback
	return n
}
Example #6
func (c *Cluster) Join() error {
	nodeMeta, err := proto.Marshal(c.LocalNodeMeta)
	if err != nil {
		return err
	}

	c.nodeMetaBuffer = nodeMeta

	m, err := memberlist.Create(c.config)
	if err != nil {
		return err
	}

	c.Members = m
	_, err = m.Join([]string{c.existing})
	return err
}
Example #7
func (t *Tracker) setupMemberlist() (err error) {
	conf := memberlist.DefaultLANConfig()
	conf.LogOutput = ioutil.Discard

	conf.Name = fmt.Sprintf("%s:%s", t.cfg.UUID().FullString(), t.iface)

	conf.BindAddr = t.iface
	conf.BindPort = t.port

	conf.Delegate = newDelegate(t.adport, t.cfg)
	conf.Events = t.evtHandler

	t.memberlist, err = memberlist.Create(conf)

	return
}
Example #8
func main() {
	fmt.Println("main A")
	config := memberlist.DefaultLANConfig()
	config.BindAddr = "192.168.50.25"
	list, err := memberlist.Create(config)
	n, err := list.Join([]string{"192.168.50.25"})
	if err != nil {
		panic("Failed to join cluster: " + err.Error())
	}
	log.Println("@n:", n)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}
	for {
		checkCluster(list)
		time.Sleep(time.Second)
	}
}
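checkCluster is not shown; a plausible minimal version, assumed from the call site, simply logs the current membership view on each invocation:

func checkCluster(list *memberlist.Memberlist) {
	for _, member := range list.Members() {
		log.Printf("Member: %s %s", member.Name, member.Addr)
	}
}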
Example #9
func NewDistributedEventEmitter(cluster []string, bindAddr string) *DistributedEventEmitter {
	dee := DistributedEventEmitter{}
	c := memberlist.DefaultLANConfig()
	c.Name = bindAddr
	c.BindAddr = bindAddr

	ml, err := memberlist.Create(c)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}
	dee.ml = ml

	_, err = dee.ml.Join(cluster)
	if err != nil {
		panic("Failed to join cluster: " + err.Error())
	}
	h, err := os.Hostname()
	if err != nil {
		panic("Failed to get hostname: " + err.Error())
	}

	dee.nodeId = fmt.Sprintf("%s:%d", h, os.Getpid())

	dee.listeners = make(map[string][]func([]byte))
	a, err := net.ResolveUDPAddr("udp", mcastAddr)
	if err != nil {
		panic("Error converting mcast addr: " + err.Error())
	}

	dee.sub, err = net.ListenMulticastUDP("udp", nil, a)
	if err != nil {
		panic("Failed to listen on UDP mcast: " + err.Error())
	}
	dee.sub.SetReadBuffer(maxDGSize)

	go dee.readLoop(dee.sub)

	return &dee
}
Example #10
func main() {
	cfg := memberlist.DefaultLocalConfig()
	log.Println("@bindPort:", cfg.BindPort, "  @advPort:", cfg.AdvertisePort)
	list, err := memberlist.Create(cfg)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}

	// Join an existing cluster by specifying at least one known member.
	n, err := list.Join([]string{seedPort})
	if err != nil {
		panic("Failed to join cluster: " + err.Error())
	}
	log.Println("@n:", n)

	// Ask for members of the cluster
	for _, member := range list.Members() {
		fmt.Printf("Member: %s %s\n", member.Name, member.Addr)
	}
	time.Sleep(time.Hour)
}
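seedPort comes from elsewhere in the file; note that Join expects full "host:port" strings, for example (address purely illustrative):

n, err := list.Join([]string{"127.0.0.1:7946"}) // 7946 is memberlist's default port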
Example #11
func CreateMemberlistAgent(opdata *utilities.OPData, observer *Observer) *MemberlistAgent {
	ma := new(MemberlistAgent)
	c := memberlist.DefaultLocalConfig()
	c.Name = opdata.Name()
	c.BindAddr = opdata.Ovip
	c.BindPort = opdata.Serfport
	c.Events = observer
	list, err := memberlist.Create(c)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}
	ma.list = list
	ma.conf = c
	return ma
}
Example #12
func NewAgent(c *Config) (*Agent, error) {
	var err error

	mlConfig := memberlist.DefaultLANConfig()
	mlConfig.Name = c.Name
	mlConfig.BindAddr, mlConfig.BindPort, err = parseHostPort(c.BindAddr)
	if err != nil {
		return nil, err
	}
	if c.AdvertiseAddr != "" {
		mlConfig.AdvertiseAddr, mlConfig.AdvertisePort, err = parseHostPort(c.AdvertiseAddr)
		if err != nil {
			return nil, err
		}
	} else {
		mlConfig.AdvertiseAddr = mlConfig.BindAddr
		mlConfig.AdvertisePort = mlConfig.BindPort
	}

	agent := &Agent{}
	agent.config = mlConfig

	mlConfig.Delegate = &Delegate{agent}
	ml, err := memberlist.Create(mlConfig)
	if err != nil {
		return nil, err // return the error rather than exiting the process
	}

	agent.memberlist = ml
	agent.broadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return agent.memberlist.NumMembers()
		},
		RetransmitMult: mlConfig.RetransmitMult,
	}

	return agent, nil
}
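The TransmitLimitedQueue hands payloads to the gossip layer as items implementing memberlist.Broadcast. A minimal sketch of such an item (this type and its wiring are illustrative; the example's real broadcast type is not shown):

type simpleBroadcast struct{ msg []byte }

func (b *simpleBroadcast) Invalidates(other memberlist.Broadcast) bool { return false }
func (b *simpleBroadcast) Message() []byte                             { return b.msg }
func (b *simpleBroadcast) Finished()                                   {}

// Queue a payload to piggyback on gossip traffic:
// agent.broadcasts.QueueBroadcast(&simpleBroadcast{msg: payload})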
Example #13
// Setup creates a new instance of memberlist, assigns it to list, and
// sets the local nodes meta data as the rpc address.
func (m *Memberlist) Setup(t *Toystore) {
	memberConfig := memberlist.DefaultLocalConfig()
	memberConfig.BindAddr = t.Host
	memberConfig.Name = t.Host
	// Set IndirectChecks to 0 so we see a local view of membership.
	// I.e. we don't care about nodes hidden by partitions.
	memberConfig.IndirectChecks = 0
	// This is set really low for testing purposes. Should be ~100ms.
	memberConfig.GossipInterval = time.Millisecond * 20
	// Sets delegate to handle membership change events.
	memberConfig.Events = &MemberlistEvents{t}

	list, err := memberlist.Create(memberConfig)
	if err != nil {
		panic("Failed to create memberlist: " + err.Error())
	}
	m.list = list
	// Advertise the RPC address to peers via the local node's metadata.
	n := m.list.LocalNode()
	n.Meta = []byte(t.rpcAddress())
}
Example #14
func New(cfg *Config) (*tribe, error) {
	cfg.MemberlistConfig.Name = cfg.Name
	cfg.MemberlistConfig.BindAddr = cfg.BindAddr
	cfg.MemberlistConfig.BindPort = cfg.BindPort
	logger := logger.WithFields(log.Fields{
		"port": cfg.MemberlistConfig.BindPort,
		"addr": cfg.MemberlistConfig.BindAddr,
		"name": cfg.MemberlistConfig.Name,
	})

	tribe := &tribe{
		agreements:         map[string]*agreement.Agreement{},
		members:            map[string]*agreement.Member{},
		taskStateResponses: map[string]*taskStateQueryResponse{},
		taskStartStopCache: newCache(),
		msgBuffer:          make([]msg, 512),
		intentBuffer:       []msg{},
		logger:             logger.WithField("_name", cfg.MemberlistConfig.Name),
		tags: map[string]string{
			agreement.RestPort:               strconv.Itoa(cfg.RestAPIPort),
			agreement.RestProtocol:           cfg.RestAPIProto,
			agreement.RestInsecureSkipVerify: cfg.RestAPIInsecureSkipVerify,
		},
		pluginWorkQueue: make(chan worker.PluginRequest, 999),
		taskWorkQueue:   make(chan worker.TaskRequest, 999),
		workerQuitChan:  make(chan struct{}),
		workerWaitGroup: &sync.WaitGroup{},
		config:          cfg,
		EventManager:    gomit.NewEventController(),
	}

	tribe.broadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(tribe.memberlist.Members())
		},
		RetransmitMult: memberlist.DefaultLANConfig().RetransmitMult,
	}

	// Configure delegates.
	cfg.MemberlistConfig.Delegate = &delegate{tribe: tribe}
	cfg.MemberlistConfig.Events = &memberDelegate{tribe: tribe}

	ml, err := memberlist.Create(cfg.MemberlistConfig)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	tribe.memberlist = ml

	if cfg.Seed != "" {
		_, err := ml.Join([]string{cfg.Seed})
		if err != nil {
			logger.WithFields(log.Fields{
				"seed": cfg.Seed,
			}).Error(errMemberlistJoin)
			return nil, errMemberlistJoin
		}
		logger.WithFields(log.Fields{
			"seed": cfg.Seed,
		}).Infoln("tribe started")
		return tribe, nil
	}
	logger.WithFields(log.Fields{
		"seed": "none",
	}).Infoln("tribe started")
	return tribe, nil
}
Example #15
// Create creates a new Serf instance, starting all the background tasks
// to maintain cluster membership information.
//
// After calling this function, the configuration should no longer be used
// or modified by the caller.
func Create(conf *Config) (*Serf, error) {
	conf.Init()
	if conf.ProtocolVersion < ProtocolVersionMin {
		return nil, fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	} else if conf.ProtocolVersion > ProtocolVersionMax {
		return nil, fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	}

	serf := &Serf{
		config:        conf,
		logger:        log.New(conf.LogOutput, "", log.LstdFlags),
		members:       make(map[string]*memberState),
		queryResponse: make(map[LamportTime]*QueryResponse),
		shutdownCh:    make(chan struct{}),
		state:         SerfAlive,
	}

	// Check that the meta data length is okay
	if len(serf.encodeTags(conf.Tags)) > memberlist.MetaMaxSize {
		return nil, fmt.Errorf("Encoded length of tags exceeds limit of %d bytes", memberlist.MetaMaxSize)
	}

	// Check if serf member event coalescing is enabled
	if conf.CoalescePeriod > 0 && conf.QuiescentPeriod > 0 && conf.EventCh != nil {
		c := &memberEventCoalescer{
			lastEvents:   make(map[string]EventType),
			latestEvents: make(map[string]coalesceEvent),
		}

		conf.EventCh = coalescedEventCh(conf.EventCh, serf.shutdownCh,
			conf.CoalescePeriod, conf.QuiescentPeriod, c)
	}

	// Check if user event coalescing is enabled
	if conf.UserCoalescePeriod > 0 && conf.UserQuiescentPeriod > 0 && conf.EventCh != nil {
		c := &userEventCoalescer{
			events: make(map[string]*latestUserEvents),
		}

		conf.EventCh = coalescedEventCh(conf.EventCh, serf.shutdownCh,
			conf.UserCoalescePeriod, conf.UserQuiescentPeriod, c)
	}

	// Listen for internal Serf queries. This is set up before the snapshotter,
	// since we want to capture the query-time, but the internal listener does
	// not pass the queries through.
	outCh, err := newSerfQueries(serf, serf.logger, conf.EventCh, serf.shutdownCh)
	if err != nil {
		return nil, fmt.Errorf("Failed to setup serf query handler: %v", err)
	}
	conf.EventCh = outCh

	// Try to access the snapshot
	var oldClock, oldEventClock, oldQueryClock LamportTime
	var prev []*PreviousNode
	if conf.SnapshotPath != "" {
		eventCh, snap, err := NewSnapshotter(conf.SnapshotPath,
			snapshotSizeLimit,
			conf.RejoinAfterLeave,
			serf.logger,
			&serf.clock,
			conf.EventCh,
			serf.shutdownCh)
		if err != nil {
			return nil, fmt.Errorf("Failed to setup snapshot: %v", err)
		}
		serf.snapshotter = snap
		conf.EventCh = eventCh
		prev = snap.AliveNodes()
		oldClock = snap.LastClock()
		oldEventClock = snap.LastEventClock()
		oldQueryClock = snap.LastQueryClock()
		serf.eventMinTime = oldEventClock + 1
		serf.queryMinTime = oldQueryClock + 1
	}

	// Setup the various broadcast queues, which we use to send our own
	// custom broadcasts along the gossip channel.
	serf.broadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(serf.members)
		},
		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
	}
	serf.eventBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(serf.members)
		},
		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
	}
	serf.queryBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(serf.members)
		},
		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
	}

	// Create the buffer for recent intents
	serf.recentJoin = make([]nodeIntent, conf.RecentIntentBuffer)
	serf.recentLeave = make([]nodeIntent, conf.RecentIntentBuffer)

	// Create a buffer for events and queries
	serf.eventBuffer = make([]*userEvents, conf.EventBuffer)
	serf.queryBuffer = make([]*queries, conf.QueryBuffer)

	// Ensure our lamport clock is at least 1, so that the default
	// join LTime of 0 does not cause issues
	serf.clock.Increment()
	serf.eventClock.Increment()
	serf.queryClock.Increment()

	// Restore the clock from snap if we have one
	serf.clock.Witness(oldClock)
	serf.eventClock.Witness(oldEventClock)
	serf.queryClock.Witness(oldQueryClock)

	// Modify the memberlist configuration with keys that we set
	conf.MemberlistConfig.Events = &eventDelegate{serf: serf}
	conf.MemberlistConfig.Conflict = &conflictDelegate{serf: serf}
	conf.MemberlistConfig.Delegate = &delegate{serf: serf}
	conf.MemberlistConfig.DelegateProtocolVersion = conf.ProtocolVersion
	conf.MemberlistConfig.DelegateProtocolMin = ProtocolVersionMin
	conf.MemberlistConfig.DelegateProtocolMax = ProtocolVersionMax
	conf.MemberlistConfig.Name = conf.NodeName
	conf.MemberlistConfig.ProtocolVersion = ProtocolVersionMap[conf.ProtocolVersion]

	// Setup a merge delegate if necessary
	if conf.Merge != nil {
		conf.MemberlistConfig.Merge = &mergeDelegate{serf: serf}
	}

	// Create the underlying memberlist that will manage membership
	// and failure detection for the Serf instance.
	memberlist, err := memberlist.Create(conf.MemberlistConfig)
	if err != nil {
		return nil, err
	}

	serf.memberlist = memberlist

	// Create a key manager for handling all encryption key changes
	serf.keyManager = &KeyManager{serf: serf}

	// Start the background tasks. See the documentation above each method
	// for more information on their role.
	go serf.handleReap()
	go serf.handleReconnect()
	go serf.checkQueueDepth("Intent", serf.broadcasts)
	go serf.checkQueueDepth("Event", serf.eventBroadcasts)
	go serf.checkQueueDepth("Query", serf.queryBroadcasts)

	// Attempt to re-join the cluster if we have known nodes
	if len(prev) != 0 {
		go serf.handleRejoin(prev)
	}

	return serf, nil
}
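For context, a minimal caller of this constructor, assuming the package's DefaultConfig (which upstream pre-populates MemberlistConfig):

conf := serf.DefaultConfig()
conf.NodeName = "node-1"

s, err := serf.Create(conf)
if err != nil {
	log.Fatalf("serf create: %v", err)
}
defer s.Shutdown()

// Join an existing cluster; the bool controls whether old user events are ignored.
if _, err := s.Join([]string{"10.0.0.1:7946"}, false); err != nil {
	log.Printf("join failed: %v", err)
}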
Example #16
func benchmarkCluster(b *testing.B, num int, timeoutForAllJoins time.Duration, lowestPort int) {
	startTime := time.Now()

	var nodes []*memberlist.Memberlist
	eventC := make(chan memberlist.NodeEvent, num)
	addr := "127.0.0.1"
	var firstMemberName string
	for i := 0; i < num; i++ {
		c := memberlist.DefaultLANConfig()
		port := lowestPort + i
		c.Name = fmt.Sprintf("%s:%d", addr, port)
		c.BindAddr = addr
		c.BindPort = port
		c.ProbeInterval = 20 * time.Millisecond
		c.ProbeTimeout = 100 * time.Millisecond
		c.GossipInterval = 20 * time.Millisecond
		c.PushPullInterval = 200 * time.Millisecond

		c.LogOutput = ioutil.Discard

		if i == 0 {
			c.Events = &memberlist.ChannelEventDelegate{eventC}
			firstMemberName = c.Name
		}

		newMember, err := memberlist.Create(c)
		if err != nil {
			log.WithField("error", err).Fatal("Unexpected error when creating the memberlist")
		}
		nodes = append(nodes, newMember)
		defer newMember.Shutdown()

		if i >= 0 {
			num, err := newMember.Join([]string{firstMemberName})
			if num == 0 || err != nil {
				log.WithField("error", err).Fatal("Unexpected fatal error when node wanted to join the cluster")
			}
		}
	}

	breakTimer := time.After(timeoutForAllJoins)
	numJoins := 0
WAIT:
	for {
		select {
		case e := <-eventC:
			l := log.WithFields(log.Fields{
				"node":       *e.Node,
				"numJoins":   numJoins,
				"numMembers": nodes[0].NumMembers(),
			})
			if e.Event == memberlist.NodeJoin {
				l.Info("Node join")
				numJoins++
				if numJoins == num {
					l.Info("All nodes joined")
					break WAIT
				}
			} else {
				l.Info("Node leave")
			}
		case <-breakTimer:
			break WAIT
		}
	}

	if numJoins != num {
		log.WithFields(log.Fields{
			"joinCounter": numJoins,
			"num":         num,
		}).Error("Timeout before completing all joins")
	}

	convergence := false
	for !convergence {
		convergence = true
		for idx, node := range nodes {
			numSeenByNode := node.NumMembers()
			if numSeenByNode != num {
				log.WithFields(log.Fields{
					"index":    idx,
					"expected": num,
					"actual":   numSeenByNode,
				}).Debug("Wrong number of nodes")
				convergence = false
				break
			}
		}
	}
	endTime := time.Now()
	if numJoins == num {
		log.WithField("durationSeconds", endTime.Sub(startTime).Seconds()).Info("Cluster convergence reached")
	}

	b.ResetTimer() // exclude cluster setup from the measured time

	for senderID, node := range nodes {
		for receiverID, member := range node.Members() {
			for i := 0; i < b.N; i++ {
				message := fmt.Sprintf("Hello from %v to %v !", senderID, receiverID)
				log.WithField("message", message).Debug("SendToTCP")
				node.SendToTCP(member, []byte(message))
			}
		}
	}

	b.StopTimer()
}
Example #17
func (nDB *NetworkDB) clusterInit() error {
	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeName
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersionMax
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
Example #18
File: serf.go Project: rjw1/serf
// Create creates a new Serf instance, starting all the background tasks
// to maintain cluster membership information.
//
// After calling this function, the configuration should no longer be used
// or modified by the caller.
func Create(conf *Config) (*Serf, error) {
	serf := &Serf{
		config:     conf,
		logger:     log.New(conf.LogOutput, "", log.LstdFlags),
		members:    make(map[string]*memberState),
		shutdownCh: make(chan struct{}),
		state:      SerfAlive,
	}

	if conf.CoalescePeriod > 0 && conf.EventCh != nil {
		// Event coalescence is enabled, setup the channel.
		conf.EventCh = coalescedEventCh(conf.EventCh, serf.shutdownCh,
			conf.CoalescePeriod, conf.QuiescentPeriod)
	}

	// Setup the broadcast queue, which we use to send our own custom
	// broadcasts along the gossip channel.
	serf.broadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(serf.members)
		},
		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
	}
	serf.eventBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(serf.members)
		},
		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
	}

	// Create the buffer for recent intents
	serf.recentJoin = make([]nodeIntent, conf.RecentIntentBuffer)
	serf.recentLeave = make([]nodeIntent, conf.RecentIntentBuffer)

	// Create a buffer for events
	serf.eventBuffer = make([]*userEvents, conf.EventBuffer)

	// Ensure our lamport clock is at least 1, so that the default
	// join LTime of 0 does not cause issues
	serf.clock.Increment()
	serf.eventClock.Increment()

	// Modify the memberlist configuration with keys that we set
	conf.MemberlistConfig.Events = &eventDelegate{serf: serf}
	conf.MemberlistConfig.Delegate = &delegate{serf: serf}
	conf.MemberlistConfig.Name = conf.NodeName

	// Create the underlying memberlist that will manage membership
	// and failure detection for the Serf instance.
	memberlist, err := memberlist.Create(conf.MemberlistConfig)
	if err != nil {
		return nil, err
	}

	serf.memberlist = memberlist

	// Start the background tasks. See the documentation above each method
	// for more information on their role.
	go serf.handleReap()
	go serf.handleReconnect()
	go serf.checkQueueDepth(conf.QueueDepthWarning, "Intent",
		serf.broadcasts, serf.shutdownCh)
	go serf.checkQueueDepth(conf.QueueDepthWarning, "Event",
		serf.eventBroadcasts, serf.shutdownCh)

	return serf, nil
}
Example #19
func NewRegistry(opts ...registry.Option) registry.Registry {
	var options registry.Options
	for _, o := range opts {
		o(&options)
	}

	cAddrs := []string{}
	hostname, _ := os.Hostname()
	updates := make(chan *update, 100)

	for _, addr := range options.Addrs {
		if len(addr) > 0 {
			cAddrs = append(cAddrs, addr)
		}
	}

	broadcasts := &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			return len(cAddrs)
		},
		RetransmitMult: 3,
	}

	mr := &gossipRegistry{
		broadcasts: broadcasts,
		services:   make(map[string][]*registry.Service),
		updates:    updates,
		subs:       make(map[string]chan *registry.Result),
	}

	go mr.run()

	c := memberlist.DefaultLocalConfig()
	c.BindPort = 0
	c.Name = hostname + "-" + uuid.NewUUID().String()
	c.Delegate = &delegate{
		updates:    updates,
		broadcasts: broadcasts,
	}

	if options.Secure {
		k, ok := options.Context.Value(contextSecretKey{}).([]byte)
		if !ok {
			k = DefaultKey
		}
		c.SecretKey = k
	}

	m, err := memberlist.Create(c)
	if err != nil {
		log.Fatalf("Error creating memberlist: %v", err)
	}

	if len(cAddrs) > 0 {
		_, err := m.Join(cAddrs)
		if err != nil {
			log.Fatalf("Error joining members: %v", err)
		}
	}

	log.Printf("Local memberlist node %s:%d\n", m.LocalNode().Addr, m.LocalNode().Port)
	return mr
}
Example #20
func main() {
	flag.Parse()
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)

	/* Create the initial memberlist from a safe configuration.
	   Please reference the godoc for other default config types.
	   http://godoc.org/github.com/hashicorp/memberlist#Config
	*/
	evtDelegate := MultiEventDelegate{}
	var delegate = NewDelegate(*name, *tcpport)
	var cfg = memberlist.DefaultLocalConfig()
	cfg.Events = &evtDelegate
	cfg.Delegate = delegate
	cfg.Name = *name
	cfg.BindPort = *port
	cfg.BindAddr = *addr
	cfg.AdvertisePort = *port
	cfg.AdvertiseAddr = *addr

	list, err := memberlist.Create(cfg)
	if err != nil {
		log.Fatalln("Failed to create memberlist: " + err.Error())
	}

	if len(*hosts) > 0 {
		// Join an existing cluster by specifying at least one known member.
		_, err = list.Join(strings.Split(*hosts, ","))
		if err != nil {
			log.Println("Failed to join cluster: " + err.Error())
		}
	}

	// Ask for members of the cluster
	for _, member := range list.Members() {
		fmt.Printf("Member: %s %s\n", member.Name, member.Addr)
	}

	lookup := NewNodeLookup()
	transport := NewTcpTransporter()

	// Setup raft
	id := uint32(Jesteress([]byte(cfg.Name)))
	log.Printf("Name: %s ID: %d", cfg.Name, id)
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	log.Println("Node ID: ", c.ID)
	r := NewRaftNode(*name, c, storage, lookup, transport)
	evtDelegate.AddEventDelegate(r)

	// Listen for incoming connections.
	l, err := net.Listen("tcp", fmt.Sprintf(":%d", *tcpport))
	if err != nil {
		log.Println("Error listening:", err.Error())
		return
	}

	// Start TCP server
	server := TCPServer{l, r}
	go server.Start()
	fmt.Printf("Listening on %s:%d\n", *tcpaddr, *tcpport)

	// Close the listener when the application closes.
	defer server.Stop()

	// Start raft server
	go r.Start()

	// Handle signals
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM) // os.Kill cannot be trapped

	// Wait for signal
	log.Println("Cluster open for business")

	// Block until signal is received
	<-sig
	log.Println("Shutting down node")
	if err = list.Leave(time.Second); err != nil {
		log.Println("Error leaving cluster: " + err.Error())
	}

	if err = list.Shutdown(); err != nil {
		log.Println("Error shutting down node: " + err.Error())
	}
}
Example #21
// NewClusterBind creates a new Cluster while allowing for
// specification of the address/port to bind to, the address/port to
// advertise to the other nodes (use zero values for defaults) as well
// as the hostname. (This is useful if your app is running in a Docker
// container where it is impossible to figure out the outside IP
// addresses and the hostname can be the same.)
func NewClusterBind(baddr string, bport int, aaddr string, aport int, rpcport int, name string) (*Cluster, error) {
	c := &Cluster{
		rcvChs:    make([]chan *Msg, 0),
		chgNotify: make([]chan bool, 0),
		dds:       make(map[string]*ddEntry),
		copies:    1,
		ncache:    make(map[*memberlist.Node]*Node),
	}
	cfg := memberlist.DefaultLANConfig()
	cfg.TCPTimeout = 30 * time.Second
	cfg.SuspicionMult = 6
	cfg.PushPullInterval = 15 * time.Second

	if baddr != "" {
		cfg.BindAddr = baddr
	}
	if bport != 0 {
		cfg.BindPort = bport
	}
	if aaddr != "" {
		cfg.AdvertiseAddr = aaddr
	}
	if aport != 0 {
		cfg.AdvertisePort = aport
	}
	if name != "" {
		cfg.Name = name
	}
	cfg.LogOutput = &logger{}
	cfg.Delegate, cfg.Events = c, c
	var err error
	if c.Memberlist, err = memberlist.Create(cfg); err != nil {
		return nil, err
	}
	md := &nodeMeta{sortBy: startTime.UnixNano()}
	c.saveMeta(md)
	if err = c.UpdateNode(updateNodeTO); err != nil {
		log.Printf("NewClusterBind(): UpdateNode() failed: %v", err)
		return nil, err
	}

	if rpcport == 0 {
		c.rpcPort = 12354
	} else {
		c.rpcPort = rpcport
	}

	c.snd, c.rcv = c.RegisterMsgType()

	rpc.Register(&ClusterRPC{c})
	if c.rpc, err = net.Listen("tcp", fmt.Sprintf("%s:%d", baddr, c.rpcPort)); err != nil {
		return nil, err
	}

	// Serve RPC requests. rpc.Accept itself blocks and accepts
	// connections in a loop, so it needs no wrapping for loop.
	go rpc.Accept(c.rpc)

	return c, nil
}
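cfg.LogOutput accepts any io.Writer, so the logger type above presumably adapts memberlist's log lines to the application's own logging. A minimal sketch of such an adapter (the real type is not shown):

type logger struct{}

// Write forwards each memberlist log line to the standard logger.
func (l *logger) Write(p []byte) (int, error) {
	log.Print(string(bytes.TrimSpace(p)))
	return len(p), nil
}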
Example #22
func main() {
	log.SetFlags(0)
	log.SetPrefix("gossipchat: ")

	port := flag.Int("port", 0, "port on which to listen")
	nick := flag.String("nick", "", "nickname to use on the chat")
	peersCSL := flag.String("peers", "", "comma separated list of addresses where peers can be found")
	flag.Parse()

	if *nick == "" {
		log.Fatal("you need to provice a -nick")
	}

	peers, err := parsePeers(*peersCSL)
	if err != nil {
		log.Fatalf("invalid value for flag -peers. %v", err)
	}

	var (
		msgc    <-chan []message
		memberc <-chan []string

		chatc = make(chan message, 1)
	)
	delegate, msgc := newDelegate(100, chatc)

	conf := memberlist.DefaultLANConfig()
	conf.LogOutput = ioutil.Discard
	conf.BindPort = *port
	conf.Name = *nick
	conf.Delegate = delegate
	conf.Events, memberc = newEvents()
	conf.Conflict = newConflicts()
	conf.PushPullInterval = time.Second
	mlist, err := memberlist.Create(conf)
	if err != nil {
		log.Fatalf("can't create memberlist: %v", err)
	}
	log.SetPrefix(mlist.LocalNode().Name + ": ")
	delegate.queue.NumNodes = mlist.NumMembers

	count, err := mlist.Join(peers)
	if err != nil {
		log.Printf("can't join peers: %v", err)
	}
	log.Printf("joined %d peers", count)

	log.Printf("we are reachable at %q", fmt.Sprintf("%s:%d",
		mlist.LocalNode().Addr, mlist.LocalNode().Port))
	name := mlist.LocalNode().Name

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		defer cancel()
		uichatc, err := runUI(ctx, name, memberc, msgc)
		if err != nil {
			log.Printf("can't start UI: %v", err)
			return
		}
		for chatmsg := range uichatc {
			select {
			case chatc <- chatmsg:
			case <-ctx.Done():
				return
			}
		}
		log.Printf("stopped UI")
	}()

	<-ctx.Done()

	if err := mlist.Leave(leaveTimeout); err != nil {
		log.Printf("could not announce leave: %v", err)
	}
	if err := mlist.Shutdown(); err != nil {
		log.Printf("could not shutdown cleanly: %v", err)
	}
}