func setupReplication(c *cli.Context, cluster cluster.Cluster, server *api.Server, discovery discovery.Backend, addr string, leaderTTL time.Duration, tlsConfig *tls.Config) {
	kvDiscovery, ok := discovery.(*kvdiscovery.Discovery)
	if !ok {
		log.Fatal("Leader election is only supported with consul, etcd and zookeeper discovery.")
	}
	client := kvDiscovery.Store()
	p := path.Join(kvDiscovery.Prefix(), leaderElectionPath)

	candidate := leadership.NewCandidate(client, p, addr, leaderTTL)
	follower := leadership.NewFollower(client, p)

	primary := api.NewPrimary(cluster, tlsConfig, &statusHandler{cluster, candidate, follower}, c.GlobalBool("debug"), c.Bool("cors"))
	replica := api.NewReplica(primary, tlsConfig)

	go func() {
		for {
			run(candidate, server, primary, replica)
			time.Sleep(defaultRecoverTime)
		}
	}()

	go func() {
		for {
			follow(follower, replica, addr)
			time.Sleep(defaultRecoverTime)
		}
	}()

	server.SetHandler(primary)
}
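// The run and follow helpers referenced above are not part of this snippet.
// A minimal sketch of both, assuming the two-channel RunForElection and
// FollowElection API used by the other examples on this page; the handler
// wiring and SetPrimary call are illustrative, not Swarm's exact code.
func run(candidate *leadership.Candidate, server *api.Server, primary http.Handler, replica http.Handler) {
	electedCh, errCh := candidate.RunForElection()
	for {
		select {
		case isElected := <-electedCh:
			if isElected {
				server.SetHandler(primary) // this node is the leader: serve requests directly
			} else {
				server.SetHandler(replica) // not the leader: proxy requests to the leader
			}
		case err := <-errCh:
			log.Error(err)
			return // the caller retries after defaultRecoverTime
		}
	}
}

func follow(follower *leadership.Follower, replica *api.Replica, addr string) {
	leaderCh, errCh := follower.FollowElection()
	for {
		select {
		case leader := <-leaderCh:
			if leader != addr {
				replica.SetPrimary(leader) // point the proxy at the current leader
			}
		case err := <-errCh:
			log.Error(err)
			return
		}
	}
}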
// NewLeadership creates a new Leadership that campaigns for the cluster's leader key.
func NewLeadership(ctx context.Context, cluster *types.Cluster) *Leadership {
	return &Leadership{
		Pool:      safe.NewPool(ctx),
		Cluster:   cluster,
		candidate: leadership.NewCandidate(cluster.Store, cluster.Store.Prefix+"/leader", cluster.Node, 20*time.Second),
		listeners: []LeaderListener{},
	}
}
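// How this Leadership is driven is not shown above. A minimal sketch, assuming
// safe.Pool exposes Go(func(stop chan bool)), that LeaderListener is
// func(elected bool), and the same two-channel RunForElection as the other
// examples on this page; the Participate name and log calls are illustrative.
func (l *Leadership) Participate() {
	l.Pool.Go(func(stop chan bool) {
		electedCh, errCh := l.candidate.RunForElection()
		for {
			select {
			case elected := <-electedCh:
				// Fan the election result out to every registered listener.
				for _, listener := range l.listeners {
					listener(elected)
				}
			case err := <-errCh:
				log.Errorf("Leader election error: %v", err)
				return
			case <-stop:
				return
			}
		}
	})
}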
func (a *AgentCommand) participate() {
	a.candidate = leadership.NewCandidate(a.store.Client, a.store.LeaderKey(), a.config.NodeName, defaultLeaderTTL)

	go func() {
		for {
			a.runForElection()
			// Wait before retrying the election.
			time.Sleep(defaultRecoverTime)
		}
	}()
}
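// runForElection is referenced above but not shown. A plausible sketch,
// assuming the two-channel RunForElection used by the other examples on this
// page; the leader bookkeeping and logging are illustrative.
func (a *AgentCommand) runForElection() {
	log.Info("agent: Running for election")
	electedCh, errCh := a.candidate.RunForElection()
	for {
		select {
		case isElected := <-electedCh:
			if isElected {
				log.Info("agent: Cluster leadership acquired")
				// Leader-only duties (e.g. scheduling) would start here.
			} else {
				log.Info("agent: Cluster leadership lost")
			}
		case err := <-errCh:
			log.Error("agent: Leader election failed: ", err)
			// Returning hands control back to participate's retry loop.
			return
		}
	}
}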
func NewSentinel(uid string, cfg *config, stop chan bool, end chan bool) (*Sentinel, error) {
	var initialClusterSpec *cluster.ClusterSpec
	if cfg.initialClusterSpecFile != "" {
		configData, err := ioutil.ReadFile(cfg.initialClusterSpecFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read provided initial cluster config file: %v", err)
		}
		if err := json.Unmarshal(configData, &initialClusterSpec); err != nil {
			return nil, fmt.Errorf("cannot parse provided initial cluster config: %v", err)
		}
		log.Debug("initialClusterSpec dump", zap.String("initialClusterSpec", spew.Sdump(initialClusterSpec)))
		if err := initialClusterSpec.Validate(); err != nil {
			return nil, fmt.Errorf("invalid initial cluster: %v", err)
		}
	}

	storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)

	kvstore, err := store.NewStore(store.Config{
		Backend:       store.Backend(cfg.storeBackend),
		Endpoints:     cfg.storeEndpoints,
		CertFile:      cfg.storeCertFile,
		KeyFile:       cfg.storeKeyFile,
		CAFile:        cfg.storeCAFile,
		SkipTLSVerify: cfg.storeSkipTlsVerify,
	})
	if err != nil {
		return nil, fmt.Errorf("cannot create store: %v", err)
	}
	e := store.NewStoreManager(kvstore, storePath)

	candidate := leadership.NewCandidate(kvstore, filepath.Join(storePath, common.SentinelLeaderKey), uid, store.MinTTL)

	return &Sentinel{
		uid:                uid,
		cfg:                cfg,
		e:                  e,
		candidate:          candidate,
		leader:             false,
		initialClusterSpec: initialClusterSpec,
		stop:               stop,
		end:                end,
		UIDFn:              common.UID,
		// This is just to choose a pseudo random keeper so
		// use math.rand (no need for crypto.rand) without an
		// initial seed.
		RandFn:         rand.Intn,
		sleepInterval:  cluster.DefaultSleepInterval,
		requestTimeout: cluster.DefaultRequestTimeout,
	}, nil
}
func main() {
	c, _, err := zk.Connect([]string{"127.0.0.1"}, time.Second) //*10)
	if err != nil {
		panic(err)
	}

	children, stat, ch, err := c.ChildrenW("/")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v %+v\n", children, stat)

	// Block until the watch set by ChildrenW fires.
	e := <-ch
	fmt.Printf("%+v\n", e)

	// NewCandidate takes a libkv store, a key, a node name and a TTL, so the
	// raw *zk.Conn cannot be passed directly; the values below are placeholders.
	kv, err := zookeeper.New([]string{"127.0.0.1"}, &store.Config{})
	if err != nil {
		panic(err)
	}
	leadership.NewCandidate(kv, "/leader", "node1", 15*time.Second)

	// CreateProtectedEphemeralSequential needs a path prefix, a payload and an
	// ACL; the path here is a placeholder.
	if _, err := c.CreateProtectedEphemeralSequential("/election/n_", nil, zk.WorldACL(zk.PermAll)); err != nil {
		panic(err)
	}
}
func (s *ServerCommand) Run(args []string) int {
	s.Config = NewConfig()
	s.Store = NewStore(s.Config.BackendMachines, s, s.Config.Keyspace)
	s.Scheduler = NewScheduler()
	log.Info(s.Store.Client)

	s.Candidate = leadership.NewCandidate(s.Store.Client, s.Store.LeaderKey(), "underwood", defaultLeaderTTL)
	go func() {
		for {
			s.runForElection()
			// If the server hits a failure or similar problem, wait a while
			// and then retry the election.
			time.Sleep(defaultRecoverTime)
		}
	}()

	// Block and listen for exit signals.
	return s.handleSignals()
}
func (this *Monitor) ServeForever() {
	defer this.zkzone.Close()

	this.startedAt = time.Now()
	log.Info("kguard[%s@%s] starting...", gafka.BuildId, gafka.BuiltAt)

	signal.RegisterHandler(func(sig os.Signal) {
		log.Info("kguard[%s@%s] received signal: %s", gafka.BuildId, gafka.BuiltAt, strings.ToUpper(sig.String()))
		this.Stop()
		close(this.quit)
	}, syscall.SIGINT, syscall.SIGTERM)

	// start the api server
	apiServer := &http.Server{
		Addr:    this.apiAddr,
		Handler: this.router,
	}
	apiListener, err := net.Listen("tcp4", this.apiAddr)
	if err == nil {
		log.Info("api http server ready on %s", this.apiAddr)
		go apiServer.Serve(apiListener)
	} else {
		panic(fmt.Sprintf("api http server: %v", err))
	}

	backend, err := zookeeper.New(this.zkzone.ZkAddrList(), &store.Config{})
	if err != nil {
		panic(err)
	}
	ip, _ := ctx.LocalIP()
	this.candidate = leadership.NewCandidate(backend, zk.KguardLeaderPath, ip.String(), 25*time.Second)
	// Election errors (e.g. the store going down) are reported on errCh below.
	electedCh, errCh := this.candidate.RunForElection()

	for {
		select {
		case isElected := <-electedCh:
			if isElected {
				log.Info("Won the election, starting all watchers")
				this.Start()
			} else {
				log.Warn("Lost the election, watching election events...")
				this.Stop()
			}

		case err := <-errCh:
			if err != nil {
				log.Error("Error during election: %v", err)
			}

		case <-this.quit:
			apiListener.Close()
			log.Info("api http server closed")
			log.Info("kguard[%s@%s] bye!", gafka.BuildId, gafka.BuiltAt)
			log.Close()
			return
		}
	}
}