Example #1
func initCluster(cmd *cobra.Command, args []string) {
	if len(args) > 1 {
		die("too many arguments")
	}

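	// The cluster spec can be passed as a single argument, read from the
	// file given with the file option, or read from stdin when the file is "-".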
	data := []byte{}
	switch len(args) {
	case 1:
		data = []byte(args[0])
	case 0:
		if initOpts.file != "" {
			var err error
			if initOpts.file == "-" {
				data, err = ioutil.ReadAll(os.Stdin)
				if err != nil {
					die("cannot read from stdin: %v", err)
				}
			} else {
				data, err = ioutil.ReadFile(initOpts.file)
				if err != nil {
					die("cannot read file: %v", err)
				}
			}
		}
	}

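	// Create a client for the configured store backend and a store manager
	// rooted at this cluster's path.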
	storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
	kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
	if err != nil {
		die("cannot create store: %v", err)
	}
	e := store.NewStoreManager(kvstore, storePath)

	cd, _, err := e.GetClusterData()
	if err != nil {
		die("cannot get cluster data: %v", err)
	}
	if cd != nil {
		stdout("WARNING: The current cluster data will be removed")
	}
	stdout("WARNING: The databases managed by the keepers will be overwrited depending on the provided cluster spec.")

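	// Ask for confirmation unless it was explicitly skipped with the force-yes option.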
	accepted := true
	if !initOpts.forceYes {
		accepted, err = askConfirmation("Are you sure you want to continue? [yes/no] ")
		if err != nil {
			die("%v", err)
		}
	}
	if !accepted {
		stdout("exiting")
		os.Exit(0)
	}

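	// Re-read the cluster data after the confirmation prompt.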
	cd, _, err = e.GetClusterData()
	if err != nil {
		die("cannot get cluster data: %v", err)
	}

	var cs *cluster.ClusterSpec
	if len(data) == 0 {
		// Define a new cluster spec with initMode "new"
		cs = &cluster.ClusterSpec{}
		cs.InitMode = cluster.ClusterInitModeNew
	} else {
		if err := json.Unmarshal(data, &cs); err != nil {
			die("failed to unmarshal cluster spec: %v", err)
		}
	}

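	// Fill in spec defaults and validate it before creating the cluster data.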
	cs.SetDefaults()
	if err := cs.Validate(); err != nil {
		die("invalid cluster spec: %v", err)
	}

	c := cluster.NewCluster(common.UID(), cs)
	cd = cluster.NewClusterData(c)

	// We ignore whether cd has been modified between reading and writing.
	if err := e.PutClusterData(cd); err != nil {
		die("cannot update cluster data: %v", err)
	}
}
Example #2
func (s *Sentinel) clusterSentinelCheck(pctx context.Context) {
	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()
	e := s.e

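	// Read the current cluster data together with the pair needed for the
	// later atomic update.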
	cd, prevCDPair, err := e.GetClusterData()
	if err != nil {
		log.Error("error retrieving cluster data", zap.Error(err))
		return
	}
	if cd != nil {
		if cd.FormatVersion != cluster.CurrentCDFormatVersion {
			log.Error("unsupported clusterdata format version", zap.Uint64("version", cd.FormatVersion))
			return
		}
		if cd.Cluster != nil {
			s.sleepInterval = cd.Cluster.Spec.SleepInterval.Duration
			s.requestTimeout = cd.Cluster.Spec.RequestTimeout.Duration
		}
	}
	log.Debug("cd dump", zap.String("cd", spew.Sdump(cd)))

	if cd == nil {
		// Cluster first initialization
		if s.initialClusterSpec == nil {
			log.Info("no cluster data available, waiting for it to appear")
			return
		}
		c := cluster.NewCluster(s.UIDFn(), s.initialClusterSpec)
		log.Info("writing initial cluster data")
		newcd := cluster.NewClusterData(c)
		log.Debug("newcd dump", zap.String("newcd", spew.Sdump(newcd)))
		if _, err = e.AtomicPutClusterData(newcd, nil); err != nil {
			log.Error("error saving cluster data", zap.Error(err))
		}
		return
	}

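	// Update this sentinel's info in the store.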
	if err = s.setSentinelInfo(2 * s.sleepInterval); err != nil {
		log.Error("cannot update sentinel info", zap.Error(err))
		return
	}

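	// Query the keepers for their current info, bounded by the request timeout.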
	ctx, cancel := context.WithTimeout(pctx, s.requestTimeout)
	keepersInfo, err := s.getKeepersInfo(ctx)
	cancel()
	if err != nil {
		log.Error("err", zap.Error(err))
		return
	}
	log.Debug("keepersInfo dump", zap.String("keepersInfo", spew.Sdump(keepersInfo)))

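	// Only the elected leader sentinel performs cluster updates.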
	isLeader, leadershipCount := s.leaderInfo()
	if !isLeader {
		return
	}

	// detect if this is the first check after (re)gaining leadership
	firstRun := false
	if s.lastLeadershipCount != leadershipCount {
		firstRun = true
		s.lastLeadershipCount = leadershipCount
	}

	// if this is the first check after (re)gaining leadership reset all
	// the internal timers
	if firstRun {
		s.keeperErrorTimers = make(map[string]int64)
		s.dbErrorTimers = make(map[string]int64)
		s.keeperInfoHistories = make(KeeperInfoHistories)
	}

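	// Update the keepers' status in the cluster data based on the received keepers info.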
	newcd, newKeeperInfoHistories := s.updateKeepersStatus(cd, keepersInfo, firstRun)

	newcd, err = s.updateCluster(newcd)
	if err != nil {
		log.Error("failed to update cluster data", zap.Error(err))
		return
	}
	log.Debug("newcd dump after updateCluster", zap.String("newcd", spew.Sdump(newcd)))

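	// Atomically store the new cluster data only if it hasn't changed since it was read.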
	if newcd != nil {
		if _, err := e.AtomicPutClusterData(newcd, prevCDPair); err != nil {
			log.Error("error saving clusterdata", zap.Error(err))
		}
	}

	// Save the new keeperInfoHistories only on a successful cluster data
	// update, or on the next run we'll think that the saved keeperInfo was
	// already applied.
	s.keeperInfoHistories = newKeeperInfoHistories
}