func main() {
	flag.Parse()

	if flags.Help {
		flag.Usage()
		os.Exit(2)
	}

	if flags.Version {
		fmt.Printf("%s main: %s, data: %s\n",
			path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION)
		os.Exit(0)
	}

	cmd.MainCommon(cbgt.VERSION, flagAliases)

	cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	if flags.IndexTypes != "" {
		cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ","))
	}

	nodesToRemove := []string(nil)
	if len(flags.RemoveNodes) > 0 {
		nodesToRemove = strings.Split(flags.RemoveNodes, ",")
	}

	var steps map[string]bool
	if flags.Steps != "" {
		steps = cbgt.StringsToMap(strings.Split(flags.Steps, ","))
	}

	// ------------------------------------------------

	if steps == nil || steps["rebalance"] {
		log.Printf("main: step rebalance")

		err := runRebalance(cfg, flags.Server, nodesToRemove,
			flags.FavorMinNodes, flags.DryRun, flags.Verbose)
		if err != nil {
			log.Fatalf("%v", err)
			return
		}
	}

	// ------------------------------------------------

	err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION,
		flags.Server, nodesToRemove, flags.DryRun)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	log.Printf("main: done")
}
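// For illustration only: a minimal sketch of how the -steps flag value
// becomes the steps set consulted above, assuming cbgt.StringsToMap
// maps each slice element to true (consistent with its use here). A
// nil map (no -steps flag) means every step runs. The flag value shown
// is hypothetical.
//
//	steps := cbgt.StringsToMap(strings.Split("rebalance,unregister", ","))
//	// steps == map[string]bool{"rebalance": true, "unregister": true}
//	if steps == nil || steps["rebalance"] {
//		// Runs when -steps is absent or explicitly lists "rebalance".
//	}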
func (ctl *Ctl) startCtlLOCKED(
	mode string,
	memberNodeUUIDs []string,
	ctlOnProgress CtlOnProgressFunc) error {
	ctl.incRevNumLOCKED()

	ctlDoneCh := make(chan struct{})
	ctl.ctlDoneCh = ctlDoneCh

	ctlStopCh := make(chan struct{})
	ctl.ctlStopCh = ctlStopCh

	ctlChangeTopology := &CtlChangeTopology{
		Rev:             fmt.Sprintf("%d", ctl.revNum),
		Mode:            mode,
		MemberNodeUUIDs: memberNodeUUIDs,
	}
	ctl.ctlChangeTopology = ctlChangeTopology

	authType := ""
	if ctl.optionsMgr != nil {
		authType = ctl.optionsMgr["authType"]
	}

	httpGetWithAuth := func(urlStr string) (resp *http.Response, err error) {
		if authType == "cbauth" {
			return cbgt.CBAuthHttpGet(urlStr)
		}
		return http.Get(urlStr)
	}

	// The ctl goroutine.
	//
	go func() {
		var ctlErrs []error
		var ctlWarnings map[string][]string

		// Cleanup ctl goroutine.
		//
		defer func() {
			if ctlWarnings == nil {
				// If there were no warnings, see if there were any
				// warnings left in the plan.
				planPIndexes, _, err :=
					cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
				if err == nil {
					if planPIndexes != nil {
						ctlWarnings = planPIndexes.Warnings
					}
				} else {
					ctlErrs = append(ctlErrs, err)
				}
			}

			memberNodes, err := CurrentMemberNodes(ctl.cfg)
			if err != nil {
				ctlErrs = append(ctlErrs, err)
			}

			ctl.m.Lock()

			ctl.incRevNumLOCKED()

			ctl.memberNodes = memberNodes

			if ctl.ctlDoneCh == ctlDoneCh {
				ctl.ctlDoneCh = nil
			}

			if ctl.ctlStopCh == ctlStopCh {
				ctl.ctlStopCh = nil
			}

			if ctl.ctlChangeTopology == ctlChangeTopology {
				ctl.ctlChangeTopology = nil
			}

			ctl.prevWarnings = ctlWarnings
			ctl.prevErrs = ctlErrs

			if ctlOnProgress != nil {
				ctlOnProgress(0, 0, nil, nil, nil, nil, nil, ctlErrs)
			}

			ctl.m.Unlock()

			close(ctlDoneCh)
		}()

		// 1) Monitor cfg to wait for wanted nodes to appear.
		//
		nodesToRemove, err := ctl.waitForWantedNodes(memberNodeUUIDs, ctlStopCh)
		if err != nil {
			log.Printf("ctl: waitForWantedNodes, err: %v", err)
			ctlErrs = append(ctlErrs, err)
			return
		}

		// 2) Run rebalance in a loop (if not failover).
		//
		failover := strings.HasPrefix(mode, "failover")
		if !failover {
			// The loop handles the case where the index definitions
			// changed in the midst of the rebalance, in which case we
			// run rebalance again.
		REBALANCE_LOOP:
			for {
				// Retrieve the indexDefs before we do anything.
				indexDefsStart, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					log.Printf("ctl: PlannerGetIndexDefs, err: %v", err)
					ctlErrs = append(ctlErrs, err)
					return
				}

				if indexDefsStart == nil ||
					len(indexDefsStart.IndexDefs) <= 0 {
					break REBALANCE_LOOP
				}

				// Start rebalance and monitor progress.
				r, err := rebalance.StartRebalance(cbgt.VERSION,
					ctl.cfg, ctl.server, ctl.optionsMgr,
					nodesToRemove,
					rebalance.RebalanceOptions{
						FavorMinNodes: ctl.optionsCtl.FavorMinNodes,
						DryRun:        ctl.optionsCtl.DryRun,
						Verbose:       ctl.optionsCtl.Verbose,
						HttpGet:       httpGetWithAuth,
					})
				if err != nil {
					log.Printf("ctl: StartRebalance, err: %v", err)
					ctlErrs = append(ctlErrs, err)
					return
				}

				progressDoneCh := make(chan error)

				go func() {
					defer close(progressDoneCh)

					progressToString := func(maxNodeLen, maxPIndexLen int,
						seenNodes map[string]bool,
						seenNodesSorted []string,
						seenPIndexes map[string]bool,
						seenPIndexesSorted []string,
						progressEntries map[string]map[string]map[string]*rebalance.ProgressEntry,
					) string {
						if ctlOnProgress != nil {
							return ctlOnProgress(maxNodeLen, maxPIndexLen,
								seenNodes,
								seenNodesSorted,
								seenPIndexes,
								seenPIndexesSorted,
								progressEntries,
								nil)
						}

						return rebalance.ProgressTableString(
							maxNodeLen, maxPIndexLen,
							seenNodes,
							seenNodesSorted,
							seenPIndexes,
							seenPIndexesSorted,
							progressEntries)
					}

					err = rebalance.ReportProgress(r, progressToString)
					if err != nil {
						log.Printf("ctl: ReportProgress, err: %v", err)
						progressDoneCh <- err
					}
				}()

				defer r.Stop()

				select {
				case <-ctlStopCh:
					return // Exit ctl goroutine.

				case err = <-progressDoneCh:
					if err != nil {
						ctlErrs = append(ctlErrs, err)
						return
					}
				}

				ctlWarnings = r.GetEndPlanPIndexes().Warnings

				// Repeat if the indexDefs had changed mid-rebalance.
				indexDefsEnd, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					ctlErrs = append(ctlErrs, err)
					return
				}

				if reflect.DeepEqual(indexDefsStart, indexDefsEnd) {
					// NOTE: There's a race or hole here where at this
					// point we think the indexDefs haven't changed;
					// but, an adversary could still change the
					// indexDefs before we can run the PlannerSteps().
					break REBALANCE_LOOP
				}
			}
		}

		// 3) Run planner steps, like unregister and failover.
		//
		steps := map[string]bool{"unregister": true}
		if failover {
			steps["failover_"] = true
		} else {
			steps["planner"] = true
		}

		err = cmd.PlannerSteps(steps, ctl.cfg, cbgt.VERSION,
			ctl.server, ctl.optionsMgr, nodesToRemove,
			ctl.optionsCtl.DryRun, nil)
		if err != nil {
			log.Printf("ctl: PlannerSteps, err: %v", err)
			ctlErrs = append(ctlErrs, err)
		}
	}()

	return nil
}
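// The "LOCKED" suffix follows the convention in this codebase that the
// caller must already hold ctl.m. A minimal sketch of a locking
// wrapper, assuming that convention; the StartTopologyChange name is
// hypothetical, not part of the actual API:
//
//	func (ctl *Ctl) StartTopologyChange(mode string,
//		memberNodeUUIDs []string, onProgress CtlOnProgressFunc) error {
//		ctl.m.Lock()
//		defer ctl.m.Unlock()
//		return ctl.startCtlLOCKED(mode, memberNodeUUIDs, onProgress)
//	}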
// When the index definitions have changed, our approach is to run the
// planner, but only for brand new indexes that don't have any
// pindexes yet.
func (ctl *Ctl) IndexDefsChanged() (err error) {
	plannerFilterNewIndexesOnly := func(indexDef *cbgt.IndexDef,
		planPIndexesPrev, planPIndexes *cbgt.PlanPIndexes) bool {
		copyPrevPlan := func() {
			// Copy over the previous plan, if any, for the index.
			if planPIndexesPrev != nil && planPIndexes != nil {
				for n, p := range planPIndexesPrev.PlanPIndexes {
					if p.IndexName == indexDef.Name &&
						p.IndexUUID == indexDef.UUID {
						planPIndexes.PlanPIndexes[n] = p

						// Copy over previous warnings, if any.
						if planPIndexes.Warnings == nil {
							planPIndexes.Warnings = map[string][]string{}
						}

						if planPIndexesPrev.Warnings != nil {
							prev := planPIndexesPrev.Warnings[indexDef.Name]
							if prev != nil {
								planPIndexes.Warnings[indexDef.Name] = prev
							}
						}
					}
				}
			}
		}

		// Split each indexDef into 1 or more PlanPIndexes.
		planPIndexesForIndex, err := cbgt.SplitIndexDefIntoPlanPIndexes(
			indexDef, ctl.server, ctl.optionsMgr, nil)
		if err != nil {
			copyPrevPlan()
			return false
		}

		for pindexName := range planPIndexesForIndex {
			if planPIndexesPrev.PlanPIndexes[pindexName] != nil {
				copyPrevPlan()
				return false
			}
		}

		return true
	}

	go func() {
		steps := map[string]bool{"planner": true}

		var nodesToRemove []string

		cmd.PlannerSteps(steps, ctl.cfg, cbgt.VERSION,
			ctl.server, ctl.optionsMgr, nodesToRemove,
			ctl.optionsCtl.DryRun, plannerFilterNewIndexesOnly)

		planPIndexes, _, err :=
			cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
		if err == nil && planPIndexes != nil {
			ctl.m.Lock()
			ctl.incRevNumLOCKED()
			ctl.prevWarnings = planPIndexes.Warnings
			ctl.m.Unlock()
		}
	}()

	return nil
}
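// A minimal sketch of wiring IndexDefsChanged to cfg change
// notifications, assuming cbgt's Cfg.Subscribe(key, ch) API and the
// cbgt.INDEX_DEFS_KEY constant; the wiring shown is illustrative and
// not necessarily the actual startup path:
//
//	cfgEventCh := make(chan cbgt.CfgEvent)
//	ctl.cfg.Subscribe(cbgt.INDEX_DEFS_KEY, cfgEventCh)
//	go func() {
//		for range cfgEventCh {
//			ctl.IndexDefsChanged()
//		}
//	}()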
func main() {
	flag.Parse()

	if flags.Help {
		flag.Usage()
		os.Exit(2)
	}

	if flags.Version {
		fmt.Printf("%s main: %s, data: %s\n",
			path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION)
		os.Exit(0)
	}

	cmd.MainCommon(cbgt.VERSION, flagAliases)

	cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	if flags.IndexTypes != "" {
		cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ","))
	}

	nodesToRemove := []string(nil)
	if len(flags.RemoveNodes) > 0 {
		nodesToRemove = strings.Split(flags.RemoveNodes, ",")
	}

	steps := map[string]bool{}
	if flags.Steps != "" {
		steps = cbgt.StringsToMap(strings.Split(flags.Steps, ","))
	}

	// ------------------------------------------------

	if steps != nil && steps["rebalance"] {
		steps["rebalance_"] = true
		steps["unregister"] = true
		steps["planner"] = true
	}

	// ------------------------------------------------

	if steps != nil && steps["rebalance_"] {
		log.Printf("main: step rebalance_")

		err := rebalance.RunRebalance(cfg, flags.Server, nodesToRemove,
			flags.FavorMinNodes, flags.DryRun, flags.Verbose, nil)
		if err != nil {
			log.Fatalf("main: RunRebalance, err: %v", err)
			return
		}
	}

	// ------------------------------------------------

	err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION,
		flags.Server, nodesToRemove, flags.DryRun, nil)
	if err != nil {
		log.Fatalf("main: PlannerSteps, err: %v", err)
		return
	}

	// ------------------------------------------------

	var c *ctl.Ctl

	if steps != nil &&
		(steps["service"] || steps["rest"] || steps["prompt"]) {
		c, err = ctl.StartCtl(cfg, flags.Server, ctl.CtlOptions{
			DryRun:             flags.DryRun,
			Verbose:            flags.Verbose,
			FavorMinNodes:      flags.FavorMinNodes,
			WaitForMemberNodes: flags.WaitForMemberNodes,
		})
		if err != nil {
			log.Fatalf("main: StartCtl, err: %v", err)
			return
		}

		if steps["service"] {
			// TODO.
		}

		if steps["rest"] {
			bindHttp := flags.BindHttp
			if bindHttp[0] == ':' {
				bindHttp = "localhost" + bindHttp
			}
			if strings.HasPrefix(bindHttp, "0.0.0.0:") {
				bindHttp = "localhost" + bindHttp[len("0.0.0.0"):]
			}

			http.Handle("/", newRestRouter(c))

			go func() {
				log.Printf("------------------------------------------------------------")
				log.Printf("REST API is available: http://%s", bindHttp)
				log.Printf("------------------------------------------------------------")

				err := http.ListenAndServe(bindHttp, nil) // Blocks.
				if err != nil {
					log.Fatalf("main: listen, err: %v\n"+
						"  Please check that your -bindHttp parameter (%q)\n"+
						"  is correct and available.", err, bindHttp)
				}
			}()
		}

		if steps["prompt"] {
			go runCtlPrompt(c)
		}

		<-make(chan struct{})
	}

	// ------------------------------------------------

	log.Printf("main: done")
}
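// Example invocation (illustrative; apart from -bindHttp, which
// appears in the error message above, the flag spellings and the
// binary name are inferred from the flags struct fields and may
// differ):
//
//	./cbgt-ctl -cfgConnect=couchbase:http://cb-host:8091 \
//		-server=http://cb-host:8091 \
//		-steps=rest,prompt -bindHttp=:9090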