func main() { flag.Parse() if flags.Help { flag.Usage() os.Exit(2) } if flags.Version { fmt.Printf("%s main: %s, data: %s\n", path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION) os.Exit(0) } cmd.MainCommon(cbgt.VERSION, flagAliases) cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect) if err != nil { log.Fatalf("%v", err) return } if flags.IndexTypes != "" { cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ",")) } nodesToRemove := []string(nil) if len(flags.RemoveNodes) > 0 { nodesToRemove = strings.Split(flags.RemoveNodes, ",") } var steps map[string]bool if flags.Steps != "" { steps = cbgt.StringsToMap(strings.Split(flags.Steps, ",")) } // ------------------------------------------------ if steps == nil || steps["rebalance"] { log.Printf("main: step rebalance") err := runRebalance(cfg, flags.Server, nodesToRemove, flags.FavorMinNodes, flags.DryRun, flags.Verbose) if err != nil { log.Fatalf("%v", err) return } } // ------------------------------------------------ err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION, flags.Server, nodesToRemove, flags.DryRun) if err != nil { log.Fatalf("%v", err) return } log.Printf("main: done") }
// Failover promotes replicas to primary for the remaining nodes.
//
// For every pindex assignment on a node in nodesFailover: if that node
// was a primary (Priority <= 0), a surviving replica is promoted to
// primary; failing that, and if the "failoverAssignAllPrimaries"
// option (or its per-index "failoverAssignAllPrimaries-IndexName"
// override; default true) allows it, the primary assignment from a
// freshly calculated plan is used.  The failed-over node's entries are
// then removed from the plan.
//
// Returns (true, nil) when a changed plan was successfully saved,
// (false, nil) when the plan was unchanged, and (false, err) on any
// fetch/calc/save failure.  The save uses the CAS from the fetch, so
// a concurrent planner may win the race (reported as an error).
func Failover(cfg cbgt.Cfg, version string, server string,
	options map[string]string, nodesFailover []string) (bool, error) {
	// Set of node UUIDs being failed over, for O(1) membership checks.
	mapNodesFailover := cbgt.StringsToMap(nodesFailover)

	uuid := ""

	indexDefs, nodeDefs, planPIndexesPrev, cas, err :=
		cbgt.PlannerGetPlan(cfg, version, uuid)
	if err != nil {
		return false, err
	}

	// Calculate a fresh plan to consult as a fallback source of
	// primary assignments when no replica is available to promote.
	planPIndexesCalc, err := cbgt.CalcPlan("failover",
		indexDefs, nodeDefs, planPIndexesPrev, version, server, options, nil)
	if err != nil {
		return false, fmt.Errorf("planner: failover CalcPlan, err: %v", err)
	}

	// Mutate a copy of the previous plan, never the original.
	planPIndexesNext := cbgt.CopyPlanPIndexes(planPIndexesPrev, version)
	for planPIndexName, planPIndex := range planPIndexesNext.PlanPIndexes {
		for node, planPIndexNode := range planPIndex.Nodes {
			if !mapNodesFailover[node] {
				continue
			}
			if planPIndexNode.Priority <= 0 {
				// Failover'ed node used to be a primary for this
				// pindex, so find a replica to promote.
				promoted := ""

			PROMOTE_REPLICA:
				for nodePro, ppnPro := range planPIndex.Nodes {
					if mapNodesFailover[nodePro] {
						continue // Skip nodes that are also failing over.
					}
					if ppnPro.Priority >= 1 {
						// Priority 0 marks a primary; reassign the
						// entry so the promotion sticks in the map.
						ppnPro.Priority = 0
						planPIndex.Nodes[nodePro] = ppnPro
						promoted = nodePro
						break PROMOTE_REPLICA
					}
				}

				// If we didn't find a replica to promote, and we're
				// configured with the option to
				// "failoverAssignAllPrimaries-IndexName" or
				// "failoverAssignAllPrimaries" (default true), then
				// assign the primary from the calculated plan.
				if promoted == "" && ParseOptionsBool(options,
					"failoverAssignAllPrimaries", planPIndex.IndexName, true) {
					planPIndexCalc, exists :=
						planPIndexesCalc.PlanPIndexes[planPIndexName]
					if exists && planPIndexCalc != nil {
					ASSIGN_PRIMARY:
						for nodeCalc, ppnCalc := range planPIndexCalc.Nodes {
							if ppnCalc.Priority <= 0 &&
								!mapNodesFailover[nodeCalc] {
								planPIndex.Nodes[nodeCalc] = ppnCalc
								promoted = nodeCalc
								break ASSIGN_PRIMARY
							}
						}
					}
				}
			}

			// Drop the failed-over node's assignment.  Deleting
			// during range is safe in Go.
			delete(planPIndex.Nodes, node)
		}
	}

	// TODO: Missing under-replication constraint warnings.

	// No effective change; avoid a needless cfg write.
	if cbgt.SamePlanPIndexes(planPIndexesNext, planPIndexesPrev) {
		return false, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(cfg, planPIndexesNext, cas)
	if err != nil {
		return false, fmt.Errorf("planner: failover could not save plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return true, nil
}
// Failover promotes replicas to primary for the remaining nodes.
//
// NOTE(review): this is a variant of Failover without the options
// parameter; unlike the options-aware variant, it always falls back
// to the calculated plan when no replica is available to promote.
//
// Returns (true, nil) when a changed plan was successfully saved,
// (false, nil) when the plan was unchanged, and (false, err) on any
// fetch/calc/save failure.
func Failover(cfg cbgt.Cfg, version string, server string,
	nodesFailover []string) (bool, error) {
	// Set of node UUIDs being failed over, for O(1) membership checks.
	mapNodesFailover := cbgt.StringsToMap(nodesFailover)

	uuid := ""

	indexDefs, nodeDefs, planPIndexesPrev, cas, err :=
		cbgt.PlannerGetPlan(cfg, version, uuid)
	if err != nil {
		return false, err
	}

	// Calculate a fresh plan to consult as a fallback source of
	// primary assignments when no replica is available to promote.
	planPIndexesCalc, err := cbgt.CalcPlan("failover",
		indexDefs, nodeDefs, planPIndexesPrev, version, server)
	if err != nil {
		return false, fmt.Errorf("planner: failover CalcPlan, err: %v", err)
	}

	// Mutate a copy of the previous plan, never the original.
	planPIndexesNext := cbgt.CopyPlanPIndexes(planPIndexesPrev, version)
	for planPIndexName, planPIndex := range planPIndexesNext.PlanPIndexes {
		for node, planPIndexNode := range planPIndex.Nodes {
			if !mapNodesFailover[node] {
				continue
			}
			if planPIndexNode.Priority <= 0 {
				// Failover'ed node used to be a primary for this
				// pindex, so find a replica to promote.
				promoted := ""

			PROMOTE_REPLICA:
				for nodePro, ppnPro := range planPIndex.Nodes {
					if mapNodesFailover[nodePro] {
						continue // Skip nodes that are also failing over.
					}
					if ppnPro.Priority >= 1 {
						// Priority 0 marks a primary; reassign the
						// entry so the promotion sticks in the map.
						ppnPro.Priority = 0
						planPIndex.Nodes[nodePro] = ppnPro
						promoted = nodePro
						break PROMOTE_REPLICA
					}
				}

				if promoted == "" {
					// Didn't find a replica to promote, so consult the
					// calculated plan for the primary assignment.
					planPIndexCalc, exists :=
						planPIndexesCalc.PlanPIndexes[planPIndexName]
					if exists && planPIndexCalc != nil {
					PROMOTE_CALC:
						for nodeCalc, ppnCalc := range planPIndexCalc.Nodes {
							if ppnCalc.Priority <= 0 &&
								!mapNodesFailover[nodeCalc] {
								planPIndex.Nodes[nodeCalc] = ppnCalc
								promoted = nodeCalc
								break PROMOTE_CALC
							}
						}
					}
				}
			}

			// Drop the failed-over node's assignment.  Deleting
			// during range is safe in Go.
			delete(planPIndex.Nodes, node)
		}
	}

	// TODO: Missing under-replication constraint warnings.

	// No effective change; avoid a needless cfg write.
	if cbgt.SamePlanPIndexes(planPIndexesNext, planPIndexesPrev) {
		return false, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(cfg, planPIndexesNext, cas)
	if err != nil {
		return false, fmt.Errorf("planner: failover could not save plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return true, nil
}
func main() { flag.Parse() if flags.Help { flag.Usage() os.Exit(2) } if flags.Version { fmt.Printf("%s main: %s, data: %s\n", path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION) os.Exit(0) } cmd.MainCommon(cbgt.VERSION, flagAliases) cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect) if err != nil { log.Fatalf("%v", err) return } if flags.IndexTypes != "" { cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ",")) } nodesToRemove := []string(nil) if len(flags.RemoveNodes) > 0 { nodesToRemove = strings.Split(flags.RemoveNodes, ",") } steps := map[string]bool{} if flags.Steps != "" { steps = cbgt.StringsToMap(strings.Split(flags.Steps, ",")) } // ------------------------------------------------ if steps != nil && steps["rebalance"] { steps["rebalance_"] = true steps["unregister"] = true steps["planner"] = true } // ------------------------------------------------ if steps != nil && steps["rebalance_"] { log.Printf("main: step rebalance_") err := rebalance.RunRebalance(cfg, flags.Server, nodesToRemove, flags.FavorMinNodes, flags.DryRun, flags.Verbose, nil) if err != nil { log.Fatalf("main: RunRebalance, err: %v", err) return } } // ------------------------------------------------ err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION, flags.Server, nodesToRemove, flags.DryRun, nil) if err != nil { log.Fatalf("main: PlannerSteps, err: %v", err) return } // ------------------------------------------------ var c *ctl.Ctl if steps != nil && (steps["service"] || steps["rest"] || steps["prompt"]) { c, err = ctl.StartCtl(cfg, flags.Server, ctl.CtlOptions{ DryRun: flags.DryRun, Verbose: flags.Verbose, FavorMinNodes: flags.FavorMinNodes, WaitForMemberNodes: flags.WaitForMemberNodes, }) if err != nil { log.Fatalf("main: StartCtl, err: %v", err) return } if steps["service"] { // TODO. 
} if steps["rest"] { bindHttp := flags.BindHttp if bindHttp[0] == ':' { bindHttp = "localhost" + bindHttp } if strings.HasPrefix(bindHttp, "0.0.0.0:") { bindHttp = "localhost" + bindHttp[len("0.0.0.0"):] } http.Handle("/", newRestRouter(c)) go func() { log.Printf("------------------------------------------------------------") log.Printf("REST API is available: http://%s", bindHttp) log.Printf("------------------------------------------------------------") err := http.ListenAndServe(bindHttp, nil) // Blocks. if err != nil { log.Fatalf("main: listen, err: %v\n"+ " Please check that your -bindHttp parameter (%q)\n"+ " is correct and available.", err, bindHttp) } }() } if steps["prompt"] { go runCtlPrompt(c) } <-make(chan struct{}) } // ------------------------------------------------ log.Printf("main: done") }