// assignPIndexLOCKED updates the cfg with the pindex assignment, and
// should be invoked while holding the r.m lock.
func (r *Rebalancer) assignPIndexLOCKED(index, pindex, node, state, op string) (
	*cbgt.IndexDef, *cbgt.PlanPIndexes, string, error) {
	err := r.assignPIndexCurrStates_unlocked(index, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		return nil, nil, "", fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	formerPrimaryNode, err := r.updatePlanPIndexes_unlocked(planPIndexes,
		indexDef, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

	if r.optionsReb.DryRun {
		return nil, nil, formerPrimaryNode, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)
	if err != nil {
		return nil, nil, "", err
	}

	return indexDef, planPIndexes, formerPrimaryNode, err
}
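
// The LOCKED suffix means the caller already holds r.m for the duration of
// the call. A minimal, hypothetical wrapper (assignPIndexWithLock is not
// part of this source) sketching that convention: acquire r.m, delegate to
// the LOCKED variant, and release the lock on return.
func (r *Rebalancer) assignPIndexWithLock(index, pindex, node, state, op string) (
	*cbgt.IndexDef, *cbgt.PlanPIndexes, string, error) {
	r.m.Lock()
	defer r.m.Unlock()

	return r.assignPIndexLOCKED(index, pindex, node, state, op)
}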

// startCtlLOCKED kicks off the ctl goroutine that drives a topology change
// (rebalance or failover) for the given mode and member nodes; it must be
// invoked while holding the ctl.m lock.
func (ctl *Ctl) startCtlLOCKED(
	mode string,
	memberNodeUUIDs []string,
	ctlOnProgress CtlOnProgressFunc) error {
	ctl.incRevNumLOCKED()

	ctlDoneCh := make(chan struct{})
	ctl.ctlDoneCh = ctlDoneCh

	ctlStopCh := make(chan struct{})
	ctl.ctlStopCh = ctlStopCh

	ctlChangeTopology := &CtlChangeTopology{
		Rev:             fmt.Sprintf("%d", ctl.revNum),
		Mode:            mode,
		MemberNodeUUIDs: memberNodeUUIDs,
	}
	ctl.ctlChangeTopology = ctlChangeTopology

	authType := ""
	if ctl.optionsMgr != nil {
		authType = ctl.optionsMgr["authType"]
	}

	httpGetWithAuth := func(urlStr string) (resp *http.Response, err error) {
		if authType == "cbauth" {
			return cbgt.CBAuthHttpGet(urlStr)
		}

		return http.Get(urlStr)
	}

	// The ctl goroutine.
	//
	go func() {
		var ctlErrs []error
		var ctlWarnings map[string][]string

		// Cleanup ctl goroutine.
		//
		defer func() {
			if ctlWarnings == nil {
				// If there were no warnings, see if there were any
				// warnings left in the plan.
				planPIndexes, _, err :=
					cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
				if err == nil {
					if planPIndexes != nil {
						ctlWarnings = planPIndexes.Warnings
					}
				} else {
					ctlErrs = append(ctlErrs, err)
				}
			}

			memberNodes, err := CurrentMemberNodes(ctl.cfg)
			if err != nil {
				ctlErrs = append(ctlErrs, err)
			}

			ctl.m.Lock()

			ctl.incRevNumLOCKED()

			ctl.memberNodes = memberNodes

			if ctl.ctlDoneCh == ctlDoneCh {
				ctl.ctlDoneCh = nil
			}

			if ctl.ctlStopCh == ctlStopCh {
				ctl.ctlStopCh = nil
			}

			if ctl.ctlChangeTopology == ctlChangeTopology {
				ctl.ctlChangeTopology = nil
			}

			ctl.prevWarnings = ctlWarnings
			ctl.prevErrs = ctlErrs

			if ctlOnProgress != nil {
				ctlOnProgress(0, 0, nil, nil, nil, nil, nil, ctlErrs)
			}

			ctl.m.Unlock()

			close(ctlDoneCh)
		}()

		// 1) Monitor cfg to wait for wanted nodes to appear.
		//
		nodesToRemove, err :=
			ctl.waitForWantedNodes(memberNodeUUIDs, ctlStopCh)
		if err != nil {
			log.Printf("ctl: waitForWantedNodes, err: %v", err)
			ctlErrs = append(ctlErrs, err)
			return
		}

		// 2) Run rebalance in a loop (if not failover).
		//
		failover := strings.HasPrefix(mode, "failover")
		if !failover {
			// The loop handles the case where the index definitions
			// changed in the midst of the rebalance, in which case we
			// run rebalance again.
		REBALANCE_LOOP:
			for {
				// Retrieve the indexDefs before we do anything.
				indexDefsStart, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					log.Printf("ctl: PlannerGetIndexDefs, err: %v", err)
					ctlErrs = append(ctlErrs, err)
					return
				}

				if indexDefsStart == nil ||
					len(indexDefsStart.IndexDefs) <= 0 {
					break REBALANCE_LOOP
				}

				// Start rebalance and monitor progress.
				r, err := rebalance.StartRebalance(cbgt.VERSION,
					ctl.cfg, ctl.server, ctl.optionsMgr,
					nodesToRemove,
					rebalance.RebalanceOptions{
						FavorMinNodes: ctl.optionsCtl.FavorMinNodes,
						DryRun:        ctl.optionsCtl.DryRun,
						Verbose:       ctl.optionsCtl.Verbose,
						HttpGet:       httpGetWithAuth,
					})
				if err != nil {
					log.Printf("ctl: StartRebalance, err: %v", err)
					ctlErrs = append(ctlErrs, err)
					return
				}

				progressDoneCh := make(chan error)

				go func() {
					defer close(progressDoneCh)

					progressToString := func(maxNodeLen, maxPIndexLen int,
						seenNodes map[string]bool,
						seenNodesSorted []string,
						seenPIndexes map[string]bool,
						seenPIndexesSorted []string,
						progressEntries map[string]map[string]map[string]*rebalance.ProgressEntry,
					) string {
						if ctlOnProgress != nil {
							return ctlOnProgress(maxNodeLen, maxPIndexLen,
								seenNodes,
								seenNodesSorted,
								seenPIndexes,
								seenPIndexesSorted,
								progressEntries,
								nil)
						}

						return rebalance.ProgressTableString(
							maxNodeLen, maxPIndexLen,
							seenNodes,
							seenNodesSorted,
							seenPIndexes,
							seenPIndexesSorted,
							progressEntries)
					}

					err = rebalance.ReportProgress(r, progressToString)
					if err != nil {
						log.Printf("ctl: ReportProgress, err: %v", err)
						progressDoneCh <- err
					}
				}()

				defer r.Stop()

				select {
				case <-ctlStopCh:
					return // Exit ctl goroutine.

				case err = <-progressDoneCh:
					if err != nil {
						ctlErrs = append(ctlErrs, err)
						return
					}
				}

				ctlWarnings = r.GetEndPlanPIndexes().Warnings

				// Repeat if the indexDefs had changed mid-rebalance.
				indexDefsEnd, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					ctlErrs = append(ctlErrs, err)
					return
				}

				if reflect.DeepEqual(indexDefsStart, indexDefsEnd) {
					// NOTE: There's a race or hole here where at this
					// point we think the indexDefs haven't changed;
					// but, an adversary could still change the
					// indexDefs before we can run the PlannerSteps().
					break REBALANCE_LOOP
				}
			}
		}

		// 3) Run planner steps, like unregister and failover.
		//
		steps := map[string]bool{"unregister": true}
		if failover {
			steps["failover_"] = true
		} else {
			steps["planner"] = true
		}

		err = cmd.PlannerSteps(steps, ctl.cfg, cbgt.VERSION,
			ctl.server, ctl.optionsMgr, nodesToRemove,
			ctl.optionsCtl.DryRun, nil)
		if err != nil {
			log.Printf("ctl: PlannerSteps, err: %v", err)
			ctlErrs = append(ctlErrs, err)
		}
	}()

	return nil
}
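
// A minimal sketch of a CtlOnProgressFunc implementation; the parameter list
// below is inferred from the call sites above, which is an assumption rather
// than a quote of the type's definition. It logs any errors and otherwise
// falls back to the same table rendering used when no callback is supplied.
var logOnProgress CtlOnProgressFunc = func(maxNodeLen, maxPIndexLen int,
	seenNodes map[string]bool,
	seenNodesSorted []string,
	seenPIndexes map[string]bool,
	seenPIndexesSorted []string,
	progressEntries map[string]map[string]map[string]*rebalance.ProgressEntry,
	errs []error) string {
	if len(errs) > 0 {
		log.Printf("ctl: progress errs: %v", errs)
	}

	return rebalance.ProgressTableString(maxNodeLen, maxPIndexLen,
		seenNodes, seenNodesSorted,
		seenPIndexes, seenPIndexesSorted,
		progressEntries)
}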

// assignPIndex is invoked when blance.OrchestrateMoves() wants to
// synchronously change the pindex/node/state/op for an index.
func (r *Rebalancer) assignPIndex(stopCh, stopCh2 chan struct{},
	index, pindex, node, state, op string) error {
	r.Logf(" assignPIndex: index: %s,"+
		" pindex: %s, node: %s, state: %q, op: %s",
		index, pindex, node, state, op)

	r.m.Lock() // Reduces, but doesn't eliminate, CAS conflicts.

	err := r.assignPIndexCurrStates_unlocked(index, pindex,
		node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		r.m.Unlock()
		return fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	err = r.updatePlanPIndexes_unlocked(planPIndexes, indexDef,
		pindex, node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	if r.options.DryRun {
		r.m.Unlock()
		return nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)

	r.m.Unlock()

	if err != nil {
		return fmt.Errorf("assignPIndex: update plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return r.waitAssignPIndexDone(stopCh, stopCh2,
		indexDef, planPIndexes, pindex, node, state, op)
}
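
// pindexMove is a hypothetical helper type, not part of this source, used
// only by the sketch below.
type pindexMove struct {
	index, pindex, node, state, op string
}

// applyMoves is a hypothetical, simplified driver (not blance's actual
// orchestration API) that illustrates assignPIndex's synchronous contract:
// each assignment is applied and waited on before the next move starts, and
// closing stopCh abandons the remaining moves. The per-move role given to
// stopCh2 here is an assumption.
func (r *Rebalancer) applyMoves(stopCh chan struct{}, moves []pindexMove) error {
	for _, mv := range moves {
		select {
		case <-stopCh:
			return nil // The caller asked us to stop; skip the remaining moves.
		default:
		}

		moveStopCh := make(chan struct{}) // Assumed per-move counterpart of stopCh2.

		err := r.assignPIndex(stopCh, moveStopCh,
			mv.index, mv.pindex, mv.node, mv.state, mv.op)

		close(moveStopCh)

		if err != nil {
			return err
		}
	}

	return nil
}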