Example #1
// assignPIndexLOCKED updates the cfg with the pindex assignment, and
// should be invoked while holding the r.m lock.
func (r *Rebalancer) assignPIndexLOCKED(index, pindex, node, state, op string) (
	*cbgt.IndexDef, *cbgt.PlanPIndexes, string, error) {
	err := r.assignPIndexCurrStates_unlocked(index, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		return nil, nil, "", fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	formerPrimaryNode, err := r.updatePlanPIndexes_unlocked(planPIndexes,
		indexDef, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

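	// In dry-run mode, skip persisting the plan change and just
	// report the former primary node.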
	if r.optionsReb.DryRun {
		return nil, nil, formerPrimaryNode, nil
	}

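	// Persist the updated plan, using the cas retrieved above so a
	// concurrent writer is detected as an error.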
	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)
	if err != nil {
		return nil, nil, "", err
	}

	return indexDef, planPIndexes, formerPrimaryNode, err
}
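The "LOCKED" suffix is the convention used in these examples for methods that expect the caller to already hold r.m. A minimal sketch of that calling pattern, using a hypothetical wrapper name that is not part of cbgt:

// assignPIndexWithLock is a hypothetical caller sketch: it acquires r.m,
// delegates to the LOCKED helper, and releases the lock on return.
func (r *Rebalancer) assignPIndexWithLock(index, pindex, node, state, op string) (
	*cbgt.IndexDef, *cbgt.PlanPIndexes, string, error) {
	r.m.Lock()
	defer r.m.Unlock()

	return r.assignPIndexLOCKED(index, pindex, node, state, op)
}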
Example #2
File: ctl.go Project: couchbase/cbgt
func (ctl *Ctl) startCtlLOCKED(
	mode string,
	memberNodeUUIDs []string,
	ctlOnProgress CtlOnProgressFunc) error {
	ctl.incRevNumLOCKED()

	ctlDoneCh := make(chan struct{})
	ctl.ctlDoneCh = ctlDoneCh

	ctlStopCh := make(chan struct{})
	ctl.ctlStopCh = ctlStopCh

	ctlChangeTopology := &CtlChangeTopology{
		Rev:             fmt.Sprintf("%d", ctl.revNum),
		Mode:            mode,
		MemberNodeUUIDs: memberNodeUUIDs,
	}
	ctl.ctlChangeTopology = ctlChangeTopology

	authType := ""
	if ctl.optionsMgr != nil {
		authType = ctl.optionsMgr["authType"]
	}

	httpGetWithAuth := func(urlStr string) (resp *http.Response, err error) {
		if authType == "cbauth" {
			return cbgt.CBAuthHttpGet(urlStr)
		}

		return http.Get(urlStr)
	}

	// The ctl goroutine.
	//
	go func() {
		var ctlErrs []error
		var ctlWarnings map[string][]string

		// Cleanup ctl goroutine.
		//
		defer func() {
			if ctlWarnings == nil {
				// If the rebalance produced no warnings, fall back to
				// any warnings recorded in the current plan.
				planPIndexes, _, err :=
					cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
				if err == nil {
					if planPIndexes != nil {
						ctlWarnings = planPIndexes.Warnings
					}
				} else {
					ctlErrs = append(ctlErrs, err)
				}
			}

			memberNodes, err := CurrentMemberNodes(ctl.cfg)
			if err != nil {
				ctlErrs = append(ctlErrs, err)
			}

			ctl.m.Lock()

			ctl.incRevNumLOCKED()

			ctl.memberNodes = memberNodes

			if ctl.ctlDoneCh == ctlDoneCh {
				ctl.ctlDoneCh = nil
			}

			if ctl.ctlStopCh == ctlStopCh {
				ctl.ctlStopCh = nil
			}

			if ctl.ctlChangeTopology == ctlChangeTopology {
				ctl.ctlChangeTopology = nil
			}

			ctl.prevWarnings = ctlWarnings
			ctl.prevErrs = ctlErrs

			if ctlOnProgress != nil {
				ctlOnProgress(0, 0, nil, nil, nil, nil, nil, ctlErrs)
			}

			ctl.m.Unlock()

			close(ctlDoneCh)
		}()

		// 1) Monitor cfg to wait for wanted nodes to appear.
		//
		nodesToRemove, err :=
			ctl.waitForWantedNodes(memberNodeUUIDs, ctlStopCh)
		if err != nil {
			log.Printf("ctl: waitForWantedNodes, err: %v", err)
			ctlErrs = append(ctlErrs, err)
			return
		}

		// 2) Run rebalance in a loop (if not failover).
		//
		failover := strings.HasPrefix(mode, "failover")
		if !failover {
			// The loop handles the case where the index definitions
			// changed in the middle of the rebalance, in which case
			// we run the rebalance again.
		REBALANCE_LOOP:
			for {
				// Retrieve the indexDefs before we do anything.
				indexDefsStart, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					log.Printf("ctl: PlannerGetIndexDefs, err: %v", err)

					ctlErrs = append(ctlErrs, err)
					return
				}

				if indexDefsStart == nil ||
					len(indexDefsStart.IndexDefs) <= 0 {
					break REBALANCE_LOOP
				}

				// Start rebalance and monitor progress.
				r, err := rebalance.StartRebalance(cbgt.VERSION,
					ctl.cfg, ctl.server, ctl.optionsMgr,
					nodesToRemove,
					rebalance.RebalanceOptions{
						FavorMinNodes: ctl.optionsCtl.FavorMinNodes,
						DryRun:        ctl.optionsCtl.DryRun,
						Verbose:       ctl.optionsCtl.Verbose,
						HttpGet:       httpGetWithAuth,
					})
				if err != nil {
					log.Printf("ctl: StartRebalance, err: %v", err)

					ctlErrs = append(ctlErrs, err)
					return
				}

				progressDoneCh := make(chan error)
				go func() {
					defer close(progressDoneCh)

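					// progressToString either delegates to the caller's
					// progress callback or falls back to rendering the
					// default progress table.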
					progressToString := func(maxNodeLen, maxPIndexLen int,
						seenNodes map[string]bool,
						seenNodesSorted []string,
						seenPIndexes map[string]bool,
						seenPIndexesSorted []string,
						progressEntries map[string]map[string]map[string]*rebalance.ProgressEntry,
					) string {
						if ctlOnProgress != nil {
							return ctlOnProgress(maxNodeLen, maxPIndexLen,
								seenNodes,
								seenNodesSorted,
								seenPIndexes,
								seenPIndexesSorted,
								progressEntries,
								nil)
						}

						return rebalance.ProgressTableString(
							maxNodeLen, maxPIndexLen,
							seenNodes,
							seenNodesSorted,
							seenPIndexes,
							seenPIndexesSorted,
							progressEntries)
					}

					err = rebalance.ReportProgress(r, progressToString)
					if err != nil {
						log.Printf("ctl: ReportProgress, err: %v", err)
						progressDoneCh <- err
					}
				}()

				defer r.Stop()

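				// Wait for either a stop request or for progress
				// reporting to finish.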
				select {
				case <-ctlStopCh:
					return // Exit ctl goroutine.

				case err = <-progressDoneCh:
					if err != nil {
						ctlErrs = append(ctlErrs, err)
						return
					}
				}

				ctlWarnings = r.GetEndPlanPIndexes().Warnings

				// Repeat if the indexDefs had changed mid-rebalance.
				indexDefsEnd, err :=
					cbgt.PlannerGetIndexDefs(ctl.cfg, cbgt.VERSION)
				if err != nil {
					ctlErrs = append(ctlErrs, err)
					return
				}

				if reflect.DeepEqual(indexDefsStart, indexDefsEnd) {
					// NOTE: There's a race or hole here where at this
					// point we think the indexDefs haven't changed;
					// but, an adversary could still change the
					// indexDefs before we can run the PlannerSteps().
					break REBALANCE_LOOP
				}
			}
		}

		// 3) Run planner steps, like unregister and failover.
		//
		steps := map[string]bool{"unregister": true}
		if failover {
			steps["failover_"] = true
		} else {
			steps["planner"] = true
		}

		err = cmd.PlannerSteps(steps, ctl.cfg, cbgt.VERSION,
			ctl.server, ctl.optionsMgr, nodesToRemove,
			ctl.optionsCtl.DryRun, nil)
		if err != nil {
			log.Printf("ctl: PlannerSteps, err: %v", err)
			ctlErrs = append(ctlErrs, err)
		}
	}()

	return nil
}
Example #3
File: ctl.go Project: couchbase/cbgt
// When the index definitions have changed, our approach is to run the
// planner, but only for brand new indexes that don't have any
// pindexes yet.
func (ctl *Ctl) IndexDefsChanged() (err error) {
	plannerFilterNewIndexesOnly := func(indexDef *cbgt.IndexDef,
		planPIndexesPrev, planPIndexes *cbgt.PlanPIndexes) bool {
		copyPrevPlan := func() {
			// Copy over the previous plan, if any, for the index.
			if planPIndexesPrev != nil && planPIndexes != nil {
				for n, p := range planPIndexesPrev.PlanPIndexes {
					if p.IndexName == indexDef.Name &&
						p.IndexUUID == indexDef.UUID {
						planPIndexes.PlanPIndexes[n] = p

						// Copy over previous warnings, if any.
						if planPIndexes.Warnings == nil {
							planPIndexes.Warnings = map[string][]string{}
						}

						if planPIndexesPrev.Warnings != nil {
							prev := planPIndexesPrev.Warnings[indexDef.Name]
							if prev != nil {
								planPIndexes.Warnings[indexDef.Name] = prev
							}
						}
					}
				}
			}
		}

		// Split each indexDef into 1 or more PlanPIndexes.
		planPIndexesForIndex, err := cbgt.SplitIndexDefIntoPlanPIndexes(
			indexDef, ctl.server, ctl.optionsMgr, nil)
		if err != nil {
			copyPrevPlan()
			return false
		}

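		// If any pindex for this index already exists in the previous
		// plan, keep the previous plan for it and skip re-planning.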
		for pindexName := range planPIndexesForIndex {
			if planPIndexesPrev.PlanPIndexes[pindexName] != nil {
				copyPrevPlan()
				return false
			}
		}

		return true
	}

	go func() {
		steps := map[string]bool{"planner": true}

		var nodesToRemove []string

		cmd.PlannerSteps(steps, ctl.cfg, cbgt.VERSION,
			ctl.server, ctl.optionsMgr, nodesToRemove, ctl.optionsCtl.DryRun,
			plannerFilterNewIndexesOnly)

		planPIndexes, _, err :=
			cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
		if err == nil && planPIndexes != nil {
			ctl.m.Lock()
			ctl.incRevNumLOCKED()
			ctl.prevWarnings = planPIndexes.Warnings
			ctl.m.Unlock()
		}
	}()

	return nil
}
Example #4
File: ctl.go Project: couchbase/cbgt
func (ctl *Ctl) run() {
	defer close(ctl.doneCh)

	memberNodes, err := CurrentMemberNodes(ctl.cfg)
	if err != nil {
		ctl.initCh <- err
		close(ctl.initCh)
		return
	}

	planPIndexes, _, err := cbgt.PlannerGetPlanPIndexes(ctl.cfg, cbgt.VERSION)
	if err != nil {
		ctl.initCh <- err
		close(ctl.initCh)
		return
	}

	ctl.m.Lock()

	ctl.incRevNumLOCKED()

	ctl.memberNodes = memberNodes

	if planPIndexes != nil {
		ctl.prevWarnings = planPIndexes.Warnings
	}

	ctl.m.Unlock()

	// -----------------------------------------------------------

	err = ctl.cfg.Subscribe(cbgt.INDEX_DEFS_KEY, ctl.cfgEventCh)
	if err != nil {
		ctl.initCh <- err
		close(ctl.initCh)
		return
	}

	var lastIndexDefs *cbgt.IndexDefs

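	// kickIndexDefs re-reads the index definitions from the cfg and
	// invokes IndexDefsChanged() when they differ from the last-seen
	// definitions or when forced.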
	kickIndexDefs := func(kind string) error {
		log.Printf("ctl: kickIndexDefs, kind: %s", kind)

		indexDefs, _, err := cbgt.CfgGetIndexDefs(ctl.cfg)
		if err != nil {
			log.Printf("ctl: kickIndexDefs, kind: %s, CfgGetIndexDefs,"+
				" err: %v", kind, err)
			return err
		}

		if kind == "init" || kind == "force" || kind == "force-indexDefs" ||
			!reflect.DeepEqual(lastIndexDefs, indexDefs) {
			err = ctl.IndexDefsChanged()
			if err != nil {
				log.Printf("ctl: kickIndexDefs, kind: %s, IndexDefsChanged,"+
					" err: %v", kind, err)
			}
		}

		lastIndexDefs = indexDefs

		return err
	}

	err = kickIndexDefs("init")
	if err != nil {
		ctl.initCh <- err
		close(ctl.initCh)
		return
	}

	// -----------------------------------------------------------

	close(ctl.initCh)

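	// Main loop: react to stop requests and to cfg events on the
	// index definitions.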
	for {
		select {
		case <-ctl.stopCh:
			ctl.dispatchCtl("", "stop", nil, nil)
			return

		case <-ctl.cfgEventCh:
			kickIndexDefs("cfgEvent")
		}
	}
}
Example #5
// assignPIndex is invoked when blance.OrchestrateMoves() wants to
// synchronously change the pindex/node/state/op for an index.
func (r *Rebalancer) assignPIndex(stopCh, stopCh2 chan struct{},
	index, pindex, node, state, op string) error {
	r.Logf("  assignPIndex: index: %s,"+
		" pindex: %s, node: %s, state: %q, op: %s",
		index, pindex, node, state, op)

	r.m.Lock() // Reduce but not eliminate CAS conflicts.

	err := r.assignPIndexCurrStates_unlocked(index,
		pindex, node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		r.m.Unlock()

		return fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	err = r.updatePlanPIndexes_unlocked(planPIndexes, indexDef,
		pindex, node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	if r.options.DryRun {
		r.m.Unlock()
		return nil
	}

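	// Persist the updated plan; a CAS mismatch below likely means a
	// concurrent planner updated the plan first.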
	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)

	r.m.Unlock()

	if err != nil {
		return fmt.Errorf("assignPIndex: update plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

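	// With the lock released, wait for the pindex assignment to
	// complete before returning.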
	return r.waitAssignPIndexDone(stopCh, stopCh2,
		indexDef, planPIndexes,
		pindex, node, state, op)
}