Example #1
// assignPIndexLOCKED updates the cfg with the pindex assignment, and
// should be invoked while holding the r.m lock.
func (r *Rebalancer) assignPIndexLOCKED(index, pindex, node, state, op string) (
	*cbgt.IndexDef, *cbgt.PlanPIndexes, string, error) {
	err := r.assignPIndexCurrStates_unlocked(index, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		return nil, nil, "", fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		return nil, nil, "", err
	}

	formerPrimaryNode, err := r.updatePlanPIndexes_unlocked(planPIndexes,
		indexDef, pindex, node, state, op)
	if err != nil {
		return nil, nil, "", err
	}

	if r.optionsReb.DryRun {
		return nil, nil, formerPrimaryNode, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)
	if err != nil {
		return nil, nil, "", err
	}

	return indexDef, planPIndexes, formerPrimaryNode, nil
}
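
The LOCKED suffix follows the convention that the caller must already hold r.m; assignPIndexLOCKED itself never locks or unlocks. A minimal sketch of a hypothetical caller under that assumption (assignPIndexExample is not in the source):

// Hypothetical caller of the LOCKED variant: acquire r.m first, per
// the naming convention; the helper itself never unlocks.
func (r *Rebalancer) assignPIndexExample(
	index, pindex, node, state, op string) (string, error) {
	r.m.Lock()
	defer r.m.Unlock()

	// The returned indexDef and planPIndexes are ignored here; a real
	// caller (see Example #4) passes them to waitAssignPIndexDone
	// after releasing the lock.
	_, _, formerPrimaryNode, err :=
		r.assignPIndexLOCKED(index, pindex, node, state, op)

	return formerPrimaryNode, err
}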
Example #2
// Failover removes the failed-over nodes from the plan, promoting
// replicas on the remaining nodes to primary where needed.
func Failover(cfg cbgt.Cfg, version string, server string,
	options map[string]string, nodesFailover []string) (bool, error) {
	mapNodesFailover := cbgt.StringsToMap(nodesFailover)

	uuid := ""

	indexDefs, nodeDefs, planPIndexesPrev, cas, err :=
		cbgt.PlannerGetPlan(cfg, version, uuid)
	if err != nil {
		return false, err
	}

	planPIndexesCalc, err := cbgt.CalcPlan("failover",
		indexDefs, nodeDefs, planPIndexesPrev, version, server, options, nil)
	if err != nil {
		return false, fmt.Errorf("planner: failover CalcPlan, err: %v", err)
	}

	planPIndexesNext := cbgt.CopyPlanPIndexes(planPIndexesPrev, version)
	for planPIndexName, planPIndex := range planPIndexesNext.PlanPIndexes {
		for node, planPIndexNode := range planPIndex.Nodes {
			if !mapNodesFailover[node] {
				continue
			}

			if planPIndexNode.Priority <= 0 {
				// The failed-over node was a primary for this
				// pindex, so find a replica to promote.
				promoted := ""

			PROMOTE_REPLICA:
				for nodePro, ppnPro := range planPIndex.Nodes {
					if mapNodesFailover[nodePro] {
						continue
					}

					if ppnPro.Priority >= 1 {
						ppnPro.Priority = 0
						planPIndex.Nodes[nodePro] = ppnPro
						promoted = nodePro
						break PROMOTE_REPLICA
					}
				}

				// If we didn't find a replica to promote, and the
				// "failoverAssignAllPrimaries-IndexName" or
				// "failoverAssignAllPrimaries" option is enabled
				// (default: true), then assign the primary from the
				// calculated plan.
				if promoted == "" && ParseOptionsBool(options,
					"failoverAssignAllPrimaries", planPIndex.IndexName, true) {
					planPIndexCalc, exists :=
						planPIndexesCalc.PlanPIndexes[planPIndexName]
					if exists && planPIndexCalc != nil {
					ASSIGN_PRIMARY:
						for nodeCalc, ppnCalc := range planPIndexCalc.Nodes {
							if ppnCalc.Priority <= 0 &&
								!mapNodesFailover[nodeCalc] {
								planPIndex.Nodes[nodeCalc] = ppnCalc
								promoted = nodeCalc
								break ASSIGN_PRIMARY
							}
						}
					}
				}
			}

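			// Whether or not a promotion happened, remove the
			// failed-over node from this pindex's plan.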
			delete(planPIndex.Nodes, node)
		}
	}

	// TODO: Warn when the resulting plan leaves pindexes
	// under-replicated.

	if cbgt.SamePlanPIndexes(planPIndexesNext, planPIndexesPrev) {
		return false, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(cfg, planPIndexesNext, cas)
	if err != nil {
		return false, fmt.Errorf("planner: failover could not save plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return true, nil
}
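
A hedged usage sketch for this variant. The server URL, node UUIDs, and index name are illustrative, and the per-index option key assumes ParseOptionsBool composes it as "failoverAssignAllPrimaries-" + indexName, as the comment in Failover suggests:

// Illustrative invocation; identifiers here are made up.
func exampleFailover(cfg cbgt.Cfg) (bool, error) {
	options := map[string]string{
		// Disable the calculated-plan fallback for one index only,
		// assuming the per-index key format described above.
		"failoverAssignAllPrimaries-myIndex": "false",
	}

	// Fail over two nodes, identified by their UUIDs.
	return Failover(cfg, cbgt.VERSION, "http://127.0.0.1:8091",
		options, []string{"nodeUUID-1", "nodeUUID-2"})
}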
Example #3
// Failover removes the failed-over nodes from the plan, promoting
// replicas on the remaining nodes to primary where needed.
func Failover(cfg cbgt.Cfg, version string, server string,
	nodesFailover []string) (bool, error) {
	mapNodesFailover := cbgt.StringsToMap(nodesFailover)

	uuid := ""

	indexDefs, nodeDefs, planPIndexesPrev, cas, err :=
		cbgt.PlannerGetPlan(cfg, version, uuid)
	if err != nil {
		return false, err
	}

	planPIndexesCalc, err := cbgt.CalcPlan("failover",
		indexDefs, nodeDefs, planPIndexesPrev, version, server)
	if err != nil {
		return false, fmt.Errorf("planner: failover CalcPlan, err: %v", err)
	}

	planPIndexesNext := cbgt.CopyPlanPIndexes(planPIndexesPrev, version)
	for planPIndexName, planPIndex := range planPIndexesNext.PlanPIndexes {
		for node, planPIndexNode := range planPIndex.Nodes {
			if !mapNodesFailover[node] {
				continue
			}

			if planPIndexNode.Priority <= 0 {
				// The failed-over node was a primary for this
				// pindex, so find a replica to promote.
				promoted := ""

			PROMOTE_REPLICA:
				for nodePro, ppnPro := range planPIndex.Nodes {
					if mapNodesFailover[nodePro] {
						continue
					}

					if ppnPro.Priority >= 1 {
						ppnPro.Priority = 0
						planPIndex.Nodes[nodePro] = ppnPro
						promoted = nodePro
						break PROMOTE_REPLICA
					}
				}

				if promoted == "" {
					// Didn't find a replica to promote, so consult the
					// calculated plan for the primary assignment.
					planPIndexCalc, exists :=
						planPIndexesCalc.PlanPIndexes[planPIndexName]
					if exists && planPIndexCalc != nil {
					PROMOTE_CALC:
						for nodeCalc, ppnCalc := range planPIndexCalc.Nodes {
							if ppnCalc.Priority <= 0 &&
								!mapNodesFailover[nodeCalc] {
								planPIndex.Nodes[nodeCalc] = ppnCalc
								promoted = nodeCalc
								break PROMOTE_CALC
							}
						}
					}
				}
			}

			delete(planPIndex.Nodes, node)
		}
	}

	// TODO: Warn when the resulting plan leaves pindexes
	// under-replicated.

	if cbgt.SamePlanPIndexes(planPIndexesNext, planPIndexesPrev) {
		return false, nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(cfg, planPIndexesNext, cas)
	if err != nil {
		return false, fmt.Errorf("planner: failover could not save plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return true, nil
}
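
Example #3 is the same failover flow as Example #2 without the options parameter, and so without the calculated-plan opt-out. In both variants, CfgSetPlanPIndexes fails when a concurrent planner wins the CAS race; since Failover re-reads the plan and its CAS on every call, a caller can simply invoke it again. A minimal sketch, assuming a bounded retry is acceptable for the caller (failoverWithRetry and the retry count are not from the source):

// Hypothetical retry wrapper for the variant above: on a CAS loss,
// calling Failover again re-reads the plan with a fresh cas. This
// retries on any error for simplicity; a real caller might inspect
// the error first.
func failoverWithRetry(cfg cbgt.Cfg, version, server string,
	nodesFailover []string) (bool, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		changed, err := Failover(cfg, version, server, nodesFailover)
		if err == nil {
			return changed, nil
		}
		lastErr = err
	}
	return false, lastErr
}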
Example #4
// assignPIndex is invoked when blance.OrchestrateMoves() wants to
// synchronously change the pindex/node/state/op for an index.
func (r *Rebalancer) assignPIndex(stopCh, stopCh2 chan struct{},
	index, pindex, node, state, op string) error {
	r.Logf("  assignPIndex: index: %s,"+
		" pindex: %s, node: %s, state: %q, op: %s",
		index, pindex, node, state, op)

	r.m.Lock() // Reduces, but does not eliminate, CAS conflicts.

	err := r.assignPIndexCurrStates_unlocked(index,
		pindex, node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDefs, err := cbgt.PlannerGetIndexDefs(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	indexDef := indexDefs.IndexDefs[index]
	if indexDef == nil {
		r.m.Unlock()

		return fmt.Errorf("assignPIndex: no indexDef,"+
			" index: %s, pindex: %s, node: %s, state: %q, op: %s",
			index, pindex, node, state, op)
	}

	planPIndexes, cas, err := cbgt.PlannerGetPlanPIndexes(r.cfg, r.version)
	if err != nil {
		r.m.Unlock()
		return err
	}

	err = r.updatePlanPIndexes_unlocked(planPIndexes, indexDef,
		pindex, node, state, op)
	if err != nil {
		r.m.Unlock()
		return err
	}

	if r.options.DryRun {
		r.m.Unlock()
		return nil
	}

	_, err = cbgt.CfgSetPlanPIndexes(r.cfg, planPIndexes, cas)

	r.m.Unlock()

	if err != nil {
		return fmt.Errorf("assignPIndex: update plan,"+
			" perhaps a concurrent planner won, cas: %d, err: %v",
			cas, err)
	}

	return r.waitAssignPIndexDone(stopCh, stopCh2,
		indexDef, planPIndexes,
		pindex, node, state, op)
}
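
Example #4 inlines the lock-scoped work that Example #1 factors into a LOCKED helper, which forces the explicit r.m.Unlock() before every early return. A hedged sketch of the same flow expressed through that helper; this assumes the two versions coexist, which the snippets don't establish (in Example #1's version, a dry run returns a nil indexDef, so the wait step is skipped):

// Hypothetical alternative to assignPIndex: hold r.m only for the
// cfg read/modify/write, then wait outside the lock.
func (r *Rebalancer) assignPIndexViaLOCKED(stopCh, stopCh2 chan struct{},
	index, pindex, node, state, op string) error {
	r.m.Lock()
	indexDef, planPIndexes, _, err :=
		r.assignPIndexLOCKED(index, pindex, node, state, op)
	r.m.Unlock()

	if err != nil {
		return err
	}

	if indexDef == nil { // Dry run: nothing to wait for.
		return nil
	}

	// waitAssignPIndexDone must run without holding r.m.
	return r.waitAssignPIndexDone(stopCh, stopCh2,
		indexDef, planPIndexes, pindex, node, state, op)
}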