コード例 #1
0
ファイル: auto_scale_memory.go プロジェクト: tsuru/tsuru
// scale decides whether the node pool identified by groupMetadata should be
// scaled down, scaled up, or left alone, based on the memory required to fit
// a unit of the largest registered plan.
//
// It returns a scalerResult with ToRemove set when containers can be
// redistributed onto fewer nodes, ToAdd set when no existing node can fit the
// largest plan, or an empty result when no action is needed.
func (a *memoryScaler) scale(groupMetadata string, nodes []*cluster.Node) (*scalerResult, error) {
	plans, err := app.PlansList()
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list plans")
	}
	// The largest plan's memory is the worst-case unit that must fit on a node.
	var maxPlanMemory int64
	for _, plan := range plans {
		if plan.Memory > maxPlanMemory {
			maxPlanMemory = plan.Memory
		}
	}
	if maxPlanMemory == 0 {
		// No plans (or only zero-memory plans) registered: fall back to the
		// default plan's memory.
		var defaultPlan *app.Plan
		defaultPlan, err = app.DefaultPlan()
		if err != nil {
			return nil, errors.Wrap(err, "couldn't get default plan")
		}
		maxPlanMemory = defaultPlan.Memory
	}
	chosenNodes, err := a.chooseNodeForRemoval(maxPlanMemory, groupMetadata, nodes)
	if err != nil {
		return nil, err
	}
	if chosenNodes != nil {
		// Scale down: the current containers can be redistributed onto the
		// remaining nodes.
		return &scalerResult{
			ToRemove: chosenNodes,
			Reason:   fmt.Sprintf("containers can be distributed in only %d nodes", len(nodes)-len(chosenNodes)),
		}, nil
	}
	memoryData, err := a.nodesMemoryData(nodes)
	if err != nil {
		return nil, err
	}
	canFitMax := false
	var totalReserved, totalMem int64
	for _, node := range nodes {
		data := memoryData[node.Address]
		if maxPlanMemory > data.maxMemory {
			// Even an empty node of this size could never host the largest
			// plan, so scaling up would not help.
			return nil, errors.Errorf("aborting, impossible to fit max plan memory of %d bytes, node max available memory is %d", maxPlanMemory, data.maxMemory)
		}
		totalReserved += data.reserved
		totalMem += data.maxMemory
		if data.available >= maxPlanMemory {
			canFitMax = true
			break
		}
	}
	if canFitMax {
		return &scalerResult{}, nil
	}
	// Guard: with no nodes the loop above never runs and totalMem stays 0,
	// which would make the division below panic. Nothing to base a scale-up
	// estimate on, so report no action.
	if totalMem == 0 {
		return &scalerResult{}, nil
	}
	// Estimate how many extra nodes of the current average size are needed to
	// absorb the reserved memory plus one largest-plan unit.
	nodesToAdd := int((totalReserved + maxPlanMemory) / totalMem)
	if nodesToAdd == 0 {
		return &scalerResult{}, nil
	}
	return &scalerResult{
		ToAdd:  nodesToAdd,
		Reason: fmt.Sprintf("can't add %d bytes to an existing node", maxPlanMemory),
	}, nil
}
コード例 #2
0
ファイル: plan.go プロジェクト: 4eek/tsuru
// listPlans handles the plan-listing request: it fetches every registered
// plan and writes them to the response as a JSON array.
func listPlans(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	allPlans, listErr := app.PlansList()
	if listErr != nil {
		return listErr
	}
	w.Header().Set("Content-Type", "application/json")
	encoder := json.NewEncoder(w)
	return encoder.Encode(allPlans)
}
コード例 #3
0
ファイル: auto_scale.go プロジェクト: keymon/tsuru
// scale inspects the node pool identified by groupMetadata and, driven by the
// memory needed for the largest registered plan, either removes a node whose
// containers can be redistributed, adds a new node when no existing node can
// fit the largest plan, or does nothing. Progress is recorded on the given
// autoScaleEvent as it goes.
func (a *memoryScaler) scale(event *autoScaleEvent, groupMetadata string, nodes []*cluster.Node) error {
	plans, err := app.PlansList()
	if err != nil {
		return fmt.Errorf("couldn't list plans: %s", err)
	}
	// The largest plan's memory is the worst-case unit that must fit on a node.
	var maxPlanMemory int64
	for _, plan := range plans {
		if plan.Memory > maxPlanMemory {
			maxPlanMemory = plan.Memory
		}
	}
	if maxPlanMemory == 0 {
		// No plans (or only zero-memory plans) registered: use the default
		// plan's memory instead.
		defaultPlan, err := app.DefaultPlan()
		if err != nil {
			return fmt.Errorf("couldn't get default plan: %s", err)
		}
		maxPlanMemory = defaultPlan.Memory
	}
	chosenNode, err := a.choseNodeForRemoval(maxPlanMemory, groupMetadata, nodes)
	if err != nil {
		return fmt.Errorf("unable to choose node for removal: %s", err)
	}
	if chosenNode != nil {
		// Scale down: record the decision on the event, then remove the node.
		event.updateNode(chosenNode)
		err = event.update(scaleActionRemove, fmt.Sprintf("containers from %s can be distributed in cluster", chosenNode.Address))
		if err != nil {
			return err
		}
		a.logDebug("running event %q for %q: %s", event.Action, event.MetadataValue, event.Reason)
		return a.removeNode(chosenNode)
	}
	memoryData, err := a.nodesMemoryData(a.provisioner, nodes)
	if err != nil {
		return err
	}
	canFitMax := false
	for _, node := range nodes {
		data := memoryData[node.Address]
		a.logDebug("checking scale up, node %s, memory data: %#v", node.Address, data)
		if maxPlanMemory > data.maxMemory {
			// Even an empty node of this size could never host the largest
			// plan, so adding more nodes would not help.
			return fmt.Errorf("aborting, impossible to fit max plan memory of %d bytes, node max available memory is %d", maxPlanMemory, data.maxMemory)
		}
		if data.available >= maxPlanMemory {
			canFitMax = true
			break
		}
	}
	if canFitMax {
		// At least one node can still fit the largest plan: nothing to do.
		return nil
	}
	// Scale up: record the decision on the event, add a node, then attach the
	// new node to the event.
	err = event.update(scaleActionAdd, fmt.Sprintf("can't add %d bytes to an existing node", maxPlanMemory))
	if err != nil {
		return err
	}
	a.logDebug("running event %q for %q: %s", event.Action, event.MetadataValue, event.Reason)
	newNode, err := a.addNode(nodes)
	if err != nil {
		return err
	}
	event.updateNode(newNode)
	return nil
}