// computeServicesMetrics averages the collected metrics of each service:
// the CPU average is computed from the running instances, the memory average
// is still a TODO, and every user-defined metric is reduced to its mean.
// It iterates over the servicesMetrics map maintained by this package
// (not shown in this section).
func computeServicesMetrics(instMetrics map[string]data.MetricData) map[string]data.MetricData {
	servicesAvg := make(map[string]data.MetricData, len(servicesMetrics))
	for service, metrics := range servicesMetrics {
		baseMetrics := make(map[string]float64)
		// CPU
		cpuAvg := computeServiceCpuPerc(service, instMetrics)
		baseMetrics[enum.METRIC_CPU_AVG.ToString()] = cpuAvg
		// MEMORY - TODO
		memAvg := 0.0
		baseMetrics[enum.METRIC_MEM_AVG.ToString()] = memAvg

		userMetrics := make(map[string]float64, len(metrics.UserMetrics))
		for metric, values := range metrics.UserMetrics {
			value := utils.Mean(values)
			userMetrics[metric] = value
		}

		serviceAvg := data.MetricData{
			BaseMetrics: baseMetrics,
			UserMetrics: userMetrics,
		}
		servicesAvg[service] = serviceAvg
	}

	return servicesAvg
}
// computeSysMetrics aggregates instance metrics into node-level metrics:
// each instance's CPU average is scaled by the number of cores assigned to
// its service and the sum is normalized over the node's total cores. Memory
// aggregation is still a TODO, so the memory average is currently the mean
// of an empty slice.
func computeSysMetrics(instMetrics map[string]data.MetricData) data.MetricData {
	// TODO - improve by adding capacity
	baseMetrics := make(map[string]float64)
	cpuSys := 0.0
	memSys := make([]float64, 0, len(instMetrics))
	for instance, metrics := range instMetrics {
		service, err := srv.GetServiceById(instance)
		if err != nil {
			log.WithFields(log.Fields{
				"instance": instance,
			}).Errorln("Cannot find service by instance")
		} else {
			instCpus := service.Docker.CPUnumber
			instCpuValue := metrics.BaseMetrics[enum.METRIC_CPU_AVG.ToString()] * float64(instCpus)
			// CPU
			cpuSys += instCpuValue
			// MEM
			// TODO
		}
	}

	baseMetrics[enum.METRIC_CPU_AVG.ToString()] = cpuSys / float64(res.GetResources().CPU.Total)
	baseMetrics[enum.METRIC_MEM_AVG.ToString()] = utils.Mean(memSys)

	sysMetrics := data.MetricData{
		BaseMetrics: baseMetrics,
	}

	return sysMetrics
}
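// Worked example (illustrative only; the function name and the numbers below
// are hypothetical, not part of the package): with two instances of a 2-core
// service each reporting a 0.5 CPU average, cpuSys accumulates
// 2 * (0.5 * 2) = 2.0 cores; on a node with 8 total cores the system CPU
// average becomes 2.0 / 8 = 0.25.
func exampleSystemCpuNormalization() float64 {
	const (
		instances     = 2   // running instances of the service
		coresPerInst  = 2   // service.Docker.CPUnumber
		cpuAvgPerInst = 0.5 // METRIC_CPU_AVG reported by each instance
		totalCores    = 8   // res.GetResources().CPU.Total
	)
	cpuSys := float64(instances) * (cpuAvgPerInst * float64(coresPerInst))
	return cpuSys / float64(totalCores) // 0.25
}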
// computeServiceCpuPerc returns the average CPU percentage across the running
// instances of the named service (the mean of an empty slice if none are running).
func computeServiceCpuPerc(name string, instMetrics map[string]data.MetricData) float64 {
	service, _ := srv.GetServiceByName(name)
	values := make([]float64, 0)
	if len(service.Instances.Running) > 0 {
		for _, id := range service.Instances.Running {
			instCpuAvg := instMetrics[id].BaseMetrics[enum.METRIC_CPU_AVG.ToString()]
			values = append(values, instCpuAvg)
		}
	}

	return utils.Mean(values)
}
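// utils.Mean is assumed to behave like the sketch below: the arithmetic mean
// of the slice, with 0.0 for an empty slice (e.g. when a service has no
// running instances). This is an illustration of the assumed contract, not
// the actual implementation in the utils package.
func meanSketch(values []float64) float64 {
	if len(values) == 0 {
		return 0.0
	}
	sum := 0.0
	for _, v := range values {
		sum += v
	}
	return sum / float64(len(values))
}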
// computeWeight returns the scaleout weight of a service: 0.0 if the node
// lacks the resources (or the specific cores) to start a new instance,
// otherwise the mean of the per-metric and per-analytic weights computed
// against the scaleout threshold.
func (p *scaleoutCreator) computeWeight(name string, clusterData data.Shared) float64 {
	service, _ := srv.GetServiceByName(name)
	if res.AvailableResourcesService(name) < 1.0 {
		return 0.0
	}

	srvCores := service.Docker.CpusetCpus
	if srvCores != "" {
		if !res.CheckSpecificCoresAvailable(srvCores) {
			return 0.0
		}
	}

	policy := cfg.GetPolicy().Scaleout
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}

	for _, metric := range metrics {
		if value, ok := clusterData.Service[name].Data.BaseShared[metric]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	for _, analytic := range analytics {
		if value, ok := clusterData.Service[name].Data.UserShared[analytic]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	policyValue := utils.Mean(weights)
	return policyValue
}
// computeWeight returns the scalein weight of a service: 0.0 if the service
// has no running instances, or if it is a base service that would be left
// with no instances at all, otherwise the mean of the per-metric and
// per-analytic weights computed against the scalein threshold.
func (p *scaleinCreator) computeWeight(name string, clusterData data.Shared) float64 {
	service, _ := srv.GetServiceByName(name)
	inst_run := len(service.Instances.Running)
	inst_pen := len(service.Instances.Pending)
	if inst_run < 1 {
		return 0.0
	}

	baseServices := cfg.GetNodeConstraints().BaseServices
	if (inst_pen+inst_run) <= 1 && utils.ContainsString(baseServices, name) {
		return 0.0
	}

	policy := cfg.GetPolicy().Scalein
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}

	for _, metric := range metrics {
		if value, ok := clusterData.Service[name].Data.BaseShared[metric]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	for _, analytic := range analytics {
		if value, ok := clusterData.Service[name].Data.UserShared[analytic]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	policyValue := utils.Mean(weights)
	return policyValue
}
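// Both the scaleout and scalein creators delegate to a computeMetricWeight
// method that is not shown in this section. A plausible shape, by analogy
// with the swap policy below (which uses math.Min(1.0, delta/threshold)), is
// sketched as a free function; this is an assumption for illustration, not
// the repository's implementation.
func metricWeightSketch(value float64, threshold float64) float64 {
	if threshold == 0 {
		return 0.0
	}
	// The weight grows with the metric value and saturates at 1.0 once the
	// configured threshold is reached.
	return math.Min(1.0, value/threshold)
}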
// MergeShared merges the shared data coming from several peers into a single
// Shared value: for every known service it averages the base and user
// analytics of the peers where the service is active, then does the same for
// the system-level analytics and merges the lists of active services.
func MergeShared(toMerge []Shared) (Shared, error) {
	if len(toMerge) < 1 {
		return Shared{}, errors.New("No shared data to merge")
	}

	if len(toMerge) == 1 {
		return toMerge[0], nil
	}

	merged := Shared{
		Service: make(map[string]ServiceShared),
	}

	for _, name := range srv.List() {
		srvMerged := ServiceShared{}
		baseValues := make(map[string][]float64)
		userValues := make(map[string][]float64)
		for _, data := range toMerge {
			if data.Service[name].Active {
				srvMerged.Active = true
				for analytics, value := range data.Service[name].Data.BaseShared {
					baseValues[analytics] = append(baseValues[analytics], value)
				}
				for analytics, value := range data.Service[name].Data.UserShared {
					userValues[analytics] = append(userValues[analytics], value)
				}
			}
		}

		baseMerged := make(map[string]float64, len(baseValues))
		for analytics, values := range baseValues {
			baseMerged[analytics] = utils.Mean(values)
		}
		userMerged := make(map[string]float64, len(userValues))
		for analytics, values := range userValues {
			userMerged[analytics] = utils.Mean(values)
		}

		srvMerged.Data.BaseShared = baseMerged
		srvMerged.Data.UserShared = userMerged
		merged.Service[name] = srvMerged
	}

	sysMerged := SystemShared{}
	baseValues := make(map[string][]float64)
	userValues := make(map[string][]float64)
	for _, data := range toMerge {
		for analytics, value := range data.System.Data.BaseShared {
			baseValues[analytics] = append(baseValues[analytics], value)
		}
		for analytics, value := range data.System.Data.UserShared {
			userValues[analytics] = append(userValues[analytics], value)
		}
		sysMerged.ActiveServices = checkAndAppend(sysMerged.ActiveServices, data.System.ActiveServices)
	}

	baseMerged := make(map[string]float64, len(baseValues))
	for analytics, values := range baseValues {
		baseMerged[analytics] = utils.Mean(values)
	}
	userMerged := make(map[string]float64, len(userValues))
	for analytics, values := range userValues {
		userMerged[analytics] = utils.Mean(values)
	}

	sysMerged.Data.BaseShared = baseMerged
	sysMerged.Data.UserShared = userMerged
	merged.System = sysMerged

	return merged, nil
}
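// checkAndAppend is used by MergeShared but not defined in this section. It
// is assumed to append only the names that are not already present, keeping
// ActiveServices free of duplicates; the sketch below illustrates that
// assumed behavior and is not the actual helper.
func checkAndAppendSketch(current []string, toAdd []string) []string {
	for _, name := range toAdd {
		if !utils.ContainsString(current, name) {
			current = append(current, name)
		}
	}
	return current
}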
// computeWeight returns the swap weight between a running service and a
// candidate one: the more the candidate's metrics exceed the running
// service's relative to the swap threshold, the closer the weight gets to 1.0.
func (p *swapCreator) computeWeight(running string, candidate string, clusterData data.Shared) float64 {
	srv_run, _ := srv.GetServiceByName(running)
	srv_cand, _ := srv.GetServiceByName(candidate)
	nRun := len(srv_run.Instances.Running)
	baseServices := cfg.GetNodeConstraints().BaseServices
	if utils.ContainsString(baseServices, running) && nRun < 2 {
		return 0.0
	}

	// If the candidate has the resources to start without stopping the other
	// service, there is no reason to swap them.
	if res.AvailableResourcesService(candidate) > 0 {
		return 0.0
	}

	// TODO: this currently works only with homogeneous containers and takes
	// into account only the CPUs. In the future the swap policy should be
	// able to compare the resources needed by each container and evaluate
	// whether a container can be swapped with more than one active container
	// in order to obtain the requested amount of resources.
	if srv_run.Docker.CPUnumber != srv_cand.Docker.CPUnumber {
		return 0.0
	}

	runShared := clusterData.Service[running]
	candShared := clusterData.Service[candidate]
	policy := cfg.GetPolicy().Swap
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}
	candValue := 0.0
	runValue := 0.0

	for _, metric := range metrics {
		if value, ok := candShared.Data.BaseShared[metric]; ok {
			candValue = value
		} else {
			candValue = -1.0
		}
		if value, ok := runShared.Data.BaseShared[metric]; ok {
			runValue = value
		} else {
			runValue = -1.0
		}

		if candValue != -1.0 && runValue != -1.0 {
			delta := candValue - runValue
			weight := math.Min(1.0, delta/threshold)
			weights = append(weights, weight)
		} else {
			log.WithFields(log.Fields{
				"metric":    metric,
				"running":   running,
				"candidate": candidate,
			}).Warnln("Cannot compare services: metric not present in both services")
		}
	}

	for _, analytic := range analytics {
		if value, ok := candShared.Data.UserShared[analytic]; ok {
			candValue = value
		} else {
			candValue = -1.0
		}
		if value, ok := runShared.Data.UserShared[analytic]; ok {
			runValue = value
		} else {
			runValue = -1.0
		}

		if candValue != -1.0 && runValue != -1.0 {
			delta := candValue - runValue
			weight := math.Min(1.0, delta/threshold)
			weights = append(weights, weight)
		} else {
			log.WithFields(log.Fields{
				"analytic":  analytic,
				"running":   running,
				"candidate": candidate,
			}).Warnln("Cannot compare services: analytic not present in both services")
		}
	}

	policyValue := math.Max(0.0, utils.Mean(weights))
	return policyValue
}
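// Worked example (hypothetical numbers, function name is illustrative only):
// if the candidate's shared value for a metric is 0.9, the running service's
// is 0.3 and the swap threshold is 0.5, the per-metric weight is
// min(1.0, (0.9-0.3)/0.5) = 1.0; with a single metric the policy value is
// max(0.0, 1.0) = 1.0, i.e. a strong signal to swap.
func exampleSwapWeight() float64 {
	const (
		candValue = 0.9 // candidate's shared metric value
		runValue  = 0.3 // running service's shared metric value
		threshold = 0.5 // swap policy threshold
	)
	weight := math.Min(1.0, (candValue-runValue)/threshold)
	return math.Max(0.0, weight) // 1.0
}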