Example #1
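// startService resolves the named service and sends a start-action message on the action channel.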
func startService(name string) {
	log.WithField("name", name).Debugln("Starting service")
	toStart, err := service.GetServiceByName(name)
	if err != nil {
		log.WithField("name", name).Debugln("Error starting service")
		return
	}
	ch.SendActionStartMessage(toStart)
}
Example #2
File: executor.go Project: elleFlorio/gru
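// getTargetService looks up a service by name and falls back to a "noservice" placeholder when the lookup fails.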
func getTargetService(name string) *cfg.Service {
	var srv *cfg.Service
	srv, err := service.GetServiceByName(name)
	if err != nil {
		srv = &cfg.Service{Name: "noservice"}
	}

	return srv
}
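getTargetService hides the lookup error behind a sentinel, so callers are expected to test for the "noservice" name themselves. A minimal sketch of such a caller (handleService is a hypothetical name, not part of gru):

// handleService is hypothetical; it only illustrates how the
// "noservice" sentinel returned by getTargetService is consumed.
func handleService(name string) {
	target := getTargetService(name)
	if target.Name == "noservice" {
		// The lookup failed upstream; nothing to act on.
		return
	}
	// ... operate on the resolved *cfg.Service ...
}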
Example #3
File: monitor.go Project: elleFlorio/gru
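// updateSystemInstances rebuilds the node-wide instance lists by aggregating the instances of each listed service.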
func updateSystemInstances(services []string) {
	cfg.ClearNodeInstances()
	instances := cfg.GetNodeInstances()
	for _, name := range services {
		service, _ := srv.GetServiceByName(name)
		instances.All = append(instances.All, service.Instances.All...)
		instances.Pending = append(instances.Pending, service.Instances.Pending...)
		instances.Running = append(instances.Running, service.Instances.Running...)
		instances.Stopped = append(instances.Stopped, service.Instances.Stopped...)
		instances.Paused = append(instances.Paused, service.Instances.Paused...)
	}
}
Example #4
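// TestUpdateSystemInstances checks that the aggregated node instance counts equal the sums of the per-service counts.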
func TestUpdateSystemInstances(t *testing.T) {
	defer resetMockServices()
	list := srv.List()

	updateSystemInstances(list)

	srv1, _ := srv.GetServiceByName("service1")
	srv2, _ := srv.GetServiceByName("service2")
	tot_all := len(srv1.Instances.All) + len(srv2.Instances.All)
	tot_pen := len(srv1.Instances.Pending) + len(srv2.Instances.Pending)
	tot_run := len(srv1.Instances.Running) + len(srv2.Instances.Running)
	tot_stop := len(srv1.Instances.Stopped) + len(srv2.Instances.Stopped)
	tot_pause := len(srv1.Instances.Paused) + len(srv2.Instances.Paused)

	instances := cfg.GetNodeInstances()
	assert.Len(t, instances.All, tot_all)
	assert.Len(t, instances.Pending, tot_pen)
	assert.Len(t, instances.Running, tot_run)
	assert.Len(t, instances.Stopped, tot_stop)
	assert.Len(t, instances.Paused, tot_pause)
}
Example #5
File: monitor.go Project: elleFlorio/gru
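// displayStatsOfServices logs instance counts and average CPU/memory values for every service that has computed stats.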
func displayStatsOfServices(stats data.GruStats) {
	for name, value := range stats.Metrics.Service {
		service, _ := srv.GetServiceByName(name)
		log.WithFields(log.Fields{
			"pending:": len(service.Instances.Pending),
			"running:": len(service.Instances.Running),
			"stopped:": len(service.Instances.Stopped),
			"paused:":  len(service.Instances.Paused),
			"cpu avg":  fmt.Sprintf("%.2f", value.BaseMetrics[enum.METRIC_CPU_AVG.ToString()]),
			"mem avg":  fmt.Sprintf("%.2f", value.BaseMetrics[enum.METRIC_MEM_AVG.ToString()]),
		}).Infoln("Stats computed: ", name)
	}
}
Example #6
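// stopService resolves the named service and sends a stop-action message, skipping services with no active instances.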
func stopService(name string) {
	log.WithField("name", name).Debugln("Stopping service")
	toStop, err := service.GetServiceByName(name)
	if err != nil {
		log.WithField("name", name).Debugln("Error stopping service")
		return
	}
	if len(toStop.Instances.All) < 1 {
		log.WithField("service", name).Debugln("No active instance to stop")
		return
	}

	ch.SendActionStopMessage(toStop)
}
Example #7
// computeServiceCpuPerc returns the average CPU percentage across the service's running instances.
func computeServiceCpuPerc(name string, instMetrics map[string]data.MetricData) float64 {

	service, _ := srv.GetServiceByName(name)
	values := make([]float64, 0)

	for _, id := range service.Instances.Running {
		instCpuAvg := instMetrics[id].BaseMetrics[enum.METRIC_CPU_AVG.ToString()]
		values = append(values, instCpuAvg)
	}

	return utils.Mean(values)
}
Example #8
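// TestBuildConfig verifies that the container configuration built for a start action pins the expected CPU set.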
func TestBuildConfig(t *testing.T) {
	defer cfg.CleanServices()
	services := service.CreateMockServices()
	cfg.SetServices(services)
	resources.CreateMockResources(2, "1G", 0, "0G")
	ports1 := map[string]string{
		"50100": "50100",
	}
	resources.InitializeServiceAvailablePorts("service1", ports1)
	service1, _ := service.GetServiceByName("service1")

	config := buildConfig(service1, enum.START)
	assert.Equal(t, "0", config.HostConfig.CpusetCpus)
}
Example #9
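// TestCreateHostConfig verifies that CPU pinning and port bindings are set only when the host config is built for a start action.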
func TestCreateHostConfig(t *testing.T) {
	defer cfg.CleanServices()
	services := service.CreateMockServices()
	cfg.SetServices(services)
	resources.CreateMockResources(2, "1G", 0, "0G")
	ports1 := map[string]string{
		"50100": "50100",
	}
	resources.InitializeServiceAvailablePorts("service1", ports1)
	service1, _ := service.GetServiceByName("service1")

	hostConfigStop := createHostConfig(service1, enum.STOP)
	assert.Equal(t, hostConfigStop.CpusetCpus, "")

	hostConfigStart := createHostConfig(service1, enum.START)
	assert.Equal(t, hostConfigStart.CpusetCpus, "0")
	assert.Len(t, hostConfigStart.PortBindings, 1)
}
Example #10
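// updateCommand dispatches an update command to its target, rewriting node constraints or reloading service configuration as needed.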
func updateCommand(cmd Command) {
	log.Debugln("Updating ", cmd.Target)
	switch cmd.Target {
	case "all":
		cluster := cmd.Object.(string)
		updateAll(cluster)
	case "agent":
		cluster := cmd.Object.(string)
		updateAgent(cluster)
	case "services":
		cluster := cmd.Object.(string)
		updateServices(cluster)
	case "policy":
		cluster := cmd.Object.(string)
		updatePolicy(cluster)
	case "node-base-services":
		data := cmd.Object.([]interface{})
		upd := []string{}
		for _, item := range data {
			upd = append(upd, item.(string))
		}
		constraints := cfg.GetNodeConstraints()
		constraints.BaseServices = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "node-cpumin":
		upd := cmd.Object.(float64)
		constraints := cfg.GetNodeConstraints()
		constraints.CpuMin = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "node-cpumax":
		upd := cmd.Object.(float64)
		constraints := cfg.GetNodeConstraints()
		constraints.CpuMax = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "service-constraints":
		name := cmd.Object.(string)
		srv, _ := service.GetServiceByName(name)
		upd := cfg.ReadService(srv.Remote)
		srv.Constraints = upd.Constraints
	default:
		log.WithField("target", cmd.Target).Errorln("Unrecognized target for command update")
	}
}
Example #11
File: swap.go Project: elleFlorio/gru
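// createSwapPairs splits the services into running and inactive ones and maps each running service to the inactive swap candidates.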
func (p *swapCreator) createSwapPairs(srvList []string) map[string][]string {
	pairs := map[string][]string{}

	running := []string{}
	inactive := []string{}

	for _, name := range srvList {
		service, _ := srv.GetServiceByName(name)
		if len(service.Instances.Running) > 0 {
			running = append(running, name)
		} else {
			inactive = append(inactive, name)
		}
	}

	for _, name := range running {
		pairs[name] = inactive
	}

	return pairs
}
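For intuition, with hypothetical names: given srvList = ["web", "db", "batch"] where only "web" has running instances, the result is map["web"] = ["db", "batch"], i.e. each running service is paired with every inactive service as a swap candidate.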
Example #12
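// TestCreateContainerConfig verifies that exposed ports are present only when the container config is built for a start action.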
func TestCreateContainerConfig(t *testing.T) {
	defer cfg.CleanServices()
	services := service.CreateMockServices()
	cfg.SetServices(services)
	resources.CreateMockResources(2, "1G", 0, "0G")
	service1 := "service1"
	id1 := "pippo"
	availablePorts1 := map[string]string{
		"50100": "50100",
	}
	ports1 := map[string][]string{
		"50100": {"50100"},
	}
	resources.InitializeServiceAvailablePorts(service1, availablePorts1)
	resources.AssignSpecifiPortsToService(service1, id1, ports1)
	srv1, _ := service.GetServiceByName(service1)

	containerConfigStop := createContainerConfig(srv1, enum.STOP)
	assert.Empty(t, containerConfigStop.ExposedPorts)
	containerConfigStart := createContainerConfig(srv1, enum.START)
	assert.NotEmpty(t, containerConfigStart.ExposedPorts)
}
Example #13
File: monitor.go Project: elleFlorio/gru
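// updateRunningInstances promotes pending instances whose metrics pass the readiness threshold to the running state.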
func updateRunningInstances(services []string, threshold int) {
	for _, name := range services {
		service, _ := srv.GetServiceByName(name)
		pending := service.Instances.Pending

		for _, instance := range pending {
			if mtr.IsReadyForRunning(instance, threshold) {
				// TODO
				e := evt.Event{
					Service:  name,
					Instance: instance,
					Status:   enum.PENDING,
				}
				evt.HandlePromoteEvent(e)
				log.WithFields(log.Fields{
					"service":  name,
					"instance": instance,
				}).Debugln("Promoted resource to running state")
			}
		}
	}
}
Example #14
File: scaleout.go Project: elleFlorio/gru
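// computeWeight scores a service for scale-out: 0 if resources are unavailable, otherwise the mean weight of its policy metrics and analytics.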
func (p *scaleoutCreator) computeWeight(name string, clusterData data.Shared) float64 {
	service, _ := srv.GetServiceByName(name)

	if res.AvailableResourcesService(name) < 1.0 {
		return 0.0
	}

	srvCores := service.Docker.CpusetCpus
	if srvCores != "" {
		if !res.CheckSpecificCoresAvailable(srvCores) {
			return 0.0
		}
	}

	policy := cfg.GetPolicy().Scaleout
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}

	for _, metric := range metrics {
		if value, ok := clusterData.Service[name].Data.BaseShared[metric]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	for _, analytic := range analytics {
		if value, ok := clusterData.Service[name].Data.UserShared[analytic]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	policyValue := utils.Mean(weights)

	return policyValue
}
Example #15
File: scalein.go Project: elleFlorio/gru
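// computeWeight scores a service for scale-in, returning 0 for services with no running instances and for protected base services.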
func (p *scaleinCreator) computeWeight(name string, clusterData data.Shared) float64 {
	service, _ := srv.GetServiceByName(name)
	inst_run := len(service.Instances.Running)
	inst_pen := len(service.Instances.Pending)

	if inst_run < 1 {
		return 0.0
	}

	baseServices := cfg.GetNodeConstraints().BaseServices
	if (inst_pen+inst_run) <= 1 && utils.ContainsString(baseServices, name) {
		return 0.0
	}

	policy := cfg.GetPolicy().Scalein
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}

	for _, metric := range metrics {
		if value, ok := clusterData.Service[name].Data.BaseShared[metric]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	for _, analytic := range analytics {
		if value, ok := clusterData.Service[name].Data.UserShared[analytic]; ok {
			weights = append(weights, p.computeMetricWeight(value, threshold))
		}
	}

	policyValue := utils.Mean(weights)

	return policyValue
}
Example #16
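// AvailableResourcesService returns 1.0 if the node has enough free CPU and memory to host the named service, 0.0 otherwise.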
func AvailableResourcesService(name string) float64 {
	var err error

	nodeCpu := resources.CPU.Total
	nodeUsedCpu := resources.CPU.Used
	nodeMem := resources.Memory.Total
	nodeUsedMem := resources.Memory.Used

	srv, _ := service.GetServiceByName(name)
	srvCpu := srv.Docker.CPUnumber
	log.WithFields(log.Fields{
		"service": name,
		"cpus":    srvCpu,
	}).Debugln("Service cpu resources")

	var srvMem int64
	if srv.Docker.Memory != "" {
		srvMem, err = utils.RAMInBytes(srv.Docker.Memory)
		if err != nil {
			log.WithField("err", err).Warnln("Cannot convert service RAM in Bytes.")
			return 0.0
		}
	} else {
		srvMem = 0
	}

	if nodeCpu < int64(srvCpu) || nodeMem < srvMem {
		return 0.0
	}

	if (nodeCpu-nodeUsedCpu) < int64(srvCpu) || (nodeMem-nodeUsedMem) < srvMem {
		return 0.0
	}

	return 1.0
}
Example #17
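// initializeNetworkResources registers the configured Docker ports of every known service as available ports.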
func initializeNetworkResources() {
	for _, name := range service.List() {
		srv, _ := service.GetServiceByName(name)
		InitializeServiceAvailablePorts(name, srv.Docker.Ports)
	}
}
Example #18
File: swap.go Project: elleFlorio/gru
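// computeWeight scores a running/candidate service pair for swapping by comparing their shared metrics and analytics against the policy threshold.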
func (p *swapCreator) computeWeight(running string, candidate string, clusterData data.Shared) float64 {
	srv_run, _ := srv.GetServiceByName(running)
	srv_cand, _ := srv.GetServiceByName(candidate)
	nRun := len(srv_run.Instances.Running)
	baseServices := cfg.GetNodeConstraints().BaseServices

	if utils.ContainsString(baseServices, running) && nRun < 2 {
		return 0.0
	}

	// If the service has the resources to start without stopping the other
	// there is no reason to swap them
	if res.AvailableResourcesService(candidate) > 0 {
		return 0.0
	}

	// TODO: right now this works only with homogeneous containers,
	// taking into account only the CPUs. This is not a good thing,
	// so in the future the swap policy should be able to compare the
	// resources needed by each container and evaluate whether it is
	// possible to swap a container with more than one active one,
	// in order to obtain the requested amount of resources.
	if srv_run.Docker.CPUnumber != srv_cand.Docker.CPUnumber {
		return 0.0
	}

	runShared := clusterData.Service[running]
	candShared := clusterData.Service[candidate]
	policy := cfg.GetPolicy().Swap
	metrics := policy.Metrics
	analytics := policy.Analytics
	threshold := policy.Threshold
	weights := []float64{}

	candValue := 0.0
	runValue := 0.0
	for _, metric := range metrics {
		if value, ok := candShared.Data.BaseShared[metric]; ok {
			candValue = value
		} else {
			candValue = -1.0
		}

		if value, ok := runShared.Data.BaseShared[metric]; ok {
			runValue = value
		} else {
			runValue = -1.0
		}

		if candValue != -1.0 && runValue != -1.0 {
			delta := candValue - runValue
			weight := math.Min(1.0, delta/threshold)
			weights = append(weights, weight)
		} else {
			log.WithFields(log.Fields{
				"metric":    metric,
				"running":   running,
				"candidate": candidate,
			}).Warnln("Cannot compare services: metric not present in both services")
		}
	}

	for _, analytic := range analytics {
		if value, ok := candShared.Data.UserShared[analytic]; ok {
			candValue = value
		} else {
			candValue = -1.0
		}

		if value, ok := runShared.Data.UserShared[analytic]; ok {
			runValue = value
		} else {
			runValue = -1.0
		}

		if candValue != -1.0 && runValue != -1.0 {
			delta := candValue - runValue
			weight := math.Min(1.0, delta/threshold)
			weights = append(weights, weight)
		} else {
			log.WithFields(log.Fields{
				"analytic":  analytic,
				"running":   running,
				"candidate": candidate,
			}).Warnln("Cannot compare services: analytic not present in both services")
		}
	}

	policyValue := math.Max(0.0, utils.Mean(weights))

	return policyValue
}
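As a worked example of the weight formula above: with threshold 0.5, a candidate metric of 0.8 against a running metric of 0.6 gives delta = 0.2 and weight = min(1.0, 0.2/0.5) = 0.4, while any delta at or above the threshold saturates at 1.0; negative deltas pull the mean down toward the 0.0 floor applied by math.Max.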
Example #19
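// updateMetrics rebuilds the node and per-service metrics snapshot from local configuration, stats, analytics, and shared cluster data.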
func updateMetrics() {
	var err error
	metrics = newMetrics()
	metrics.Node.UUID = cfg.GetNodeConfig().UUID
	metrics.Node.Name = cfg.GetNodeConfig().Name
	metrics.Node.Resources.CPU.Total = res.GetResources().CPU.Total
	metrics.Node.Resources.CPU.Availabe = res.GetResources().CPU.Total - res.GetResources().CPU.Used
	localShared, err := data.GetSharedLocal()
	if err != nil {
		log.WithField("err", err).Warnln("Cannot update node active metric")
	} else {
		metrics.Node.ActiveServices = len(localShared.System.ActiveServices)
	}

	for _, name := range service.List() {
		srv, _ := service.GetServiceByName(name)
		srv_metrics := metrics.Service[name]
		srv_metrics.Name = name
		srv_metrics.Image = srv.Image
		srv_metrics.Type = srv.Type

		srv_metrics.Instances.All = len(srv.Instances.All)
		srv_metrics.Instances.Pending = len(srv.Instances.Pending)
		srv_metrics.Instances.Running = len(srv.Instances.Running)
		srv_metrics.Instances.Paused = len(srv.Instances.Paused)
		srv_metrics.Instances.Stopped = len(srv.Instances.Stopped)

		stats, err := data.GetStats()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update stats metrics")
		} else {
			if srv_stats, ok := stats.Metrics.Service[name]; ok {
				srv_metrics.Stats = srv_stats
			} else {
				log.Warnln("Cannot find stats metrics for service ", name)
			}

			metrics.Node.Stats = stats.Metrics.System
		}

		analytics, err := data.GetAnalytics()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update analytics metrics")
		} else {
			if srv_analytics, ok := analytics.Service[name]; ok {
				srv_metrics.Analytics = srv_analytics
			} else {
				log.Debugln("Cannot find analytics metrics for service ", name)
			}
		}

		shared, err := data.GetSharedCluster()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update shared data metrics")
		} else {
			if srv_shared, ok := shared.Service[name]; ok {
				srv_metrics.Shared = srv_shared.Data
			}
		}

		metrics.Service[name] = srv_metrics
	}

	plc, err := data.GetPolicy()
	if err != nil {
		log.WithField("err", err).Warnln("Cannot update plans metrics")
	} else {
		metrics.Policy.Name = plc.Name
		metrics.Policy.Weight = plc.Weight
	}

}