Beispiel #1
0
// stopInstance transitions a service instance to the STOPPED state,
// records a stop event for the service and unregisters the instance.
// It is a no-op when the instance is already stopped, and aborts early
// (leaving no event and keeping the registration) when the status
// change itself fails.
func stopInstance(name string, instance string) {
	log.Debugln("Changing instance status to stop...")
	status := srv.GetServiceInstanceStatus(name, instance)
	if status == enum.STOPPED {
		log.Debugln("Instance already stopped")
		return
	}

	err := srv.ChangeServiceInstanceStatus(name, instance, status, enum.STOPPED)
	if err != nil {
		log.WithField("err", err).Errorln("Cannot stop service instance")
		return
	}

	// Append the instance id to the per-service stop-event list.
	// events is shared with other goroutines, hence the mutex; the
	// read-modify-write is needed because map values are not addressable.
	log.Debugln("Updating events...")
	evt_mutex.Lock()
	srvEvents := events.Service[name]
	srvEvents.Stop = append(srvEvents.Stop, instance)
	events.Service[name] = srvEvents
	evt_mutex.Unlock()

	log.Debugln("Unregistering instance...")
	srv.UnregisterServiceInstance(name, instance)

	log.WithFields(log.Fields{
		"service": name,
		"id":      instance,
	}).Infoln("stopped instance")
}
Beispiel #2
0
// notifyRemoval sends a confirmation on the removal channel when a
// removal notification is pending, then clears the pending flag.
func notifyRemoval() {
	log.Debugln("Checking removal notification...")
	if !chn.NeedsRemovalNotification() {
		return
	}
	log.Debugln("Needs removal notification. Sending message...")
	// Signal-only message; the receiver cares about the event, not data.
	chn.GetRemovalChannel() <- struct{}{}
	log.Debugln("Removal notified")
	chn.SetRemovalNotification(false)
}
Beispiel #3
0
// GetCoresAvailable returns a comma-separated list of indices of up to
// `number` currently free CPU cores (e.g. "0,2,3") and true on success.
// If fewer than `number` cores are free it returns ("", false).
// It only reads the shared core table under the read lock; cores are
// not reserved here.
func GetCoresAvailable(number int) (string, bool) {
	// Yield to other goroutines on exit, as the original did.
	defer runtime.Gosched()

	cores_str := make([]string, 0, number)

	mutex_cpu.RLock()
	for i := 0; i < len(resources.CPU.Cores); i++ {
		// A true entry marks the core as currently free.
		if resources.CPU.Cores[i] {
			cores_str = append(cores_str, strconv.Itoa(i))
		}

		if len(cores_str) >= number {
			break
		}
	}
	// Single unlock point: the previous version unlocked separately on
	// the error path and the success path.
	mutex_cpu.RUnlock()

	if len(cores_str) < number {
		log.Debugln("Error getting available cores: number of free cores < ", number)
		return "", false
	}

	cores := strings.Join(cores_str, ",")
	log.WithFields(log.Fields{
		"number": number,
		"cores":  cores,
	}).Debugln("Getting available cores")
	return cores, true
}
Beispiel #4
0
// keepAlive periodically re-publishes the discovery entry for a service
// instance so it does not expire, until the instance stop channel
// signals termination.
//
// The refresh period is TTL-1 seconds so the key is renewed just before
// the discovery backend would drop it.
func keepAlive(name string, id string) {
	discoveryConf := cfg.GetAgentDiscovery()
	ticker := time.NewTicker(time.Duration(discoveryConf.TTL-1) * time.Second)
	// Release ticker resources when the routine exits (was leaked before).
	defer ticker.Stop()
	opt := discovery.Options{
		"TTL": time.Duration(discoveryConf.TTL) * time.Second,
	}

	ch_stop := ch.CreateInstanceChannel(id)

	// Fixed local name typo: was "isntanceKey".
	instanceKey := discoveryConf.AppRoot + "/" + name + "/" + id
	instanceValue := addressMap[id]

	for {
		select {
		case <-ticker.C:
			if err := discovery.Set(instanceKey, instanceValue, opt); err != nil {
				log.WithFields(log.Fields{
					"service":  name,
					"instance": id,
					"address":  instanceValue,
					"err":      err,
				}).Errorln("Error keeping instance alive")
			}
		case <-ch_stop:
			log.Debugln("Stopping keep alive routine")
			return
		}
	}
}
Beispiel #5
0
// Initialize configures the influxdb metrics backend from the generic
// configuration map: it fills the typed config struct, creates the HTTP
// client and prepares the batch-points container (second precision).
// It returns the first error encountered, leaving db partially set up.
func (db *influxdb) Initialize(config map[string]interface{}) error {
	db.config = &influxdbConfig{}
	if err := utils.FillStruct(db.config, config); err != nil {
		return err
	}

	log.Debugln("Initializing influxdb at address: ", db.config.Url)

	var err error
	db.influx, err = client.NewHTTPClient(client.HTTPConfig{
		Addr:     db.config.Url,
		Username: db.config.Username,
		Password: db.config.Password,
	})
	if err != nil {
		return err
	}

	db.batch, err = client.NewBatchPoints(client.BatchPointsConfig{
		Database:  db.config.DbName,
		Precision: "s",
	})
	return err
}
Beispiel #6
0
// updateFriendsData refreshes the local view of cluster-shared data by
// polling nFriends randomly chosen peers and merging what they report.
// It returns ErrNoPeers when no peers are known, or the first error
// raised by any step of the pipeline.
func updateFriendsData(nFriends int) error {
	log.Debugln("Updating friends data")
	if err := clearFriendsData(); err != nil {
		return err
	}

	peers := getAllPeers()
	log.WithField("peers", len(peers)).Debugln("Number of peers")
	if len(peers) == 0 {
		return ErrNoPeers
	}

	// Pick a random subset of peers to query this round.
	friends, err := chooseRandomFriends(peers, nFriends)
	if err != nil {
		return err
	}
	log.WithField("friends", friends).Debugln("Friends to connect with")

	friendsData, err := getFriendsData(friends)
	if err != nil {
		return err
	}

	clusterData, err := mergeSharedData(friendsData)
	if err != nil {
		return err
	}
	data.SaveSharedCluster(clusterData)

	return nil
}
Beispiel #7
0
// collectMetrics refreshes the in-memory metrics snapshot and persists
// it, logging (but not propagating) storage failures.
func collectMetrics() {
	log.Debugln("Collecting metrics")
	updateMetrics()
	// Log field key fixed: was the typo "errr", inconsistent with the
	// "err" key used everywhere else in this file.
	if err := storeMetrics(metrics); err != nil {
		log.WithField("err", err).Errorln("Error collecting agent metrics")
	}
}
Beispiel #8
0
// createMemory parses a human-readable memory limit (e.g. "512m") into
// bytes for the container host config. An unparsable or empty value is
// treated as "no limit" and yields 0.
func createMemory(memory string) int64 {
	limit, err := utils.RAMInBytes(memory)
	if err == nil {
		return limit
	}
	log.Debugln("Creating Host config: Memory limit not specified")
	return 0
}
Beispiel #9
0
// listen blocks forever consuming messages from the shared action
// channel and dispatching each one to executeActions.
func listen() {
	ch_action := ch.GetActionChannel()
	// for-range replaces the previous single-case for/select loop
	// (staticcheck S1000). Additionally, if the channel is ever closed
	// this now terminates instead of spinning on zero values.
	for msg := range ch_action {
		log.Debugln("Received action message")
		executeActions(msg.Target, msg.Actions)
	}
}
Beispiel #10
0
// nameExist reports whether a node named nodeName is already registered
// in cluster clusterName (all nodes are considered, not only active ones).
func nameExist(nodeName string, clusterName string) bool {
	names := cluster.ListNodes(clusterName, false)
	log.Debugln("Nodes list: ", names)
	// Keys-only range: was the redundant `for name, _ := range`.
	for name := range names {
		if name == nodeName {
			return true
		}
	}
	return false
}
Beispiel #11
0
// GetNodes reads the nodes of clusterName from the remote store.
// When onlyActive is set, nodes whose Active flag is false are
// filtered out of the result.
func GetNodes(clusterName string, onlyActive bool) []cfg.Node {
	remote := c_GRU_REMOTE + clusterName + "/nodes"
	nodes := cfg.ReadNodes(remote)

	if !onlyActive {
		log.Debugln("Nodes: ", nodes)
		return nodes
	}

	active := []cfg.Node{}
	for _, node := range nodes {
		if node.Active {
			active = append(active, node)
		}
	}
	log.Debugln("Active nodes: ", active)

	return active
}
Beispiel #12
0
// initializeNode resolves a unique name for this node (generating one
// when asked for, and regenerating on collisions) and creates the node
// with the locally detected resources.
func initializeNode(nodeName string, clusterName string) {
	if nodeName == "random_name" {
		nodeName = utils.GetRandomName(0)
	}
	// Retry with generated names until one is free in the cluster; the
	// seed sequence starts at -2, matching GetRandomName's contract.
	for seed := -2; nameExist(nodeName, clusterName); seed++ {
		nodeName = utils.GetRandomName(seed)
	}
	log.Debugln("Node name: ", nodeName)
	node.CreateNode(nodeName, res.GetResources())
}
Beispiel #13
0
// createCpusetCpus resolves the cpuset-cpus string for a new container.
// An explicitly provided cpusetcpus wins unchanged; otherwise `cores`
// free cores (clamped to at least 1) are requested from the resource
// manager, and "" is returned when none can be assigned.
func createCpusetCpus(cpusetcpus string, cores int) string {
	if cpusetcpus != "" {
		return cpusetcpus
	}

	if cores < 1 {
		log.Warnln("Number of requested CPUs = 0. Setting to 1")
		cores = 1
	}

	assigned, ok := res.GetCoresAvailable(cores)
	if !ok {
		log.Debugln("Error setting cpusetcpus in hostconfig")
		return ""
	}
	return assigned
}
Beispiel #14
0
// updateCommand applies a remote "update" command to the local node,
// dispatching on cmd.Target. Unknown targets are logged and ignored.
//
// NOTE(review): every case type-asserts cmd.Object without the comma-ok
// form, so a payload of the wrong type panics — presumably the sender
// guarantees the shape; verify against the command producer.
func updateCommand(cmd Command) {
	log.Debugln("Updating ", cmd.Target)
	switch cmd.Target {
	case "all":
		cluster := cmd.Object.(string)
		updateAll(cluster)
	case "agent":
		cluster := cmd.Object.(string)
		updateAgent(cluster)
	case "services":
		cluster := cmd.Object.(string)
		updateServices(cluster)
	case "policy":
		cluster := cmd.Object.(string)
		updatePolicy(cluster)
	case "node-base-services":
		// Payload is a []interface{} of service names; convert to []string.
		data := cmd.Object.([]interface{})
		upd := []string{}
		for _, item := range data {
			upd = append(upd, item.(string))
		}
		constraints := cfg.GetNodeConstraints()
		constraints.BaseServices = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "node-cpumin":
		upd := cmd.Object.(float64)
		constraints := cfg.GetNodeConstraints()
		constraints.CpuMin = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "node-cpumax":
		upd := cmd.Object.(float64)
		constraints := cfg.GetNodeConstraints()
		constraints.CpuMax = upd
		cfg.WriteNodeConstraints(cfg.GetNodeConfig().Remote, *constraints)
	case "service-constraints":
		// Re-read the service definition from its remote and refresh the
		// in-memory constraints. NOTE(review): effective only if srv is a
		// pointer into the service registry — confirm GetServiceByName.
		name := cmd.Object.(string)
		srv, _ := service.GetServiceByName(name)
		upd := cfg.ReadService(srv.Remote)
		srv.Constraints = upd.Constraints
	default:
		log.WithField("target", cmd.Target).Errorln("Unrecognized target for command update")
	}
}
Beispiel #15
0
// RunLoop drives the autonomic MAPE cycle (monitor, analyze, plan,
// execute) once every loopTimeInterval seconds. It never returns.
func RunLoop(loopTimeInterval int) {
	// Start the metric collector first so the monitor has data to read.
	metric.StartMetricCollector()

	// Ticker for the periodic execution of the loop.
	period := time.Duration(loopTimeInterval) * time.Second
	ticker := time.NewTicker(period)
	log.Infoln("Running autonomic loop")

	for {
		select {
		case <-ticker.C:
			stats := monitor.Run()
			analytics := analyzer.Run(stats)
			plan := planner.Run(analytics)
			executor.Run(plan)

			log.Infoln("-------------------------")

		case <-ch_err:
			log.Debugln("Error running autonomic loop")
		}
	}
}
Beispiel #16
0
// collector scans the container log stream line by line, parses lines
// matching the package-level regex and forwards the resulting entries
// on ch_entry. It returns when the stream ends, logging any scan error.
func collector(contLog io.ReadCloser, ch_entry chan logEntry) {
	scanner := bufio.NewScanner(contLog)
	for scanner.Scan() {
		line := scanner.Bytes()
		if !regex.Match(line) {
			continue
		}
		entry, err := getDataFromLogLine(string(line))
		if err != nil {
			log.WithField("err", err).Errorln("Error parsing container logs")
			continue
		}
		ch_entry <- entry
	}

	// Scanner errors (other than io.EOF) surface here, after the loop.
	if err := scanner.Err(); err != nil {
		log.WithField("err", err).Errorln("Error in scanner.")
	}

	log.Debugln("Stopped collector")
}
Beispiel #17
0
// RegisterCluster creates the remote directory layout for a new
// cluster: the uuid entry plus the nodes, services, config, policy and
// analytics keys. Each step is best-effort: failures are logged and the
// remaining keys are still created.
func RegisterCluster(name string, id string) {
	var err error
	err = discovery.Register(c_GRU_REMOTE+name+"/uuid", id)
	if err != nil {
		log.Errorln("Error registering cluster")
	}
	// Typo fixed in the debug message: was "forder".
	log.Debugln("Created cluster folder: ", name)

	// Directory-type entries use the "Dir" option.
	opt := discovery.Options{"Dir": true}
	err = discovery.Set(c_GRU_REMOTE+name+"/"+c_NODES_REMOTE, "", opt)
	if err != nil {
		log.Errorln("Error creating nodes folder")
	}
	log.Debugln("Created nodes folder")

	err = discovery.Set(c_GRU_REMOTE+name+"/"+c_SERVICES_REMOTE, "", opt)
	if err != nil {
		log.Errorln("Error creating services folder")
	}
	log.Debugln("Created services folder")

	// Plain keys start with the "empty" placeholder value.
	err = discovery.Set(c_GRU_REMOTE+name+"/"+c_CONFIG_REMOTE, "empty", discovery.Options{})
	if err != nil {
		log.Errorln("Error creating config key")
	}
	log.Debugln("Created config key")

	err = discovery.Set(c_GRU_REMOTE+name+"/"+c_POLICY_REMOTE, "empty", discovery.Options{})
	if err != nil {
		log.Errorln("Error creating policy key")
	}
	log.Debugln("Created policy key")

	err = discovery.Set(c_GRU_REMOTE+name+"/"+c_ANALYTIC_REMOTE, "", opt)
	if err != nil {
		log.Errorln("Error creating analytics folder")
	}
	log.Debugln("Created analytics folder")
}
Beispiel #18
0
// StartCollector launches the log collector goroutine, which reads the
// container log stream and feeds parsed entries into the package-level
// ch_entry channel. It returns immediately.
func StartCollector(contLog io.ReadCloser) {
	log.Debugln("starting collector")
	go collector(contLog, ch_entry)
}
Beispiel #19
0
// updateMetrics rebuilds the package-level metrics snapshot from the
// current node configuration, resources, per-service instance counts,
// stats, analytics, cluster-shared data and the active policy. Data
// sources that fail are logged and skipped so a partial snapshot is
// still produced.
func updateMetrics() {
	var err error
	metrics = newMetrics()
	metrics.Node.UUID = cfg.GetNodeConfig().UUID
	metrics.Node.Name = cfg.GetNodeConfig().Name
	metrics.Node.Resources.CPU.Total = res.GetResources().CPU.Total
	// NOTE(review): "Availabe" is the field's spelling in the resources
	// struct declared elsewhere; it cannot be corrected from this file.
	metrics.Node.Resources.CPU.Availabe = res.GetResources().CPU.Total - res.GetResources().CPU.Used
	localShared, err := data.GetSharedLocal()
	if err != nil {
		log.WithField("err", err).Warnln("Cannot update node active metric")
	} else {
		metrics.Node.ActiveServices = len(localShared.System.ActiveServices)
	}

	for _, name := range service.List() {
		srv, _ := service.GetServiceByName(name)
		srv_metrics := metrics.Service[name]
		srv_metrics.Name = name
		srv_metrics.Image = srv.Image
		srv_metrics.Type = srv.Type

		// Instance counts broken down by lifecycle state.
		srv_metrics.Instances.All = len(srv.Instances.All)
		srv_metrics.Instances.Pending = len(srv.Instances.Pending)
		srv_metrics.Instances.Running = len(srv.Instances.Running)
		srv_metrics.Instances.Paused = len(srv.Instances.Paused)
		srv_metrics.Instances.Stopped = len(srv.Instances.Stopped)

		// NOTE(review): GetStats/GetAnalytics/GetSharedCluster look
		// loop-invariant and could be hoisted above the loop; kept here
		// to preserve the per-service warning logs on failure.
		stats, err := data.GetStats()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update stats metrics")
		} else {
			if srv_stats, ok := stats.Metrics.Service[name]; ok {
				srv_metrics.Stats = srv_stats
			} else {
				log.Warnln("Cannot find stats metrics for service ", name)
			}

			metrics.Node.Stats = stats.Metrics.System
		}

		analytics, err := data.GetAnalytics()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update analytics metrics")
		} else {
			// Local renamed from the typo "srv_analytisc".
			if srv_analytics, ok := analytics.Service[name]; ok {
				srv_metrics.Analytics = srv_analytics
			} else {
				log.Debugln("Cannot find analytics metrics for service ", name)
			}
		}

		shared, err := data.GetSharedCluster()
		if err != nil {
			log.WithFields(log.Fields{
				"err":     err,
				"service": name,
			}).Warnln("Cannot update shared data metrics")
		} else {
			if srv_shared, ok := shared.Service[name]; ok {
				srv_metrics.Shared = srv_shared.Data
			}
		}

		// Write the filled value back: map values are not addressable.
		metrics.Service[name] = srv_metrics
	}

	plc, err := data.GetPolicy()
	if err != nil {
		log.WithField("err", err).Warnln("Cannot update plans metrics")
	} else {
		metrics.Policy.Name = plc.Name
		metrics.Policy.Weight = plc.Weight
	}

}
Beispiel #20
0
// waitForRemoval blocks until a removal confirmation message arrives on
// the shared removal channel.
func waitForRemoval() {
	log.Debugln("Waiting for removal confirmation...")
	<-ch.GetRemovalChannel()
	log.Debugln("Remove complete")
}
Beispiel #21
0
// Events are: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update
//
// eventCallback handles Docker daemon events for monitored containers.
// Events are matched to a service by image name; unknown images and
// non-container events are logged and dropped. Create/start/die/destroy
// drive the instance lifecycle (stats monitoring, metric buffers,
// optional log reading); stop and kill are observed but not acted on
// (the matching "die" event performs the teardown).
func eventCallback(event *dockerclient.Event, ec chan error, args ...interface{}) {
	log.Debugln("Received event")
	// By now we do not handle events with type != container
	if event.Type != "container" {
		log.WithField("type", event.Type).Debugln("Received event with type different from 'container'")
		return
	}

	// Resolve the owning service from the container image.
	service, err := srv.GetServiceByImage(event.From)
	if err != nil {
		log.WithFields(log.Fields{
			"err":   err,
			"event": event,
		}).Warnln("Cannot handle event")
		return
	}

	e := evt.Event{
		Service:  service.Name,
		Image:    event.From,
		Instance: event.ID,
		Type:     event.Type,
	}

	switch event.Status {
	case "create":
		log.WithField("image", e.Image).Debugln("Received create signal")
		evt.HandleCreateEvent(e)
		// Begin streaming container stats for the new instance.
		container.Docker().Client.StartMonitorStats(e.Instance, statCallBack, ch_mnt_stats_err)
	case "start":
		log.WithField("image", e.Image).Debugln("Received start signal")
		// Lazily create the per-instance CPU metric buffers.
		if _, ok := instBuffer[e.Instance]; !ok {
			instBuffer[e.Instance] = instanceMetricBuffer{
				cpuInst: utils.BuildBuffer(c_B_SIZE),
				cpuSys:  utils.BuildBuffer(c_B_SIZE),
			}
		}
		e.Status = enum.PENDING
		// NOTE(review): "HanldeStartEvent" is misspelled in the evt
		// package; it cannot be renamed from this call site.
		evt.HanldeStartEvent(e)
		mtr.AddInstance(e.Instance)
		if enableLogReading {
			startMonitorLog(event.ID)
		}
	case "stop":
		log.WithField("image", e.Image).Debugln("Received stop signal")
	case "kill":
		log.WithField("image", e.Image).Debugln("Received kill signal")
	case "die":
		log.WithField("image", e.Image).Debugln("Received die signal")
		// Tear down per-instance state; "die" fires for every exit path.
		delete(instBuffer, e.Instance)
		mtr.RemoveInstance(e.Instance)
		evt.HandleStopEvent(e)
	case "destroy":
		log.WithField("id", e.Instance).Debugln("Received destroy signal")
		evt.HandleRemoveEvent(e)
	default:
		log.WithFields(log.Fields{
			"err":   "event not handled",
			"event": event.Status,
			"image": event.From,
		}).Debugln("Received unknown signal")
	}

	log.Debugln("Event handled")

}
Beispiel #22
0
// SetServiceInstanceResources inspects a freshly created container and
// records its resource assignment: it reserves the container's cpuset
// cores and published ports for the service, and stores the instance's
// discovery address (host port bound to the service discovery port).
// Failures in individual steps are logged; an inspect failure aborts.
func SetServiceInstanceResources(name string, id string) {
	var err error

	log.Debugln("Setting new instance resources")
	// This is needed otherwise dockerclient does not
	// return the correct container information
	time.Sleep(100 * time.Millisecond)

	info, err := container.Docker().Client.InspectContainer(id)
	if err != nil {
		log.WithFields(log.Fields{
			"id":  id,
			"err": err,
		}).Errorln("Error setting instance resources")
		// info is unusable after a failed inspect; bail out instead of
		// dereferencing it below (previously a nil-pointer panic risk).
		return
	}

	cpusetcpus := info.HostConfig.CpusetCpus
	portBindings, err := container.GetPortBindings(id)
	if err != nil {
		log.WithFields(log.Fields{
			"service": name,
			"id":      id,
			"err":     err,
		}).Errorln("Error getting instance port binding")
	}

	// Typo fixed in the debug message: was "respources".
	log.WithFields(log.Fields{
		"service":      name,
		"cpusetcpus":   cpusetcpus,
		"portbindings": portBindings,
	}).Debugln("New instance resources")

	err = CheckAndSetSpecificCores(cpusetcpus, id)
	if err != nil {
		log.WithFields(log.Fields{
			"service": name,
			"id":      id,
			"cpus":    cpusetcpus,
			"err":     err,
		}).Errorln("Error assigning CPU resources to new instance")
	}

	err = AssignSpecifiPortsToService(name, id, portBindings)
	if err != nil {
		log.WithFields(log.Fields{
			"service":  name,
			"id":       id,
			"bindings": portBindings,
			"err":      err,
		}).Errorln("Error assigning port resources to new instance")
	}

	// Record the host address mapped to the service's discovery port so
	// the instance can be registered with the discovery service.
	guestPort := service.GetDiscoveryPort(name)
	if hostPorts, ok := portBindings[guestPort]; ok {
		if len(hostPorts) > 0 {
			service.SaveInstanceAddress(id, hostPorts[0])

			log.WithFields(log.Fields{
				"service":  name,
				"instance": id,
				"guest":    guestPort,
				"host":     hostPorts[0],
			}).Debugln("Saved instance address")

		} else {
			log.WithFields(log.Fields{
				"service":  name,
				"instance": id,
				"guest":    guestPort,
			}).Debugln("Cannot register instance address: host ports < 0")
		}

	} else {
		log.WithFields(log.Fields{
			"service":  name,
			"instance": id,
			"guest":    guestPort,
		}).Debugln("Cannot register instance address: no bindings")
	}

}
Beispiel #23
0
// deactivateNode toggles the local node-active flag and publishes the
// node as inactive to the remote configuration store.
func deactivateNode() {
	log.Debugln("Deactivating node")
	// NOTE(review): ToggleActiveNode flips the flag unconditionally —
	// presumably the node is active when this is called; verify callers.
	cfg.ToggleActiveNode()
	cfg.WriteNodeActive(cfg.GetNodeConfig().Remote, false)
}
Beispiel #24
0
// activateNode toggles the local node-active flag and publishes the
// node as active to the remote configuration store.
func activateNode() {
	log.Debugln("Activating node")
	// NOTE(review): ToggleActiveNode flips the flag unconditionally —
	// presumably the node is inactive when this is called; verify callers.
	cfg.ToggleActiveNode()
	cfg.WriteNodeActive(cfg.GetNodeConfig().Remote, true)
}