Example #1
func (c *Resources) checkResources(cluster core.ClusterInterface, service framework.ServiceConfig) error {
	requiredInstances, err := c.requiredInstances(cluster, service)
	if err != nil {
		return err
	}

	if requiredInstances == 0 {
		return nil
	}

	slaves, err := cluster.Slaves()
	if err != nil {
		return err
	}

	allowedInstances := 0
	for _, v := range slaves {
		allowed := v.AllowedInstances(service.CPUShares, float64(service.Memory))
		logger.Instance().Infof("Checking slave %s - Available CPU %f Mem %f - Allowed instances %d", v.Hostname, v.AvailableCpu(), v.AvailableMem(), allowed)
		if allowed > 0 {
			allowedInstances += allowed
		}
	}

	if allowedInstances < requiredInstances {
		return fmt.Errorf("Cluster %s doesn't have enough resources. It only accept %d instances.", cluster.Id(), allowedInstances)
	}
	logger.Instance().Infof("Checking Resources in cluster %s done", cluster.Id())

	return nil
}
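AllowedInstances itself isn't shown in these examples; a minimal sketch of how a slave could derive it from its free CPU and memory, assuming AvailableCpu/AvailableMem return the free capacity logged above (hypothetical helper, not the real implementation):

// Hypothetical: capacity is the smaller of what free CPU and free memory allow.
func allowedInstances(availableCPU, availableMem, cpuShares, memory float64) int {
	if cpuShares <= 0 || memory <= 0 {
		return 0
	}
	byCPU := int(availableCPU / cpuShares)
	byMem := int(availableMem / memory)
	if byCPU < byMem {
		return byCPU
	}
	return byMem
}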
Example #2
func (sm *ManagerInfo) Rollback() bool {
	logger.Instance().Infoln("Starting Rollback")

	sm.lockDeploy.Lock()
	defer sm.lockDeploy.Unlock()

	var mutex sync.Mutex
	rollbackOk := true

	var wg sync.WaitGroup
	wg.Add(len(sm.Clusters()))
	for _, cluster := range sm.Clusters() {
		go func(c core.ClusterInterface) {
			defer wg.Done()

			if err := c.Rollback(); err != nil {
				mutex.Lock()
				rollbackOk = false
				mutex.Unlock()
				logger.Instance().Infof("Rollback is finished on cluster with error %s", err.Error())
			}
		}(cluster)
	}
	wg.Wait()

	return rollbackOk
}
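Since the goroutines only ever flip rollbackOk one way, the mutex-guarded flag could also be an atomic; a sketch of the same fan-out using sync/atomic (Go 1.19+), not the code as shipped:

// Alternative sketch: atomic.Bool instead of a mutex-guarded bool.
var failed atomic.Bool
var wg sync.WaitGroup
for _, cluster := range sm.Clusters() {
	wg.Add(1)
	go func(c core.ClusterInterface) {
		defer wg.Done()
		if err := c.Rollback(); err != nil {
			failed.Store(true) // one-way flip, safe without a lock
		}
	}(cluster)
}
wg.Wait()
return !failed.Load()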
Example #3
// setupClusters initializes the clusters, using each cluster's id as its map key
func (sm *ManagerInfo) setupClusters(config map[string]configuration.Cluster, clusterFilter *regexp.Regexp) error {
	if clusterFilter == nil {
		logger.Instance().Infoln("Cluster filter is empty. Using all clusters")
		clusterFilter = regexp.MustCompile(".*")
	}
	for key := range config {
		if !clusterFilter.MatchString(key) {
			logger.Instance().Infof("Cluster %s with configuration detected but filtered.", key)
			continue
		}

		cluster, err := core.NewCluster(key, config[key])
		if err != nil {
			switch err.(type) {
			case *core.ClusterDisabled:
				logger.Instance().Warnln(err.Error())
				continue
			default:
				return err
			}
		}

		sm.clusters[key] = cluster
	}

	if len(sm.clusters) == 0 {
		return errors.New("Should exist at least one cluster")
	}
	return nil
}
Example #4
func deployCmd(c *cli.Context) error {
	sb := core.ServiceConfigurationBuilder{
		ServiceId:             c.String("service-id"),
		RegistryDomain:        c.String("registry"),
		Namespace:             c.String("namespace"),
		Tag:                   c.String("tag"),
		Cpu:                   c.Float64("cpu"),
		Memory:                c.Float64("memory"),
		MinimumHealthCapacity: c.Float64("minimumHealthCapacity"),
		MaximumOverCapacity:   c.Float64("maximumOverCapacity"),
		SyslogTag:             c.String("syslog-tag"),
		Instances:             c.Int("instances"),
		JobUrl:                c.String("deploy-url"),
		ContactEmail:          c.String("contact-email"),
	}

	envs, err := util.ParseMultiFileLinesToArray(c.StringSlice("env-file"))
	if err != nil {
		logger.Instance().Fatalln("Error parsing environment files", err)
	}
	sb.AddEnvironmentVars(envs)
	sb.AddEnvironmentVars(c.StringSlice("env"))

	sb.AddConstraints(c.StringSlice("constraint"))

	sb.AddPorts(c.StringSlice("port"))
	sb.AddLabels(c.StringSlice("label"))

	handleDeploySigTerm(clusterManager)
	if clusterManager.DeployWithRollbackOnError(sb, c.Bool("force")) {
		logger.Instance().Infoln("Deployment READY")
		var resume []callbackResume
		for clusterKey, cluster := range clusterManager.Clusters() {
			logger.Instance().Infof("Services in Cluster %s :", clusterKey)
			for _, service := range cluster.Services() {
				for _, instance := range service.Instances {
					for _, val := range instance.Ports {
						logger.Instance().Infof("Deployed %s at host %s and address %+v", instance.ID, instance.Host, val)
						instanceInfo := callbackResume{
							Id:         instance.ID,
							Address:    instance.Host + ":" + strconv.FormatInt(val.Internal, 10),
							ClusterKey: clusterKey,
						}
						resume = append(resume, instanceInfo)
					}
				}
			}
		}
		//jsonResume, _ := json.Marshal(resume)
		//fmt.Println(string(jsonResume))
		return nil
	}

	return fmt.Errorf("Deployment-Process terminated with errors")
}
Example #5
func Register(check IntegrityCheck) {
	if check == nil {
		logger.Instance().Fatal("Invalid Integrity Check")
	}
	_, registered := checkers[check.Id()]
	if registered {
		logger.Instance().Fatalf("IntegrityCheck %s already registered", check.Id())
	}

	checkers[check.Id()] = check
}
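Checks presumably register themselves when their package is loaded; a hypothetical sketch of a checker wiring itself in through init() (the noopCheck type and the exact IntegrityCheck method set are assumptions):

// Hypothetical self-registering check; noopCheck is illustrative only.
type noopCheck struct{}

func (n *noopCheck) Id() string { return "noop" }

func (n *noopCheck) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	return nil // a real check would validate and chain to the next checker
}

func init() {
	Register(&noopCheck{})
}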
Example #6
func (c *DockerImageExists) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if Docker Image Exists")

	cfg := cfgBuilder.BuildForCluster("")
	if err := c.client.PullImage(cfg.Registry, cfg.Image(), cfg.Tag); err != nil {
		return err
	}

	logger.Instance().Infoln("Docker Image exists, check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
Example #7
func (c *Resources) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking Resources in all clusters")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		if err := c.checkResources(cluster, cfg); err != nil {
			return err
		}
	}

	logger.Instance().Infoln("Min Hosts Per Cluster Check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
Example #8
func (sm *ManagerInfo) checkIntegrity(serviceConfig core.ServiceConfigurationBuilder) bool {
	logger.Instance().Infoln("Checking Cluster Integrity")

	if sm.CheckChain() == nil {
		logger.Instance().Infoln("There are no checks.")
		return true
	}

	err := sm.CheckChain().Check(serviceConfig)
	if err != nil {
		logger.Instance().Errorf("Integrity Check fail. Cause: %s", err)
	}
	return err == nil
}
Example #9
func (c *DockerUserRestriction) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if Docker Image run with a valid user")

	cfg := cfgBuilder.BuildForCluster("")
	img, err := c.client.InspectImage(cfg.FullImageName())
	if err != nil {
		return err
	}

	// userRegexp presumably matches disallowed users (e.g. root)
	if c.userRegexp.MatchString(img.Config.User) {
		return fmt.Errorf("User %s isn't allowed", img.Config.User)
	}

	logger.Instance().Infof("Docker Image runs with a valid user (%s), check done", img.Config.User)
	return checkers.HandleNextCheck(c, cfgBuilder)
}
Example #10
func (sm *ManagerInfo) FindServiceInformation(search string) []*framework.ServiceInformation {
	var mutex sync.Mutex
	allServices := make([]*framework.ServiceInformation, 0)

	var wg sync.WaitGroup
	wg.Add(len(sm.clusters))
	for _, cluster := range sm.clusters {
		go func(c core.ClusterInterface) {
			defer wg.Done()

			services, err := c.FindServiceInformation(search)
			if err != nil {
				logger.Instance().Warnf("Error finding services in cluster %s with error: %s", c.Id(), err)
				return
			} else if len(services) != 0 {
				// TODO: should the same service be added to the array as a separate element, or should its instances be appended?
				mutex.Lock()
				allServices = append(allServices, services...)
				mutex.Unlock()
			}
		}(cluster)
	}
	wg.Wait()

	return allServices
}
Example #11
func (sm *ManagerInfo) deploy(serviceConfig core.ServiceConfigurationBuilder, force bool) bool {
	sm.lockDeploy.Lock()
	defer sm.lockDeploy.Unlock()

	var mutex sync.Mutex
	deployOk := true

	var wg sync.WaitGroup
	wg.Add(len(sm.Clusters()))
	for _, cluster := range sm.Clusters() {
		go func(c core.ClusterInterface) {
			defer wg.Done()
			copyConfig := serviceConfig // per-goroutine copy of the builder
			//config := copyConfig.BuildForCluster(c.Id())
			if err := c.DeployService(copyConfig, force); err != nil {
				logger.Instance().Errorf("Error deploying service in cluster %s with error: %s", c.Id(), err)
				mutex.Lock()
				deployOk = false
				mutex.Unlock()
			}
		}(cluster)
	}
	wg.Wait()

	return deployOk
}
Example #12
func deleteCmd(c *cli.Context) error {
	err := clusterManager.DeleteService(c.String("service-id"))
	if err != nil {
		return fmt.Errorf("Error deleting service. %s", err)
	}
	logger.Instance().Infoln("Service deleted: ", c.String("service-id"))
	return nil
}
Example #13
func (c *MinHostsPerCluster) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if the service meets the minimum of instances per cluster")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		minHosts, err := strconv.Atoi(cluster.Constraints()[c.constraintId])
		if err != nil {
			return fmt.Errorf("Invalid constraint")
		}
		if minHosts > cfg.Instances {
			return fmt.Errorf("Error trying to deploy %d instances in Cluster %s configured with a min of %d instances", cfg.Instances, clusterKey, minHosts)
		}
		logger.Instance().Infof("The service needs %d instances and meets the minimum of instances %d required in cluster %s", cfg.Instances, minHosts, clusterKey)
	}

	logger.Instance().Infoln("Minimum instances per cluster check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
Example #14
func ParseSingleFileLinesToArray(path string) ([]string, error) {
	logger.Instance().Debugf("Parseando el archivo %s", path)
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	var lines []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		text := scanner.Text()
		if text != "" && !strings.HasPrefix(text, "#") {
			lines = append(lines, text)
		}
	}
	logger.Instance().Debugln("Parseo exitoso")
	return lines, scanner.Err()
}
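A quick usage sketch: given an env file with comments and blank lines, only the KEY=VALUE lines come back (the file contents below are illustrative):

// service.env:
//   # database settings
//   DB_HOST=db.internal
//
//   DB_PORT=5432
lines, err := ParseSingleFileLinesToArray("service.env")
if err != nil {
	logger.Instance().Fatalln(err)
}
// lines == []string{"DB_HOST=db.internal", "DB_PORT=5432"}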
Example #15
func validateKeyValueAndFilter(kvs []string, filterEnv func([]string)) {
	for _, elm := range kvs {
		splittedKv := strings.SplitN(elm, "=", 2)
		if len(splittedKv) != 2 {
			logger.Instance().Warnf("Key=Value format invalid of %s. Will ignore this one", splittedKv)
			continue
		}
		filterEnv(splittedKv)
	}
}
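The filterEnv callback receives the already-split pair; a usage sketch that collects the well-formed entries into a map (the kvMap name is ours):

kvMap := make(map[string]string)
validateKeyValueAndFilter(
	[]string{"DB_HOST=db.internal", "broken-entry", "DB_PORT=5432"},
	func(kv []string) {
		kvMap[kv[0]] = kv[1] // kv always has exactly two elements here
	},
)
// kvMap == map[string]string{"DB_HOST": "db.internal", "DB_PORT": "5432"}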
Example #16
func (c *SameHealthCheck) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Initializing Health Configuration Check in all clusters")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		for _, hc := range cfg.HealthCheckConfig {
			logger.Instance().Infof("Checking port %d/%s Health Configuration in cluster %s", hc.GetPort(), hc.GetProtocol(), clusterKey)
			path, err := cluster.HealthCheck(cfg.ServiceName, cfg.CompatibilityVersion, hc.GetPort())
			if err != nil {
				return err
			}
			if path != hc.GetPath() {
				return fmt.Errorf("There are different Health Checks Configuration: expected %s - configured in %s: %s", hc.GetPath(), clusterKey, path)
			}
			logger.Instance().Infof("Health Check in cluster %s is ok: %s", clusterKey, path)
		}
	}

	logger.Instance().Infoln("The service has good health paths, check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
Example #17
func (c *Resources) requiredInstances(cluster core.ClusterInterface, service framework.ServiceConfig) (int, error) {
	requiredInstances := 0
	deployedService, err := cluster.FindService(service.Namespace, service.ServiceName, service.CompatibilityVersion)
	if err != nil {
		return 0, err
	}

	if deployedService == nil {
		requiredInstances = service.Instances
		logger.Instance().Infof("The service is new in cluster %s, deploy needs resources for %d instances", cluster.Id(), requiredInstances)
	} else if len(deployedService.Instances) == service.Instances {
		requiredInstances = int(math.Max(1, math.Floor(float64(service.Instances)*0.2)))
		logger.Instance().Infof("Service replacement in cluster %s, deploy needs resources for %d instances", cluster.Id(), requiredInstances)
	} else if len(deployedService.Instances) < service.Instances {
		requiredInstances = service.Instances - len(deployedService.Instances)
		logger.Instance().Infof("Service up scale in cluster %s, deploy needs resources for %d instances", cluster.Id(), requiredInstances)
	} else {
		logger.Instance().Infof("Service down scale in cluster %s, the cluster should have enough resources", cluster.Id())
	}

	return requiredInstances, nil
}
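The replacement branch reserves roughly 20% headroom, never less than one instance; a few worked cases of max(1, floor(instances * 0.2)):

// instances = 3  -> floor(0.6) = 0 -> clamped to 1 instance of headroom
// instances = 10 -> floor(2.0) = 2 -> 2 instances of headroom
// instances = 14 -> floor(2.8) = 2 -> 2 instances of headroom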
Example #18
func (sm *ManagerInfo) DeployWithRollbackOnError(serviceConfig core.ServiceConfigurationBuilder, force bool) bool {
	logger.Instance().Infof("Deploying services")

	if !sm.checkIntegrity(serviceConfig) {
		return false
	}

	if sm.deploy(serviceConfig, force) {
		return true
	}

	sm.Rollback()
	return false
}
Example #19
func newClusterMock() *Cluster {
	c := new(Cluster)
	c.id = clusterName
	c.frameworkApiHelper = &FrameworkMock{}
	c.services = make([]*framework.ServiceInformation, 0)
	c.kvStore = &ConsulCliKvMock{}
	c.mesosCli = &MesosCliMock{}
	c.defaultEnvironments = []string{"FOO=BAR", "QWE=RTY"}

	c.log = logger.Instance().WithFields(log.Fields{
		"cluster": c.id,
	})

	return c
}
Example #20
func AddOrReplaceKv(key string, value string, kvs []KeyValue) []KeyValue {
	replace := false
	for id, kv := range kvs {
		if kv.Key == key {
			logger.Instance().Debugf("Replacing key %s", kv.Key)
			kvs[id].Value = value
			replace = true
			break
		}
	}

	if !replace {
		kvs = append(kvs, KeyValue{Key: key, Value: value})
	}

	return kvs
}
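Usage sketch, assuming KeyValue has just the Key and Value fields used above:

kvs := []KeyValue{{Key: "ENV", Value: "staging"}}
kvs = AddOrReplaceKv("ENV", "production", kvs)   // value replaced in place
kvs = AddOrReplaceKv("REGION", "us-east-1", kvs) // new pair appended
// kvs == [{ENV production} {REGION us-east-1}]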
Example #21
func RunApp() {
	app := cli.NewApp()
	app.Name = "cloud-crane"
	app.Usage = "Multi-Scheduler Orchestrator"
	app.Version = version.VERSION + " (" + version.GITCOMMIT + ")"

	app.Flags = globalFlags()

	app.Before = func(c *cli.Context) error {
		return setupApplication(c, configuration.ReadConfiguration)
	}

	app.Commands = commands

	if err := app.Run(os.Args); err != nil {
		logger.Instance().Fatalln(err)
	}
}
Example #22
func NewClusterManagerMock() *ManagerInfo {
	clusterConfig := map[string]configuration.Cluster{
		"valid-a": {
			Framework: configuration.Framework{
				"marathon": configuration.Parameters{
					"address":         "https://valid-a:8443",
					"deploy-timeout":  30,
					"basic-auth-user": "******",
					"basic-auth-pwd":  "password",
				},
			},
		},
		"valid-b": {
			Framework: configuration.Framework{
				"marathon": configuration.Parameters{
					"address":         "https://valid-b:8443",
					"deploy-timeout":  30,
					"basic-auth-user": "******",
					"basic-auth-pwd":  "password",
				},
			},
		},
	}
	cm := new(ManagerInfo)
	cm.clusters = make(map[string]core.ClusterInterface)

	for key, cfg := range clusterConfig {
		cm.clusters[key] = &ClusterMock{
			id:       key,
			address:  cfg.Framework.Parameters()["address"].(string),
			services: make([]*framework.ServiceInformation, 0),
			log:      logger.Instance().WithFields(log.Fields{"cluster": key}),
		}
	}

	return cm
}
Example #23
func (sm *ManagerInfo) DeleteService(serviceId string) error {
	logger.Instance().Infoln("Starting DeleteService")

	var mutex sync.Mutex
	var deleteError error

	var wg sync.WaitGroup
	wg.Add(len(sm.clusters))
	for _, cluster := range sm.clusters {
		go func(c core.ClusterInterface) {
			defer wg.Done()

			if err := c.DeleteService(serviceId); err != nil {
				mutex.Lock()
				deleteError = fmt.Errorf("Delete process fails on cluster %s with error: %s", c.Id(), err)
				mutex.Unlock()
				return
			}
		}(cluster)
	}
	wg.Wait()

	return deleteError
}