func (c *DockerImageExists) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if Docker Image Exists")

	cfg := cfgBuilder.BuildForCluster("")
	if err := c.client.PullImage(cfg.Registry, cfg.Image(), cfg.Tag); err != nil {
		return err
	}

	logger.Instance().Infoln("Docker Image exists, check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
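
A note on the shared pattern: every checker ends by calling checkers.HandleNextCheck(c, cfgBuilder), which suggests a chain-of-responsibility arrangement. The interface and helper below are a minimal sketch of how that chaining might be wired; both the Checker interface and the Next method are assumptions inferred from these examples, not the package's actual definitions.

// Hypothetical sketch: Checker and HandleNextCheck are inferred from the
// calls in the examples, not copied from the real checkers package.
type Checker interface {
	Check(cfgBuilder core.ServiceConfigurationBuilder) error
	Next() Checker // assumed accessor for the next checker in the chain
}

// HandleNextCheck runs the next checker in the chain, if there is one.
func HandleNextCheck(current Checker, cfgBuilder core.ServiceConfigurationBuilder) error {
	if next := current.Next(); next != nil {
		return next.Check(cfgBuilder)
	}
	return nil
}
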
Example #2
func (f *ClusterMock) DeployService(serviceConfig core.ServiceConfigurationBuilder, force bool) error {
	cfg := serviceConfig.BuildForCluster(f.Id())
	if cfg.ServiceID() == "valid1-v0" && len(servicesInfo[0].Instances) == serviceConfig.Instances {
		f.services = append(f.services, servicesInfo[0])
		return nil
	} else if cfg.ServiceID() == "valid2-v0" && len(servicesInfo[1].Instances) == serviceConfig.Instances {
		f.services = append(f.services, servicesInfo[1])
		return nil
	}

	return fmt.Errorf("Deploy with errors")
}
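
A minimal sketch of how this mock might be driven from a test. The zero-value ClusterMock, the builder field values, and the expectation that ServiceID() resolves to "valid1-v0" are assumptions, not taken from the project's actual tests.

func TestClusterMockDeployService(t *testing.T) {
	// Hypothetical builder values; the fields mirror Example #7, and the
	// combination that makes ServiceID() return "valid1-v0" is a guess.
	builder := core.ServiceConfigurationBuilder{
		ServiceId: "valid1",
		Tag:       "v0",
		Instances: len(servicesInfo[0].Instances),
	}

	mock := &ClusterMock{}
	if err := mock.DeployService(builder, false); err != nil {
		t.Fatalf("expected deploy to succeed, got: %v", err)
	}
	if len(mock.services) != 1 {
		t.Fatalf("expected one deployed service, got %d", len(mock.services))
	}
}
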
Example #3
func (c *Resources) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking Resources in all clusters")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		if err := c.checkResources(cluster, cfg); err != nil {
			return err
		}
	}

	logger.Instance().Infoln("Min Hosts Per Cluster Check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
func (c *DockerUserRestriction) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if Docker Image run with a valid user")

	cfg := cfgBuilder.BuildForCluster("")
	img, err := c.client.InspectImage(cfg.FullImageName())
	if err != nil {
		return err
	}

	if c.userRegexp.MatchString(img.Config.User) {
		return fmt.Errorf("User %s isn't allowed", img.Config.User)
	}

	logger.Instance().Infof("Docker Image runs with a valid user (%s), check done", img.Config.User)
	return checkers.HandleNextCheck(c, cfgBuilder)
}
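
Since a match on userRegexp produces the "isn't allowed" error, the expression presumably matches the forbidden users rather than the valid ones. The pattern below is a hypothetical illustration of such a restriction; the project's real expression is not shown in these examples.

// Hypothetical pattern: matches the users that are NOT allowed (root, uid 0,
// or an image with no explicit USER).
userRegexp := regexp.MustCompile(`^(root|0)?$`)

// userRegexp.MatchString("")     -> true,  image rejected
// userRegexp.MatchString("root") -> true,  image rejected
// userRegexp.MatchString("app")  -> false, image accepted
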
func (c *MinHostsPerCluster) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Checking if the service meets the minimum of instances per cluster")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		minHosts, err := strconv.Atoi(cluster.Constraints()[c.constraintId])
		if err != nil {
			return fmt.Errorf("Invalid constraint")
		}
		if minHosts > cfg.Instances {
			return fmt.Errorf("Error trying to deploy %d instances in Cluster %s configured with a min of %d instances", cfg.Instances, clusterKey, minHosts)
		}
		logger.Instance().Infof("The service needs %d instances and meets the minimum of instances %d required in cluster %s", cfg.Instances, minHosts, clusterKey)
	}

	logger.Instance().Infoln("Minimum instances per cluster check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
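
The minimum is read from a string-valued constraint map keyed by c.constraintId. The snippet below is a hypothetical illustration of that lookup; the key name and value format are guesses based on the strconv.Atoi call above.

// Hypothetical constraints, shaped like the map cluster.Constraints() returns.
constraints := map[string]string{
	"minInstancesPerCluster": "2", // assumed key name, string-encoded value
}

// A present, numeric value parses cleanly.
minHosts, _ := strconv.Atoi(constraints["minInstancesPerCluster"]) // minHosts == 2

// A missing key yields "" and makes strconv.Atoi fail, which is what triggers
// the invalid-constraint error in the checker above.
_, err := strconv.Atoi(constraints["someOtherKey"]) // err != nil
_, _ = minHosts, err
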
func (c *SameHealthCheck) Check(cfgBuilder core.ServiceConfigurationBuilder) error {
	logger.Instance().Infoln("Initializing Health Configuration Check in all clusters")

	for clusterKey, cluster := range c.clusters {
		cfg := cfgBuilder.BuildForCluster(clusterKey)
		for _, hc := range cfg.HealthCheckConfig {
			logger.Instance().Infof("Checking port %d/%s Health Configuration in cluster %s", hc.GetPort(), hc.GetProtocol(), clusterKey)
			path, err := cluster.HealthCheck(cfg.ServiceName, cfg.CompatibilityVersion, hc.GetPort())
			if err != nil {
				return err
			}
			if path != hc.GetPath() {
				return fmt.Errorf("There are different Health Checks Configuration: expected %s - configured in %s: %s", hc.GetPath(), clusterKey, path)
			}
			logger.Instance().Infof("Health Check in cluster %s is ok: %s", clusterKey, path)
		}
	}

	logger.Instance().Infoln("The service has good health paths, check done")
	return checkers.HandleNextCheck(c, cfgBuilder)
}
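
The loop relies on only three accessors of each entry in cfg.HealthCheckConfig. The interface below is a minimal sketch of what those calls imply; the element's concrete type and exact return types are assumptions.

// Hypothetical interface inferred from the GetPort/GetProtocol/GetPath calls above.
type healthCheckEntry interface {
	GetPort() int        // port probed by the health check (return type assumed)
	GetProtocol() string // e.g. "HTTP" or "TCP" (values assumed)
	GetPath() string     // path that must match across clusters
}
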
Example #7
func deployCmd(c *cli.Context) error {
	sb := core.ServiceConfigurationBuilder{
		ServiceId:             c.String("service-id"),
		RegistryDomain:        c.String("registry"),
		Namespace:             c.String("namespace"),
		Tag:                   c.String("tag"),
		Cpu:                   c.Float64("cpu"),
		Memory:                c.Float64("memory"),
		MinimumHealthCapacity: c.Float64("minimumHealthCapacity"),
		MaximumOverCapacity:   c.Float64("maximumOverCapacity"),
		SyslogTag:             c.String("syslog-tag"),
		Instances:             c.Int("instances"),
		JobUrl:                c.String("deploy-url"),
		ContactEmail:          c.String("contact-email"),
	}

	envs, err := util.ParseMultiFileLinesToArray(c.StringSlice("env-file"))
	if err != nil {
		logger.Instance().Fatalln("Error parsing environment files", err)
	}
	sb.AddEnvironmentVars(envs)
	sb.AddEnvironmentVars(c.StringSlice("env"))

	sb.AddConstraints(c.StringSlice("constraint"))

	sb.AddPorts(c.StringSlice("port"))
	sb.AddLabels(c.StringSlice("label"))

	handleDeploySigTerm(clusterManager)
	if clusterManager.DeployWithRollbackOnError(sb, c.Bool("force")) {
		logger.Instance().Infoln("Deployment READY")
		var resume []callbackResume
		for clusterKey, cluster := range clusterManager.Clusters() {
			logger.Instance().Infof("Services in Cluster %s :", clusterKey)
			for _, service := range cluster.Services() {
				for _, instance := range service.Instances {
					for _, val := range instance.Ports {
						logger.Instance().Infof("Deployed %s at host %s and address %+v", instance.ID, instance.Host, val)
						instanceInfo := callbackResume{
							Id:         instance.ID,
							Address:    instance.Host + ":" + strconv.FormatInt(val.Internal, 10),
							ClusterKey: clusterKey,
						}
						resume = append(resume, instanceInfo)
					}
				}
			}
		}
		//jsonResume, _ := json.Marshal(resume)
		//fmt.Println(string(jsonResume))
		return nil
	}

	return fmt.Errorf("Deployment-Process terminated with errors")
}
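
deployCmd reads everything from CLI flags, so the command has to be registered with those flag names somewhere. The wiring below is a minimal sketch, assuming the urfave/cli (v1) package that the *cli.Context signature suggests, and it declares only a subset of the flags read above.

// Hypothetical registration; the app setup and the flag subset are assumptions.
app := cli.NewApp()
app.Commands = []cli.Command{
	{
		Name:  "deploy",
		Usage: "deploy a service to the configured clusters",
		Flags: []cli.Flag{
			cli.StringFlag{Name: "service-id"},
			cli.StringFlag{Name: "registry"},
			cli.StringFlag{Name: "tag"},
			cli.IntFlag{Name: "instances"},
			cli.Float64Flag{Name: "cpu"},
			cli.Float64Flag{Name: "memory"},
			cli.StringSliceFlag{Name: "env-file"},
			cli.StringSliceFlag{Name: "env"},
			cli.StringSliceFlag{Name: "constraint"},
			cli.StringSliceFlag{Name: "port"},
			cli.StringSliceFlag{Name: "label"},
			cli.BoolFlag{Name: "force"},
		},
		Action: deployCmd,
	},
}
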