Example #1
// NewCluster creates a new swarm Cluster backed by the given scheduler,
// TLS config, and discovery backend.
func NewCluster(scheduler *scheduler.Scheduler, TLSConfig *tls.Config, discovery discovery.Backend, options cluster.DriverOpts, engineOptions *cluster.EngineOpts) (cluster.Cluster, error) {
	log.WithFields(log.Fields{"name": "swarm"}).Debug("Initializing cluster")

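	// Note: inside the composite literal below, the identifier "cluster"
	// still refers to the imported cluster package; the local variable only
	// comes into scope after this short variable declaration completes.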
	cluster := &Cluster{
		eventHandlers:     cluster.NewEventHandlers(),
		engines:           make(map[string]*cluster.Engine),
		pendingEngines:    make(map[string]*cluster.Engine),
		scheduler:         scheduler,
		TLSConfig:         TLSConfig,
		discovery:         discovery,
		pendingContainers: make(map[string]*pendingContainer),
		overcommitRatio:   0.05,
		engineOpts:        engineOptions,
		createRetry:       0,
	}

	if val, ok := options.Float("swarm.overcommit", ""); ok {
		cluster.overcommitRatio = val
	}

	if val, ok := options.Int("swarm.createretry", ""); ok {
		if val < 0 {
			log.Fatalf("swarm.createretry=%d is invalid", val)
		}
		cluster.createRetry = val
	}

	discoveryCh, errCh := cluster.discovery.Watch(nil)
	go cluster.monitorDiscovery(discoveryCh, errCh)
	go cluster.monitorPendingEngines()

	return cluster, nil
}
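The swarm.overcommit and swarm.createretry keys above are read through the DriverOpts accessors, which docker/swarm feeds from --cluster-opt key=value flags. As a minimal, self-contained sketch of that lookup pattern (parseFloatOpt is a hypothetical helper, not swarm's own API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFloatOpt scans "key=value" entries for key and parses the value as a
// float64, mirroring what a DriverOpts.Float lookup has to do.
func parseFloatOpt(opts []string, key string) (float64, bool) {
	for _, o := range opts {
		k, v, found := strings.Cut(o, "=")
		if !found || k != key {
			continue
		}
		f, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return 0, false
		}
		return f, true
	}
	return 0, false
}

func main() {
	opts := []string{"swarm.overcommit=0.1", "swarm.createretry=3"}
	if val, ok := parseFloatOpt(opts, "swarm.overcommit"); ok {
		fmt.Println("overcommit ratio:", val) // overcommit ratio: 0.1
	}
}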
Example #2
// NewCluster creates a new swarm Cluster backed by the given scheduler,
// TLS config, and discovery backend.
func NewCluster(scheduler *scheduler.Scheduler, TLSConfig *tls.Config, discovery discovery.Backend, options cluster.DriverOpts, engineOptions *cluster.EngineOpts) (cluster.Cluster, error) {
	log.WithFields(log.Fields{"name": "swarm"}).Debug("Initializing cluster")

	cluster := &Cluster{
		eventHandlers:     cluster.NewEventHandlers(),
		engines:           make(map[string]*cluster.Engine),
		pendingEngines:    make(map[string]*cluster.Engine),
		scheduler:         scheduler,
		TLSConfig:         TLSConfig,
		discovery:         discovery,
		pendingContainers: make(map[string]*pendingContainer),
		overcommitRatio:   0.05,
		engineOpts:        engineOptions,
		createRetry:       0,
	}

	if val, ok := options.Float("swarm.overcommit", ""); ok {
		if val <= -1 {
			log.Fatalf("swarm.overcommit should be larger than -1, %f is invalid", val)
		} else if val < 0 {
			log.Warn("-1 < swarm.overcommit < 0 will make swarm expose fewer resources than the docker engine offers")
			cluster.overcommitRatio = val
		} else {
			cluster.overcommitRatio = val
		}
	}

	if val, ok := options.Int("swarm.createretry", ""); ok {
		if val < 0 {
			log.Fatalf("swarm.createretry can not be negative, %d is invalid", val)
		}
		cluster.createRetry = val
	}

	discoveryCh, errCh := cluster.discovery.Watch(nil)
	go cluster.monitorDiscovery(discoveryCh, errCh)
	go cluster.monitorPendingEngines()

	return cluster, nil
}
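Example #2 tightens the bounds on swarm.overcommit: the ratio presumably scales a node's advertised capacity by (1 + ratio), so a value at or below -1 would leave zero or negative capacity and is rejected, while a value in (-1, 0) merely under-advertises. A standalone sketch of the same bounds check (validateOvercommit is hypothetical, not a swarm helper):

package main

import "fmt"

// validateOvercommit mirrors the bounds enforced in Example #2: ratios at or
// below -1 are rejected, and negative ratios above -1 are allowed but make
// the node advertise less capacity than it really has.
func validateOvercommit(val float64) (float64, error) {
	if val <= -1 {
		return 0, fmt.Errorf("swarm.overcommit should be larger than -1, %f is invalid", val)
	}
	return val, nil
}

func main() {
	for _, v := range []float64{0.05, -0.5, -1.5} {
		ratio, err := validateOvercommit(v)
		fmt.Println(ratio, err)
		// 0.05 <nil>
		// -0.5 <nil>
		// 0 swarm.overcommit should be larger than -1, -1.500000 is invalid
	}
}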
Example #3
// NewCluster creates a mesos-backed Cluster pointed at the given master.
func NewCluster(scheduler *scheduler.Scheduler, TLSConfig *tls.Config, master string, options cluster.DriverOpts, engineOptions *cluster.EngineOpts) (cluster.Cluster, error) {
	log.WithFields(log.Fields{"name": "mesos"}).Debug("Initializing cluster")

	// Enable mesos-go's glog logging when running at debug level
	if log.GetLevel() == log.DebugLevel {
		flag.Lookup("logtostderr").Value.Set("true")
	}
	cluster := &Cluster{
		dockerEnginePort:    defaultDockerEnginePort,
		eventHandlers:       cluster.NewEventHandlers(),
		master:              master,
		agents:              make(map[string]*agent),
		TLSConfig:           TLSConfig,
		options:             &options,
		offerTimeout:        defaultOfferTimeout,
		taskCreationTimeout: defaultTaskCreationTimeout,
		engineOpts:          engineOptions,
		refuseTimeout:       defaultRefuseTimeout,
	}

	cluster.pendingTasks = task.NewTasks(cluster)

	// Empty string is accepted by the scheduler.
	user, _ := options.String("mesos.user", "SWARM_MESOS_USER")

	// Override the hostname here because mesos-go would otherwise shell out
	// to the hostname binary, which does not work in our official image.
	// The error is deliberately ignored so that mesos-go can still try.
	hostname, _ := os.Hostname()

	driverConfig := mesosscheduler.DriverConfig{
		Framework:        &mesosproto.FrameworkInfo{Name: proto.String(frameworkName), User: &user},
		Master:           cluster.master,
		HostnameOverride: hostname,
	}

	if taskCreationTimeout, ok := options.String("mesos.tasktimeout", "SWARM_MESOS_TASK_TIMEOUT"); ok {
		d, err := time.ParseDuration(taskCreationTimeout)
		if err != nil {
			return nil, err
		}
		cluster.taskCreationTimeout = d
	}
	// Switch to the TLS engine port when HTTPS is configured
	if cluster.TLSConfig != nil {
		cluster.dockerEnginePort = defaultDockerEngineTLSPort
	}

	if bindingPort, ok := options.Uint("mesos.port", "SWARM_MESOS_PORT"); ok {
		driverConfig.BindingPort = uint16(bindingPort)
	}

	if bindingAddress, ok := options.IP("mesos.address", "SWARM_MESOS_ADDRESS"); ok {
		if bindingAddress == nil {
			value, _ := options.String("mesos.address", "SWARM_MESOS_ADDRESS")
			return nil, fmt.Errorf(
				"invalid IP address for cluster-opt mesos.address: \"%s\"",
				value)
		}
		driverConfig.BindingAddress = bindingAddress
	}

	if checkpointFailover, ok := options.Bool("mesos.checkpointfailover", "SWARM_MESOS_CHECKPOINT_FAILOVER"); ok {
		driverConfig.Framework.Checkpoint = &checkpointFailover
	}

	if offerTimeout, ok := options.String("mesos.offertimeout", "SWARM_MESOS_OFFER_TIMEOUT"); ok {
		d, err := time.ParseDuration(offerTimeout)
		if err != nil {
			return nil, err
		}
		cluster.offerTimeout = d
	}

	if refuseTimeout, ok := options.String("mesos.offerrefusetimeout", "SWARM_MESOS_OFFER_REFUSE_TIMEOUT"); ok {
		d, err := time.ParseDuration(refuseTimeout)
		if err != nil {
			return nil, err
		}
		cluster.refuseTimeout = d
	}

	sched, err := NewScheduler(driverConfig, cluster, scheduler)
	if err != nil {
		return nil, err
	}

	cluster.scheduler = sched
	status, err := sched.driver.Start()
	if err != nil {
		log.Debugf("Mesos driver started, status/err %v: %v", status, err)
		return nil, err
	}
	log.Debugf("Mesos driver started, status %v", status)

	go func() {
		status, err := sched.driver.Join()
		log.Debugf("Mesos driver stopped unexpectedly, status/err %v: %v", status, err)
	}()

	return cluster, nil
}
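Three of the mesos options above (mesos.tasktimeout, mesos.offertimeout, mesos.offerrefusetimeout) repeat the same lookup-then-time.ParseDuration pattern; a small helper could consolidate it. A minimal sketch (durationOpt is hypothetical; raw/ok stand in for the result of a DriverOpts.String lookup):

package main

import (
	"fmt"
	"time"
)

// durationOpt parses a duration-valued option, falling back to a default
// when the option is unset.
func durationOpt(raw string, ok bool, def time.Duration) (time.Duration, error) {
	if !ok {
		return def, nil
	}
	return time.ParseDuration(raw)
}

func main() {
	d, err := durationOpt("30s", true, 5*time.Minute)
	fmt.Println(d, err) // 30s <nil>
	d, _ = durationOpt("", false, 5*time.Minute)
	fmt.Println(d) // 5m0s (default when the option is unset)
}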