Example #1
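This example lazily initializes a shared Docker cluster: the package-level dCluster is built under cmutex on first call, choosing between a segregated scheduler and a static list of Docker servers, then optionally attaching a Healer and active monitoring based on configuration. Note that errors from cluster.New are silently discarded, so a failed construction leaves dCluster nil and the next call retries.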
// dockerCluster lazily builds and returns the shared package-level cluster.
// cmutex guards dCluster so concurrent callers all get the same instance.
func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	if dCluster == nil {
		debug, _ := config.GetBool("debug")
		clusterLog.SetDebug(debug)
		clusterLog.SetLogger(log.GetStdLogger())
		clusterStorage, err := buildClusterStorage()
		if err != nil {
			panic(err.Error())
		}
		var nodes []cluster.Node
		// With the segregated scheduler the cluster starts without a static
		// node list; otherwise it is seeded with the configured Docker servers.
		// Errors from cluster.New are discarded in both branches.
		if isSegregateScheduler() {
			dCluster, _ = cluster.New(&segregatedScheduler{}, clusterStorage)
		} else {
			nodes = getDockerServers()
			dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
		}
		// Healing and active monitoring are both opt-in via configuration.
		autoHealing, _ := config.GetBool("docker:auto-healing")
		if autoHealing {
			healer := Healer{}
			dCluster.SetHealer(&healer)
		}
		activeMonitoring, _ := config.GetBool("docker:active-monitoring")
		if activeMonitoring {
			dCluster.StartActiveMonitoring(1 * time.Minute)
		}
	}
	return dCluster
}
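The example relies on a mutex-guarded lazy-initialization pattern that is easy to isolate. Below is a minimal, self-contained sketch of just that pattern; the Cluster type and the getCluster name are illustrative stand-ins, not tsuru APIs:

package main

import (
	"fmt"
	"sync"
)

// Cluster is an illustrative stand-in for cluster.Cluster.
type Cluster struct {
	nodes []string
}

var (
	instance *Cluster
	mu       sync.Mutex
)

// getCluster mirrors the shape of dockerCluster above: take the lock,
// build the shared instance on first use, and return it afterwards.
func getCluster() *Cluster {
	mu.Lock()
	defer mu.Unlock()
	if instance == nil {
		instance = &Cluster{nodes: []string{"http://node1:2375"}}
	}
	return instance
}

func main() {
	c1 := getCluster()
	c2 := getCluster()
	fmt.Println(c1 == c2) // true: every caller gets the same instance
}

For pure one-time initialization, sync.Once is the more idiomatic tool; the mutex version matches the original, and also works when the guarded variable can be mutated later.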
Example #2
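This variant, a method on tsuru's dockerProvisioner, does the same job with error propagation instead of panics: it lazily builds cluster storage and the collection name, always uses the segregated scheduler (with memory limits from docker:scheduler:*), and conditionally enables node healing, container healing, active monitoring, and auto-scaling.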
// initDockerCluster wires up the provisioner's cluster: storage, scheduler,
// hooks, healing, monitoring, and auto-scaling, all driven by configuration.
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			return err
		}
		p.collectionName = name
	}
	// nodes stays empty: the cluster is created without statically configured nodes.
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.Hook = &bs.ClusterHook{Provisioner: p}
	// Node healing, with defaults of 30s disabled time, 5 failures before
	// healing, and a 5-minute wait for a replacement machine.
	autoHealingNodes, _ := config.GetBool("docker:healing:heal-nodes")
	if autoHealingNodes {
		disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
		if disabledSeconds <= 0 {
			disabledSeconds = 30
		}
		maxFailures, _ := config.GetInt("docker:healing:max-failures")
		if maxFailures <= 0 {
			maxFailures = 5
		}
		waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
		if waitSecondsNewMachine <= 0 {
			waitSecondsNewMachine = 5 * 60
		}
		nodeHealer := healer.NewNodeHealer(healer.NodeHealerArgs{
			Provisioner:           p,
			DisabledTime:          time.Duration(disabledSeconds) * time.Second,
			WaitTimeNewMachine:    time.Duration(waitSecondsNewMachine) * time.Second,
			FailuresBeforeHealing: maxFailures,
		})
		shutdown.Register(nodeHealer)
		p.cluster.Healer = nodeHealer
	}
	// Container healing runs on its own goroutine when a timeout is configured.
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		go contHealerInst.RunContainerHealer()
	}
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	return nil
}
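The healing block above repeats one idiom three times: read an integer number of seconds from config, fall back to a default when the value is missing or non-positive, and convert to time.Duration. A minimal sketch of that idiom (durationFromSeconds is a hypothetical helper; tsuru inlines this logic around config.GetInt):

package main

import (
	"fmt"
	"time"
)

// durationFromSeconds applies a fallback for missing or non-positive values,
// then converts whole seconds into a time.Duration.
func durationFromSeconds(seconds, fallback int) time.Duration {
	if seconds <= 0 {
		seconds = fallback
	}
	return time.Duration(seconds) * time.Second
}

func main() {
	fmt.Println(durationFromSeconds(0, 30))   // 30s: fallback applied
	fmt.Println(durationFromSeconds(300, 30)) // 5m0s
}

The explicit time.Duration(seconds) * time.Second conversion matters: multiplying an int variable by time.Second directly does not compile, since that shortcut only works for untyped constants.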
Example #3
File: provisioner.go Project: tsuru/tsuru
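This appears to be a later revision of the same method. On top of Example #2 it loads TLS material (ca.pem, cert.pem, key.pem) from docker:tls:root-path, replaces the single cluster hook with event-specific hooks, delegates node healing to a global tsuruHealer.HealerInstance when one is present, and installs a per-host action limiter.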
// This revision of initDockerCluster also loads TLS client material, registers
// event-specific cluster hooks, and installs a per-host action limiter.
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			return err
		}
		p.collectionName = name
	}
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	// Optional TLS: read the CA, client certificate, and key from the
	// configured root path.
	caPath, _ := config.GetString("docker:tls:root-path")
	if caPath != "" {
		p.caCert, err = ioutil.ReadFile(filepath.Join(caPath, "ca.pem"))
		if err != nil {
			return err
		}
		p.clientCert, err = ioutil.ReadFile(filepath.Join(caPath, "cert.pem"))
		if err != nil {
			return err
		}
		p.clientKey, err = ioutil.ReadFile(filepath.Join(caPath, "key.pem"))
		if err != nil {
			return err
		}
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.AddHook(cluster.HookEventBeforeContainerCreate, &internalNodeContainer.ClusterHook{Provisioner: p})
	// When a global healer instance exists, route node healing through it and
	// let it observe node unregistration.
	if tsuruHealer.HealerInstance != nil {
		healer := hookHealer{p: p}
		p.cluster.Healer = healer
		p.cluster.AddHook(cluster.HookEventBeforeNodeUnregister, healer)
	}
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		go contHealerInst.RunContainerHealer()
	}
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	// "global" uses the MongoDB-backed limiter shared across processes;
	// anything else falls back to an in-process limiter.
	limitMode, _ := config.GetString("docker:limit:mode")
	if limitMode == "global" {
		p.actionLimiter = &provision.MongodbLimiter{}
	} else {
		p.actionLimiter = &provision.LocalLimiter{}
	}
	actionLimit, _ := config.GetUint("docker:limit:actions-per-host")
	if actionLimit > 0 {
		p.actionLimiter.Initialize(actionLimit)
	}
	return nil
}
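Example #3 only reads the three PEM files into memory and hands the bytes to its cluster layer; a typical next step is assembling them into a *tls.Config for an HTTPS connection to a TLS-enabled Docker daemon. The sketch below shows one standard-library way to do that, using os.ReadFile (the modern replacement for ioutil.ReadFile); tlsConfigFromDir and the directory path are illustrative, not tsuru code:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
	"path/filepath"
)

// tlsConfigFromDir builds a client TLS config from ca.pem, cert.pem, and
// key.pem in the given directory, matching the file layout read above.
func tlsConfigFromDir(dir string) (*tls.Config, error) {
	caPEM, err := os.ReadFile(filepath.Join(dir, "ca.pem"))
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no valid CA certificates in %s", dir)
	}
	cert, err := tls.LoadX509KeyPair(
		filepath.Join(dir, "cert.pem"),
		filepath.Join(dir, "key.pem"),
	)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		RootCAs:      pool,                    // verify the daemon's certificate
		Certificates: []tls.Certificate{cert}, // present our client certificate
	}, nil
}

func main() {
	cfg, err := tlsConfigFromDir("/etc/docker/certs")
	if err != nil {
		fmt.Println("TLS setup failed:", err)
		return
	}
	fmt.Println("client certificates loaded:", len(cfg.Certificates))
}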