Example #1
func (factory *redisPubSubFactory) dial() (redis.Conn, error) {
	// Resolve each setting by precedence: the pubsub:* key first, then the
	// legacy redis-queue:* key, then a hard-coded default.
	host, err := config.GetString("pubsub:redis-host")
	if err != nil {
		host, err = config.GetString("redis-queue:host")
		if err != nil {
			host = "localhost"
		}
	}
	port, err := config.Get("pubsub:redis-port")
	if err != nil {
		port, err = config.Get("redis-queue:port")
		if err != nil {
			port = "6379"
		}
	}
	port = fmt.Sprintf("%v", port)
	password, err := config.GetString("pubsub:redis-password")
	if err != nil {
		password, _ = config.GetString("redis-queue:password")
	}
	db, err := config.GetInt("pubsub:redis-db")
	if err != nil {
		db, err = config.GetInt("redis-queue:db")
		if err != nil {
			db = 3
		}
	}
	// Timeouts are configured as float seconds; scale by float64(time.Second)
	// before the time.Duration cast so fractional values survive.
	secondFloat := float64(time.Second)
	dialTimeout, err := config.GetFloat("pubsub:redis-dial-timeout")
	if err != nil {
		dialTimeout = 0.1
	}
	dialTimeout = dialTimeout * secondFloat
	readTimeout, err := config.GetFloat("pubsub:redis-read-timeout")
	if err != nil {
		readTimeout = 30 * 60 // default: 30 minutes
	}
	readTimeout = readTimeout * secondFloat
	writeTimeout, err := config.GetFloat("pubsub:redis-write-timeout")
	if err != nil {
		writeTimeout = 0.5
	}
	writeTimeout = writeTimeout * secondFloat
	conn, err := redis.DialTimeout("tcp", fmt.Sprintf("%s:%v", host, port), time.Duration(dialTimeout), time.Duration(readTimeout), time.Duration(writeTimeout))
	if err != nil {
		return nil, err
	}
	if password != "" {
		_, err = conn.Do("AUTH", password)
		if err != nil {
			return nil, err
		}
	}
	_, err = conn.Do("SELECT", db)
	return conn, err
}
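The recurring idiom here is worth isolating: tsuru reads timeouts as float64 seconds and scales them by float64(time.Second) before the cast, so fractional defaults such as 0.1 are preserved. A minimal, self-contained sketch of both the float and the integer variants (the helper name is ours, not tsuru's):

package main

import (
	"fmt"
	"time"
)

// floatSeconds converts a float64 second count to a time.Duration by
// scaling before the cast, preserving fractional values such as 0.1.
func floatSeconds(s float64) time.Duration {
	return time.Duration(s * float64(time.Second))
}

func main() {
	fmt.Println(floatSeconds(0.1))                // 100ms
	fmt.Println(floatSeconds(30 * 60))            // 30m0s, the read-timeout default
	fmt.Println(time.Duration(300) * time.Second) // 5m0s, the integer variant
}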
Example #2
func legacyAutoScaleRule() *autoScaleRule {
	metadataFilter, _ := config.GetString("docker:auto-scale:metadata-filter")
	maxContainerCount, _ := config.GetInt("docker:auto-scale:max-container-count")
	scaleDownRatio, _ := config.GetFloat("docker:auto-scale:scale-down-ratio")
	preventRebalance, _ := config.GetBool("docker:auto-scale:prevent-rebalance")
	return &autoScaleRule{
		MaxContainerCount: maxContainerCount,
		MetadataFilter:    metadataFilter,
		ScaleDownRatio:    float32(scaleDownRatio),
		PreventRebalance:  preventRebalance,
		Enabled:           true,
	}
}
Example #3
func (p *dockerProvisioner) initAutoScaleConfig() *autoScaleConfig {
	waitSecondsNewMachine, _ := config.GetInt("docker:auto-scale:wait-new-time")
	groupByMetadata, _ := config.GetString("docker:auto-scale:group-by-metadata")
	matadataFilter, _ := config.GetString("docker:auto-scale:metadata-filter")
	maxContainerCount, _ := config.GetInt("docker:auto-scale:max-container-count")
	runInterval, _ := config.GetInt("docker:auto-scale:run-interval")
	scaleDownRatio, _ := config.GetFloat("docker:auto-scale:scale-down-ratio")
	preventRebalance, _ := config.GetBool("docker:auto-scale:prevent-rebalance")
	totalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	return &autoScaleConfig{
		provisioner:         p,
		groupByMetadata:     groupByMetadata,
		totalMemoryMetadata: totalMemoryMetadata,
		maxMemoryRatio:      float32(maxUsedMemory),
		maxContainerCount:   maxContainerCount,
		matadataFilter:      matadataFilter,
		scaleDownRatio:      float32(scaleDownRatio),
		waitTimeNewMachine:  time.Duration(waitSecondsNewMachine) * time.Second,
		runInterval:         time.Duration(runInterval) * time.Second,
		preventRebalance:    preventRebalance,
		done:                make(chan bool),
	}
}
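For context, the keys read above live under docker:auto-scale in tsuru's configuration file. A hedged usage sketch with the real github.com/tsuru/config package (the file path is illustrative only):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/tsuru/config"
)

func main() {
	// Load the tsuru configuration file; the path is hypothetical.
	if err := config.ReadConfigFile("/etc/tsuru/tsuru.conf"); err != nil {
		log.Fatal(err)
	}
	// A missing key returns an error; discarding it leaves the zero value
	// in place, exactly as initAutoScaleConfig does above.
	interval, _ := config.GetInt("docker:auto-scale:run-interval")
	fmt.Println(time.Duration(interval) * time.Second)
}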
Example #4
func (r *autoScaleRule) normalize() error {
	if r.ScaleDownRatio == 0.0 {
		r.ScaleDownRatio = 1.333
	} else if r.ScaleDownRatio <= 1.0 {
		err := fmt.Errorf("invalid rule, scale down ratio needs to be greater than 1.0, got %f", r.ScaleDownRatio)
		r.Error = err.Error()
		return err
	}
	if r.MaxMemoryRatio == 0.0 {
		maxMemoryRatio, _ := config.GetFloat("docker:scheduler:max-used-memory")
		r.MaxMemoryRatio = float32(maxMemoryRatio)
	}
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	if r.Enabled && r.MaxContainerCount <= 0 && (TotalMemoryMetadata == "" || r.MaxMemoryRatio <= 0) {
		err := fmt.Errorf("invalid rule, either memory information or max container count must be set")
		r.Error = err.Error()
		return err
	}
	return nil
}
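The ratio validation is compact enough to restate on its own: zero means "apply the 1.333 default", and anything in (0, 1] is rejected, since scaling down at a ratio at or below 1 would immediately undo every scale-up. A standalone sketch of that rule (the helper is ours):

package main

import (
	"errors"
	"fmt"
)

// normalizeRatio mirrors the ScaleDownRatio handling in normalize above:
// 0 selects the default, values at or below 1.0 are invalid, anything
// greater passes through unchanged.
func normalizeRatio(ratio float32) (float32, error) {
	switch {
	case ratio == 0.0:
		return 1.333, nil
	case ratio <= 1.0:
		return 0, errors.New("scale down ratio needs to be greater than 1.0")
	default:
		return ratio, nil
	}
}

func main() {
	r, _ := normalizeRatio(0)
	fmt.Println(r) // 1.333
	_, err := normalizeRatio(0.9)
	fmt.Println(err) // rejected
}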
Example #5
File: queue.go Project: tsuru/tsuru
func Queue() (monsterqueue.Queue, error) {
	queueData.RLock()
	if queueData.instance != nil {
		defer queueData.RUnlock()
		return queueData.instance, nil
	}
	queueData.RUnlock()
	queueData.Lock()
	defer queueData.Unlock()
	if queueData.instance != nil {
		return queueData.instance, nil
	}
	queueMongoUrl, _ := config.GetString("queue:mongo-url")
	if queueMongoUrl == "" {
		queueMongoUrl = "localhost:27017"
	}
	queueMongoDB, _ := config.GetString("queue:mongo-database")
	pollingInterval, _ := config.GetFloat("queue:mongo-polling-interval")
	if pollingInterval == 0.0 {
		pollingInterval = 1.0
	}
	conf := mongodb.QueueConfig{
		CollectionPrefix: "tsuru",
		Url:              queueMongoUrl,
		Database:         queueMongoDB,
		PollingInterval:  time.Duration(pollingInterval * float64(time.Second)),
	}
	var err error
	queueData.instance, err = mongodb.NewQueue(conf)
	if err != nil {
		return nil, errors.Wrap(err, "could not create queue instance, please check queue:mongo-url and queue:mongo-database config entries. error")
	}
	shutdown.Register(&queueData)
	go queueData.instance.ProcessLoop()
	return queueData.instance, nil
}
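Queue is a textbook double-checked lock: the read lock covers the fast path, and the instance is re-checked under the write lock because another goroutine may have won the race in between. A minimal sketch of the pattern with stand-in types (monsterqueue.Queue replaced by a plain pointer):

package main

import (
	"fmt"
	"sync"
)

type lazyQueue struct {
	sync.RWMutex
	instance *string // stand-in for monsterqueue.Queue
}

func (l *lazyQueue) get(build func() *string) *string {
	l.RLock()
	if l.instance != nil {
		defer l.RUnlock()
		return l.instance
	}
	l.RUnlock()
	l.Lock()
	defer l.Unlock()
	// Re-check: another goroutine may have built the instance while we
	// were waiting for the write lock.
	if l.instance != nil {
		return l.instance
	}
	l.instance = build()
	return l.instance
}

func main() {
	q := &lazyQueue{}
	s := q.get(func() *string { v := "queue"; return &v })
	fmt.Println(*s)
}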
Example #6
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			return err
		}
		p.collectionName = name
	}
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.Hook = &bs.ClusterHook{Provisioner: p}
	autoHealingNodes, _ := config.GetBool("docker:healing:heal-nodes")
	if autoHealingNodes {
		disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
		if disabledSeconds <= 0 {
			disabledSeconds = 30
		}
		maxFailures, _ := config.GetInt("docker:healing:max-failures")
		if maxFailures <= 0 {
			maxFailures = 5
		}
		waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
		if waitSecondsNewMachine <= 0 {
			waitSecondsNewMachine = 5 * 60
		}
		nodeHealer := healer.NewNodeHealer(healer.NodeHealerArgs{
			Provisioner:           p,
			DisabledTime:          time.Duration(disabledSeconds) * time.Second,
			WaitTimeNewMachine:    time.Duration(waitSecondsNewMachine) * time.Second,
			FailuresBeforeHealing: maxFailures,
		})
		shutdown.Register(nodeHealer)
		p.cluster.Healer = nodeHealer
	}
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		go contHealerInst.RunContainerHealer()
	}
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	return nil
}
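The healing block repeats one small pattern three times: read an int, ignore the error (a missing key yields 0), and clamp non-positive values to a default. A hypothetical helper, not part of tsuru, that captures the sequence used for disabled-time (30s), max-failures (5), and wait-new-time (300s):

import "github.com/tsuru/config"

// intOrDefault collapses the "read, ignore the error, clamp non-positive
// to a default" sequence from initDockerCluster above.
func intOrDefault(key string, def int) int {
	v, err := config.GetInt(key)
	if err != nil || v <= 0 {
		return def
	}
	return v
}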
Example #7
func NewRedisDefaultConfig(prefix string, defaultConfig *CommonConfig) (Client, error) {
	db, err := config.GetInt(prefix + ":redis-db")
	if err != nil && defaultConfig.TryLegacy {
		db, err = config.GetInt(prefix + ":db")
	}
	if err == nil {
		defaultConfig.DB = int64(db)
	}
	password, err := config.GetString(prefix + ":redis-password")
	if err != nil && defaultConfig.TryLegacy {
		password, err = config.GetString(prefix + ":password")
	}
	if err == nil {
		defaultConfig.Password = password
	}
	poolSize, err := config.GetInt(prefix + ":redis-pool-size")
	if err == nil {
		defaultConfig.PoolSize = poolSize
	}
	maxRetries, err := config.GetInt(prefix + ":redis-max-retries")
	if err == nil {
		defaultConfig.MaxRetries = maxRetries
	}
	poolTimeout, err := config.GetFloat(prefix + ":redis-pool-timeout")
	if err == nil {
		defaultConfig.PoolTimeout = time.Duration(poolTimeout * float64(time.Second))
	}
	idleTimeout, err := config.GetFloat(prefix + ":redis-pool-idle-timeout")
	if err == nil {
		defaultConfig.IdleTimeout = time.Duration(idleTimeout * float64(time.Second))
	}
	dialTimeout, err := config.GetFloat(prefix + ":redis-dial-timeout")
	if err == nil {
		defaultConfig.DialTimeout = time.Duration(dialTimeout * float64(time.Second))
	}
	readTimeout, err := config.GetFloat(prefix + ":redis-read-timeout")
	if err == nil {
		defaultConfig.ReadTimeout = time.Duration(readTimeout * float64(time.Second))
	}
	writeTimeout, err := config.GetFloat(prefix + ":redis-write-timeout")
	if err == nil {
		defaultConfig.WriteTimeout = time.Duration(writeTimeout * float64(time.Second))
	}
	sentinels, err := config.GetString(prefix + ":redis-sentinel-addrs")
	if err == nil {
		masterName, _ := config.GetString(prefix + ":redis-sentinel-master")
		if masterName == "" {
			return nil, fmt.Errorf("%s:redis-sentinel-master must be specified if using redis-sentinel", prefix)
		}
		log.Debugf("Connecting to redis sentinel from %q config prefix. Addrs: %s. Master: %s. DB: %d.", prefix, sentinels, masterName, db)
		return newRedisSentinel(createServerList(sentinels), masterName, defaultConfig)
	}
	cluster, err := config.GetString(prefix + ":redis-cluster-addrs")
	if err == nil {
		if defaultConfig.DB != 0 {
			return nil, fmt.Errorf("could not initialize redis from %q config, using redis-cluster with db != 0 is not supported", prefix)
		}
		if defaultConfig.MaxRetries != 0 {
			return nil, fmt.Errorf("could not initialize redis from %q config, using redis-cluster with max-retries > 0 is not supported", prefix)
		}
		log.Debugf("Connecting to redis cluster from %q config prefix. Addrs: %s. DB: %d.", prefix, cluster, db)
		return redisCluster(createServerList(cluster), defaultConfig)
	}
	server, err := config.GetString(prefix + ":redis-server")
	if err == nil {
		log.Debugf("Connecting to redis server from %q config prefix. Addr: %s. DB: %d.", prefix, server, db)
		return redisServer(server, defaultConfig)
	}
	host, err := config.GetString(prefix + ":redis-host")
	if err != nil && defaultConfig.TryLegacy {
		host, err = config.GetString(prefix + ":host")
	}
	if err == nil {
		portStr := "6379"
		port, err := config.Get(prefix + ":redis-port")
		if err != nil && defaultConfig.TryLegacy {
			port, err = config.Get(prefix + ":port")
		}
		if err == nil {
			portStr = fmt.Sprintf("%v", port)
		}
		addr := fmt.Sprintf("%s:%s", host, portStr)
		log.Debugf("Connecting to redis host/port from %q config prefix. Addr: %s. DB: %d.", prefix, addr, db)
		return redisServer(addr, defaultConfig)
	}
	if defaultConfig.TryLocal {
		addr := "localhost:6379"
		log.Debugf("Connecting to redis on localhost from %q config prefix. Addr: %s. DB: %d.", prefix, addr, db)
		return redisServer(addr, defaultConfig)
	}
	return nil, ErrNoRedisConfig
}
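The TryLegacy handling follows one shape throughout: try the redis-*-prefixed key first, and only when it is absent and legacy lookup is enabled, consult the bare key. A hedged distillation of that shape (the helper is ours, not part of this package):

import "github.com/tsuru/config"

// getStringWithLegacy mirrors the fallback used in NewRedisDefaultConfig:
// the "redis-" prefixed key wins, and the bare legacy key is consulted
// only when tryLegacy is set and the new key is missing.
func getStringWithLegacy(prefix, key string, tryLegacy bool) (string, error) {
	v, err := config.GetString(prefix + ":redis-" + key)
	if err != nil && tryLegacy {
		v, err = config.GetString(prefix + ":" + key)
	}
	return v, err
}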
Example #8
func (p *dockerProvisioner) initDockerCluster() error {
	debug, _ := config.GetBool("debug")
	clusterLog.SetDebug(debug)
	clusterLog.SetLogger(log.GetStdLogger())
	var err error
	if p.storage == nil {
		p.storage, err = buildClusterStorage()
		if err != nil {
			return err
		}
	}
	if p.collectionName == "" {
		var name string
		name, err = config.GetString("docker:collection")
		if err != nil {
			return err
		}
		p.collectionName = name
	}
	var nodes []cluster.Node
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	maxUsedMemory, _ := config.GetFloat("docker:scheduler:max-used-memory")
	p.scheduler = &segregatedScheduler{
		maxMemoryRatio:      float32(maxUsedMemory),
		TotalMemoryMetadata: TotalMemoryMetadata,
		provisioner:         p,
	}
	caPath, _ := config.GetString("docker:tls:root-path")
	if caPath != "" {
		p.caCert, err = ioutil.ReadFile(filepath.Join(caPath, "ca.pem"))
		if err != nil {
			return err
		}
		p.clientCert, err = ioutil.ReadFile(filepath.Join(caPath, "cert.pem"))
		if err != nil {
			return err
		}
		p.clientKey, err = ioutil.ReadFile(filepath.Join(caPath, "key.pem"))
		if err != nil {
			return err
		}
	}
	p.cluster, err = cluster.New(p.scheduler, p.storage, nodes...)
	if err != nil {
		return err
	}
	p.cluster.AddHook(cluster.HookEventBeforeContainerCreate, &internalNodeContainer.ClusterHook{Provisioner: p})
	if tsuruHealer.HealerInstance != nil {
		healer := hookHealer{p: p}
		p.cluster.Healer = healer
		p.cluster.AddHook(cluster.HookEventBeforeNodeUnregister, healer)
	}
	healContainersSeconds, _ := config.GetInt("docker:healing:heal-containers-timeout")
	if healContainersSeconds > 0 {
		contHealerInst := healer.NewContainerHealer(healer.ContainerHealerArgs{
			Provisioner:         p,
			MaxUnresponsiveTime: time.Duration(healContainersSeconds) * time.Second,
			Done:                make(chan bool),
			Locker:              &appLocker{},
		})
		shutdown.Register(contHealerInst)
		go contHealerInst.RunContainerHealer()
	}
	activeMonitoring, _ := config.GetInt("docker:healing:active-monitoring-interval")
	if activeMonitoring > 0 {
		p.cluster.StartActiveMonitoring(time.Duration(activeMonitoring) * time.Second)
	}
	autoScale := p.initAutoScaleConfig()
	if autoScale.Enabled {
		shutdown.Register(autoScale)
		go autoScale.run()
	}
	limitMode, _ := config.GetString("docker:limit:mode")
	if limitMode == "global" {
		p.actionLimiter = &provision.MongodbLimiter{}
	} else {
		p.actionLimiter = &provision.LocalLimiter{}
	}
	actionLimit, _ := config.GetUint("docker:limit:actions-per-host")
	if actionLimit > 0 {
		p.actionLimiter.Initialize(actionLimit)
	}
	return nil
}
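The closing block picks the limiter by mode: "global" shares state across tsuru API servers through MongoDB, anything else stays in-process, and a zero actions-per-host limit skips initialization entirely. A minimal sketch of that selection, with an interface of our own standing in for tsuru's limiter type:

// limiter is a stand-in for tsuru's provision limiter; the selection
// logic mirrors the mode switch in initDockerCluster above.
type limiter interface {
	Initialize(limit uint)
}

func pickLimiter(mode string, global, local limiter, limit uint) limiter {
	l := local
	if mode == "global" {
		l = global
	}
	if limit > 0 {
		l.Initialize(limit)
	}
	return l
}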