Пример #1
0
// AddBackend creates an Elastic Load Balancer named after the backend and
// records the name in the router storage. Depending on the
// "juju:elb-use-vpc" setting, the balancer is placed either inside the
// configured VPC subnets/security groups (internal scheme) or in the
// configured availability zones.
func (r elbRouter) AddBackend(name string) error {
	var err error
	options := elb.CreateLoadBalancer{
		Name: name,
		Listeners: []elb.Listener{
			{
				InstancePort:     80,
				InstanceProtocol: "HTTP",
				LoadBalancerPort: 80,
				Protocol:         "HTTP",
			},
		},
	}
	// Missing key simply means "not using VPC"; the error is intentionally
	// ignored so vpc defaults to false.
	vpc, _ := config.GetBool("juju:elb-use-vpc")
	if vpc {
		options.Subnets, err = config.GetList("juju:elb-vpc-subnets")
		if err != nil {
			return err
		}
		options.SecurityGroups, err = config.GetList("juju:elb-vpc-secgroups")
		if err != nil {
			return err
		}
		options.Scheme = "internal"
	} else {
		options.AvailZones, err = config.GetList("juju:elb-avail-zones")
		if err != nil {
			return err
		}
	}
	// BUG FIX: the original discarded this error and stored the route even
	// when the load balancer was never created.
	if _, err = r.elb().CreateLoadBalancer(&options); err != nil {
		return err
	}
	return router.Store(name, name)
}
Пример #2
0
// dockerCluster lazily builds and returns the process-wide docker cluster,
// guarded by cmutex so concurrent callers initialize it at most once.
// NOTE(review): errors from cluster.New and the initial GetBool are
// silently discarded — presumably a deliberate best-effort; confirm.
func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	if dCluster == nil {
		// With "docker:segregate" set, node selection is delegated to
		// segScheduler; otherwise nodes come from "docker:servers".
		if segregate, _ := config.GetBool("docker:segregate"); segregate {
			dCluster, _ = cluster.New(segScheduler)
		} else {
			clusterNodes = make(map[string]string)
			servers, _ := config.GetList("docker:servers")
			if len(servers) < 1 {
				log.Fatal(`Tsuru is misconfigured. Setting "docker:servers" is mandatory`)
			}
			nodes := make([]cluster.Node, len(servers))
			// Node IDs are synthesized positionally ("server0", "server1", ...)
			// and mirrored into the clusterNodes id->address map.
			for index, server := range servers {
				id := fmt.Sprintf("server%d", index)
				node := cluster.Node{
					ID:      id,
					Address: server,
				}
				nodes[index] = node
				clusterNodes[id] = server
			}
			dCluster, _ = cluster.New(nil, nodes...)
		}
		// Optional redis-backed storage: only wired up when a redis server
		// is configured; a configured password selects the authenticated
		// variant. Prefix may be absent (error ignored -> empty prefix).
		if redisServer, err := config.GetString("docker:scheduler:redis-server"); err == nil {
			prefix, _ := config.GetString("docker:scheduler:redis-prefix")
			if password, err := config.GetString("docker:scheduler:redis-password"); err == nil {
				dCluster.SetStorage(storage.AuthenticatedRedis(redisServer, password, prefix))
			} else {
				dCluster.SetStorage(storage.Redis(redisServer, prefix))
			}
		}
	}
	return dCluster
}
Пример #3
0
// GetServers returns the host list configured under "groups:<group>".
// If the group is not present in the configuration, an error message is
// printed and the process exits with status 1.
func GetServers(group string) []string {
	hosts, err := config.GetList("groups:" + group)
	if err != nil {
		// BUG FIX: "does not exists" -> "does not exist"; also report the
		// failure on stderr, where diagnostics belong before os.Exit(1).
		fmt.Fprintf(os.Stderr, "Group does not exist: %s\n", group)
		os.Exit(1)
	}
	return hosts
}
Пример #4
0
///////////////////////////////////////////////////////////////////////////////////////////////////////
// GetStringList
///////////////////////////////////////////////////////////////////////////////////////////////////////
func (c *Config) GetStringList(key string) []string {

	value, err := config.GetList(key)
	if err != nil {
		if value, ok := c.defaults[key]; ok {
			return value.([]string)
		} else {
			c.ThrowKeyPanic(key)
		}
	}

	return value
}
Пример #5
0
// Create creates a new Elastic Load Balancing instance for the given app. The
// name of the instance will be the same as the name of the app.
func (m *ELBManager) Create(app provision.Named) error {
	options := elb.CreateLoadBalancer{
		Name: app.GetName(),
		Listeners: []elb.Listener{
			{
				InstancePort:     80,
				InstanceProtocol: "HTTP",
				LoadBalancerPort: 80,
				Protocol:         "HTTP",
			},
		},
	}
	var err error
	if m.vpc() {
		options.Subnets, err = config.GetList("juju:elb-vpc-subnets")
		if err != nil {
			log.Fatal(err)
		}
		options.SecurityGroups, err = config.GetList("juju:elb-vpc-secgroups")
		if err != nil {
			log.Fatal(err)
		}
		options.Scheme = "internal"
	} else {
		options.AvailZones, err = config.GetList("juju:elb-avail-zones")
		if err != nil {
			log.Fatal(err)
		}
	}
	resp, err := m.elb().CreateLoadBalancer(&options)
	if err != nil {
		return err
	}
	lb := loadBalancer{Name: app.GetName(), DNSName: resp.DNSName}
	conn, collection := m.collection()
	defer conn.Close()
	return collection.Insert(lb)
}
Пример #6
0
///////////////////////////////////////////////////////////////////////////////////////////////////////
// GetIntList
///////////////////////////////////////////////////////////////////////////////////////////////////////
// GetIntList returns the configuration value under key converted to an
// int slice. When the key is absent from the config source it falls back
// to c.defaults; if no default exists either, ThrowKeyPanic is invoked.
func (c *Config) GetIntList(key string) []int {
	value, err := config.GetList(key)
	if err == nil {
		return c.stringSlice2IntSlice(value)
	}
	if def, ok := c.defaults[key]; ok {
		return def.([]int)
	}
	c.ThrowKeyPanic(key)
	// Reached only if ThrowKeyPanic returns; matches the original's
	// trailing `return nil`.
	return nil
}
Пример #7
0
// getDockerServers reads "docker:servers" from the configuration and
// returns one cluster.Node per configured address, synthesizing positional
// IDs ("server0", "server1", ...). The package-level clusterNodes map is
// reset and repopulated with the id->address pairs as a side effect.
// A missing config key is tolerated (error ignored) and yields no nodes.
func getDockerServers() []cluster.Node {
	servers, _ := config.GetList("docker:servers")
	clusterNodes = make(map[string]string)
	// Final length is known: pre-size capacity to avoid repeated growth.
	nodes := make([]cluster.Node, 0, len(servers))
	for index, server := range servers {
		id := fmt.Sprintf("server%d", index)
		nodes = append(nodes, cluster.Node{
			ID:      id,
			Address: server,
		})
		clusterNodes[id] = server
	}
	return nodes
}
Пример #8
0
// Valves loads the "valves" list from configuration and returns a map from
// valve id to its Valve model (created on first use). Entries that cannot
// be parsed as integers are logged and skipped; a missing "valves" key is
// fatal because the application cannot run without it.
func Valves() map[int]*models.Valve {
	relays := make(map[int]*models.Valve)

	valves, err := config.GetList("valves")
	if err != nil {
		log.Fatalf("Could not load the valves id: %v", err)
	}

	for _, value := range valves {
		valve, err := strconv.Atoi(value)
		if err != nil {
			// BUG FIX: the original format string had no argument for %s
			// (printing "%!s(MISSING)") and then registered valve 0 anyway;
			// skip unparsable entries instead.
			log.Printf("Valve %s could not be configured. Ignoring", value)
			continue
		}
		relays[valve] = models.FirstValveOrCreate(valve)
	}

	return relays
}
Пример #9
0
// dockerCluster lazily builds and returns the process-wide docker cluster,
// guarded by cmutext so concurrent callers initialize it at most once.
// It is fatal to run without at least one entry in "docker:servers".
// NOTE(review): the error from cluster.New is silently discarded —
// presumably deliberate; confirm before relying on dCluster being usable.
func dockerCluster() *cluster.Cluster {
	cmutext.Lock()
	defer cmutext.Unlock()
	if dCluster == nil {
		servers, _ := config.GetList("docker:servers")
		if len(servers) < 1 {
			log.Fatal(`Tsuru is misconfigured. Setting "docker:servers" is mandatory`)
		}
		// Final length is known: allocate once and assign by index instead
		// of growing with append.
		nodes := make([]cluster.Node, len(servers))
		for index, server := range servers {
			nodes[index] = cluster.Node{
				ID:      fmt.Sprintf("server%d", index),
				Address: server,
			}
		}
		dCluster, _ = cluster.New(nodes...)
	}
	return dCluster
}