// getS3Endpoint builds an S3 client from the AWS credentials defined in the
// tsuru configuration file.
func (bootstrapInstanceIDHealer) getS3Endpoint() *s3.S3 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err.Error())
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err.Error())
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return s3.New(auth, aws.USEast)
}
// dockerCluster lazily initializes the Docker cluster used by the provisioner.
// When "docker:segregate" is enabled it delegates node selection to the
// segregated scheduler; otherwise it registers the nodes listed in
// "docker:servers". If a Redis server is configured, it is used as the
// cluster storage backend.
func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	if dCluster == nil {
		if segregate, _ := config.GetBool("docker:segregate"); segregate {
			dCluster, _ = cluster.New(segScheduler)
		} else {
			clusterNodes = make(map[string]string)
			servers, _ := config.GetList("docker:servers")
			if len(servers) < 1 {
				log.Fatal(`Tsuru is misconfigured. Setting "docker:servers" is mandatory`)
			}
			nodes := make([]cluster.Node, len(servers))
			for index, server := range servers {
				id := fmt.Sprintf("server%d", index)
				node := cluster.Node{
					ID:      id,
					Address: server,
				}
				nodes[index] = node
				clusterNodes[id] = server
			}
			dCluster, _ = cluster.New(nil, nodes...)
		}
		if redisServer, err := config.GetString("docker:scheduler:redis-server"); err == nil {
			prefix, _ := config.GetString("docker:scheduler:redis-prefix")
			if password, err := config.GetString("docker:scheduler:redis-password"); err == nil {
				dCluster.SetStorage(storage.AuthenticatedRedis(redisServer, password, prefix))
			} else {
				dCluster.SetStorage(storage.Redis(redisServer, prefix))
			}
		}
	}
	return dCluster
}
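// The settings read by dockerCluster come from the tsuru configuration file.
// A minimal sketch of the relevant keys, assuming the usual nested YAML
// layout of tsuru.conf (the addresses and values below are placeholders, not
// taken from a real deployment):
//
//	docker:
//	  segregate: false
//	  servers:
//	    - http://10.0.0.1:4243
//	    - http://10.0.0.2:4243
//	  scheduler:
//	    redis-server: 127.0.0.1:6379
//	    redis-prefix: docker-cluster
//	    redis-password: secret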
// getEC2Endpoint builds an EC2 client using the credentials and endpoint
// defined in the tsuru configuration file.
func getEC2Endpoint() *ec2.EC2 {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err.Error())
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err.Error())
	}
	endpoint, err := config.GetString("aws:ec2:endpoint")
	if err != nil {
		log.Fatal(err.Error())
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	return ec2.New(auth, aws.Region{EC2Endpoint: endpoint})
}
// getELBEndpoint builds an ELB client using the AWS credentials and the
// "juju:elb-endpoint" setting from the tsuru configuration file.
func getELBEndpoint() *elb.ELB {
	access, err := config.GetString("aws:access-key-id")
	if err != nil {
		log.Fatal(err.Error())
	}
	secret, err := config.GetString("aws:secret-access-key")
	if err != nil {
		log.Fatal(err.Error())
	}
	endpoint, err := config.GetString("juju:elb-endpoint")
	if err != nil {
		log.Fatal(err.Error())
	}
	auth := aws.Auth{AccessKey: access, SecretKey: secret}
	region := aws.Region{ELBEndpoint: endpoint}
	return elb.New(auth, region)
}
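// getS3Endpoint, getEC2Endpoint, and getELBEndpoint above only assemble SDK
// clients; every value comes from the tsuru configuration file. A minimal
// sketch of the keys they read, assuming the usual nested YAML layout of
// tsuru.conf (the values shown are placeholders):
//
//	aws:
//	  access-key-id: <access key>
//	  secret-access-key: <secret key>
//	  ec2:
//	    endpoint: https://ec2.us-east-1.amazonaws.com
//	juju:
//	  elb-endpoint: https://elasticloadbalancing.us-east-1.amazonaws.com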
// collection returns the MongoDB collection named by "docker:collection".
// Note that a database connection error is only logged, not fatal, here.
func collection() *db.Collection {
	name, err := config.GetString("docker:collection")
	if err != nil {
		log.Fatal(err.Error())
	}
	conn, err := db.Conn()
	if err != nil {
		log.Errorf("Failed to connect to the database: %s", err)
	}
	return conn.Collection(name)
}
// collection returns an open database connection and the MongoDB collection
// named by "juju:elb-collection". Callers are responsible for closing the
// returned connection.
func (m *ELBManager) collection() (*db.Storage, *mgo.Collection) {
	name, err := config.GetString("juju:elb-collection")
	if err != nil {
		log.Fatal("juju:elb-collection is undefined in the config file.")
	}
	conn, err := db.Conn()
	if err != nil {
		log.Fatalf("[juju] Failed to connect to the database: %s", err)
	}
	return conn, conn.Collection(name)
}
// Create creates a new Elastic Load Balancing instance for the given app. The
// name of the instance will be the same as the name of the app.
func (m *ELBManager) Create(app provision.Named) error {
	options := elb.CreateLoadBalancer{
		Name: app.GetName(),
		Listeners: []elb.Listener{
			{
				InstancePort:     80,
				InstanceProtocol: "HTTP",
				LoadBalancerPort: 80,
				Protocol:         "HTTP",
			},
		},
	}
	var err error
	if m.vpc() {
		options.Subnets, err = config.GetList("juju:elb-vpc-subnets")
		if err != nil {
			log.Fatal(err.Error())
		}
		options.SecurityGroups, err = config.GetList("juju:elb-vpc-secgroups")
		if err != nil {
			log.Fatal(err.Error())
		}
		options.Scheme = "internal"
	} else {
		options.AvailZones, err = config.GetList("juju:elb-avail-zones")
		if err != nil {
			log.Fatal(err.Error())
		}
	}
	resp, err := m.elb().CreateLoadBalancer(&options)
	if err != nil {
		return err
	}
	lb := loadBalancer{Name: app.GetName(), DNSName: resp.DNSName}
	conn, collection := m.collection()
	defer conn.Close()
	return collection.Insert(lb)
}
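// A minimal usage sketch for ELBManager.Create. The fakeApp type and the
// createLoadBalancerExample function below are hypothetical and exist only to
// illustrate the call shape: Create uses provision.Named solely through
// GetName(), and the zero-value ELBManager is used purely for illustration
// since the normal way of obtaining a manager is not shown in this section.
type fakeApp struct {
	name string
}

func (a fakeApp) GetName() string {
	return a.name
}

func createLoadBalancerExample() error {
	var manager ELBManager
	// Creates a load balancer named after the app and records its DNS name.
	return manager.Create(fakeApp{name: "myapp"})
}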
// dockerCluster lazily initializes the Docker cluster from the nodes listed
// in "docker:servers".
func dockerCluster() *cluster.Cluster {
	cmutext.Lock()
	defer cmutext.Unlock()
	if dCluster == nil {
		servers, _ := config.GetList("docker:servers")
		if len(servers) < 1 {
			log.Fatal(`Tsuru is misconfigured. Setting "docker:servers" is mandatory`)
		}
		nodes := []cluster.Node{}
		for index, server := range servers {
			node := cluster.Node{
				ID:      fmt.Sprintf("server%d", index),
				Address: server,
			}
			nodes = append(nodes, node)
		}
		dCluster, _ = cluster.New(nodes...)
	}
	return dCluster
}
func fatal(err error) {
	fmt.Fprintln(os.Stderr, err)
	log.Fatal(err)
}
func fatal(err error) {
	log.Fatal(err)
}
func fatal(err error) {
	log.Fatal(err.Error())
}