func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	if dCluster == nil {
		debug, _ := config.GetBool("debug")
		clusterLog.SetDebug(debug)
		clusterLog.SetLogger(log.GetStdLogger())
		clusterStorage, err := buildClusterStorage()
		if err != nil {
			panic(err.Error())
		}
		var nodes []cluster.Node
		if isSegregateScheduler() {
			dCluster, _ = cluster.New(&segregatedScheduler{}, clusterStorage)
		} else {
			nodes = getDockerServers()
			dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
		}
		autoHealing, _ := config.GetBool("docker:auto-healing")
		if autoHealing {
			healer := Healer{}
			dCluster.SetHealer(&healer)
		}
		activeMonitoring, _ := config.GetBool("docker:active-monitoring")
		if activeMonitoring {
			dCluster.StartActiveMonitoring(1 * time.Minute)
		}
	}
	return dCluster
}
Example #2
func createUser(w http.ResponseWriter, r *http.Request) error {
	registrationEnabled, _ := config.GetBool("auth:user-registration")
	if !registrationEnabled {
		token := r.Header.Get("Authorization")
		t, err := app.AuthScheme.Auth(token)
		if err != nil {
			return createDisabledErr
		}
		user, err := t.User()
		if err != nil {
			return createDisabledErr
		}
		if !user.IsAdmin() {
			return createDisabledErr
		}
	}
	var u auth.User
	err := json.NewDecoder(r.Body).Decode(&u)
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	_, err = app.AuthScheme.Create(&u)
	if err != nil {
		return handleAuthError(err)
	}
	rec.Log(u.Email, "create-user")
	w.WriteHeader(http.StatusCreated)
	return nil
}
Example #3
func (s *S) TestGetS3Endpoint(c *gocheck.C) {
	oldRegion, _ := config.Get("aws:s3:region-name")
	defer config.Set("aws:s3:region-name", oldRegion)
	config.Set("aws:s3:region-name", "myregion")
	edp, err := config.GetString("aws:s3:endpoint")
	c.Assert(err, gocheck.IsNil)
	locConst, err := config.GetBool("aws:s3:location-constraint")
	c.Assert(err, gocheck.IsNil)
	lwrCaseBucket, err := config.GetBool("aws:s3:lowercase-bucket")
	c.Assert(err, gocheck.IsNil)
	s3 := getS3Endpoint()
	c.Assert(s3.S3Endpoint, gocheck.Equals, edp)
	c.Assert(s3.S3LocationConstraint, gocheck.Equals, locConst)
	c.Assert(s3.S3LowercaseBucket, gocheck.Equals, lwrCaseBucket)
	c.Assert(s3.Region.Name, gocheck.Equals, "myregion")
}
Example #4
func (p *JujuProvisioner) elbSupport() bool {
	if p.elb == nil {
		elb, _ := config.GetBool("juju:use-elb")
		p.elb = &elb
	}
	return *p.elb
}
Example #5
func (h elbInstanceHealer) checkInstances(names []string) ([]elbInstance, error) {
	if elbSupport, _ := config.GetBool("juju:use-elb"); !elbSupport {
		return nil, nil
	}
	lbs, err := h.describeLoadBalancers(names)
	if err != nil {
		return nil, err
	}
	var unhealthy []elbInstance
	description := "Instance has failed at least the UnhealthyThreshold number of health checks consecutively."
	state := "OutOfService"
	reasonCode := "Instance"
	for _, lb := range lbs {
		instances, err := h.describeInstancesHealth(lb)
		if err != nil {
			return nil, err
		}
		for _, instance := range instances {
			if instance.description == description &&
				instance.state == state &&
				instance.reasonCode == reasonCode {
				unhealthy = append(unhealthy, instance)
			}
		}
	}
	log.Debugf("Found %d unhealthy instances.", len(unhealthy))
	return unhealthy, nil
}
Example #6
func Initialize() (*NodeHealer, error) {
	if HealerInstance != nil {
		return nil, errors.New("healer alread initialized")
	}
	autoHealingNodes, err := config.GetBool("docker:healing:heal-nodes")
	if err != nil {
		autoHealingNodes = true
	}
	if !autoHealingNodes {
		return nil, nil
	}
	disabledSeconds, _ := config.GetInt("docker:healing:disabled-time")
	if disabledSeconds <= 0 {
		disabledSeconds = 30
	}
	maxFailures, _ := config.GetInt("docker:healing:max-failures")
	if maxFailures <= 0 {
		maxFailures = 5
	}
	waitSecondsNewMachine, _ := config.GetInt("docker:healing:wait-new-time")
	if waitSecondsNewMachine <= 0 {
		waitSecondsNewMachine = 5 * 60
	}
	HealerInstance = newNodeHealer(nodeHealerArgs{
		DisabledTime:          time.Duration(disabledSeconds) * time.Second,
		WaitTimeNewMachine:    time.Duration(waitSecondsNewMachine) * time.Second,
		FailuresBeforeHealing: maxFailures,
	})
	shutdown.Register(HealerInstance)
	return HealerInstance, nil
}
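Note that Initialize returns (nil, nil) when docker:healing:heal-nodes is false, so callers have to treat a nil healer as a valid result rather than an error. A minimal caller sketch, assuming it lives in the same package as Initialize (the wrapper function itself is illustrative, not part of the original):

func setupNodeHealing() error {
	// Only Initialize above is real; this wrapper is a hypothetical caller.
	healer, err := Initialize()
	if err != nil {
		// Initialize was already called once; nothing else returns an error.
		return err
	}
	if healer == nil {
		// docker:healing:heal-nodes is false: node healing is intentionally off.
		return nil
	}
	// HealerInstance is now set and registered with the shutdown package.
	return nil
}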
Example #7
// CreateApp creates a new app.
//
// Creating a new app is a process composed of five steps:
//
//       1. Save the app in the database
//       2. Create IAM credentials for the app
//       3. Create S3 bucket for the app (if the bucket support is enabled)
//       4. Create the git repository using gandalf
//       5. Provision units within the provisioner
func CreateApp(app *App, user *auth.User) error {
	teams, err := user.Teams()
	if err != nil {
		return err
	}
	if len(teams) == 0 {
		return NoTeamsError{}
	}
	if _, err := getPlatform(app.Platform); err != nil {
		return err
	}
	app.SetTeams(teams)
	app.Owner = user.Email
	if !app.isValid() {
		msg := "Invalid app name, your app should have at most 63 " +
			"characters, containing only lower case letters, numbers or dashes, " +
			"starting with a letter."
		return &errors.ValidationError{Message: msg}
	}
	actions := []*action.Action{&reserveUserApp, &insertApp}
	useS3, _ := config.GetBool("bucket-support")
	if useS3 {
		actions = append(actions, &createIAMUserAction,
			&createIAMAccessKeyAction,
			&createBucketAction, &createUserPolicyAction)
	}
	actions = append(actions, &exportEnvironmentsAction,
		&createRepository, &provisionApp)
	pipeline := action.NewPipeline(actions...)
	err = pipeline.Execute(app, user)
	if err != nil {
		return &AppCreationError{app: app.Name, Err: err}
	}
	return nil
}
Example #8
func (r elbRouter) AddBackend(name string) error {
	var err error
	options := elb.CreateLoadBalancer{
		Name: name,
		Listeners: []elb.Listener{
			{
				InstancePort:     80,
				InstanceProtocol: "HTTP",
				LoadBalancerPort: 80,
				Protocol:         "HTTP",
			},
		},
	}
	vpc, _ := config.GetBool("juju:elb-use-vpc")
	if vpc {
		options.Subnets, err = config.GetList("juju:elb-vpc-subnets")
		if err != nil {
			return err
		}
		options.SecurityGroups, err = config.GetList("juju:elb-vpc-secgroups")
		if err != nil {
			return err
		}
		options.Scheme = "internal"
	} else {
		options.AvailZones, err = config.GetList("juju:elb-avail-zones")
		if err != nil {
			return err
		}
	}
	_, err = r.elb().CreateLoadBalancer(&options)
	if err != nil {
		return err
	}
	return router.Store(name, name)
}
Example #9
func (c *container) start() error {
	port, err := getPort()
	if err != nil {
		return err
	}
	sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
	sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
	sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation")
	sharedSalt, _ := config.GetString("docker:sharedfs:salt")
	hostConfig := docker.HostConfig{}
	hostConfig.PortBindings = map[docker.Port][]docker.PortBinding{
		docker.Port(port + "/tcp"): {{HostIp: "", HostPort: ""}},
		docker.Port("22/tcp"):      {{HostIp: "", HostPort: ""}},
	}
	if sharedBasedir != "" && sharedMount != "" {
		if sharedIsolation {
			var appHostDir string
			if sharedSalt != "" {
				h := crypto.SHA1.New()
				io.WriteString(h, sharedSalt+c.AppName)
				appHostDir = fmt.Sprintf("%x", h.Sum(nil))
			} else {
				appHostDir = c.AppName
			}
			config.Binds = append(config.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount))
		} else {
			config.Binds = append(config.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount))
		}
	}
	err = dockerCluster().StartContainer(c.ID, &hostConfig)
	if err != nil {
		return err
	}
	return nil
}
Example #10
func (s *segregatedScheduler) filterByMemoryUsage(a *app.App, nodes []cluster.Node, maxMemoryRatio float32, TotalMemoryMetadata string) ([]cluster.Node, error) {
	if maxMemoryRatio == 0 || TotalMemoryMetadata == "" {
		return nodes, nil
	}
	hosts := make([]string, len(nodes))
	for i := range nodes {
		hosts[i] = urlToHost(nodes[i].Address)
	}
	containers, err := s.provisioner.ListContainers(bson.M{"hostaddr": bson.M{"$in": hosts}, "id": bson.M{"$nin": s.ignoredContainers}})
	if err != nil {
		return nil, err
	}
	hostReserved := make(map[string]int64)
	for _, cont := range containers {
		contApp, err := app.GetByName(cont.AppName)
		if err != nil {
			return nil, err
		}
		hostReserved[cont.HostAddr] += contApp.Plan.Memory
	}
	megabyte := float64(1024 * 1024)
	nodeList := make([]cluster.Node, 0, len(nodes))
	for _, node := range nodes {
		totalMemory, _ := strconv.ParseFloat(node.Metadata[TotalMemoryMetadata], 64)
		shouldAdd := true
		if totalMemory != 0 {
			maxMemory := totalMemory * float64(maxMemoryRatio)
			host := urlToHost(node.Address)
			nodeReserved := hostReserved[host] + a.Plan.Memory
			if nodeReserved > int64(maxMemory) {
				shouldAdd = false
				tryingToReserveMB := float64(a.Plan.Memory) / megabyte
				reservedMB := float64(hostReserved[host]) / megabyte
				limitMB := maxMemory / megabyte
				log.Errorf("Node %q has reached its memory limit. "+
					"Limit %0.4fMB. Reserved: %0.4fMB. Needed additional %0.4fMB",
					host, limitMB, reservedMB, tryingToReserveMB)
			}
		}
		if shouldAdd {
			nodeList = append(nodeList, node)
		}
	}
	if len(nodeList) == 0 {
		autoScaleEnabled, _ := config.GetBool("docker:auto-scale:enabled")
		errMsg := fmt.Sprintf("no nodes found with enough memory for container of %q: %0.4fMB",
			a.Name, float64(a.Plan.Memory)/megabyte)
		if autoScaleEnabled {
			// Allow going over quota temporarily because auto-scale will be
			// able to detect this and automatically add new nodes.
			log.Errorf("WARNING: %s. Will ignore memory restrictions.", errMsg)
			return nodes, nil
		}
		return nil, errors.New(errMsg)
	}
	return nodeList, nil
}
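The memory filter above reduces to one comparison per node: the node is kept unless already-reserved memory plus the new plan's memory exceeds total memory multiplied by maxMemoryRatio. A standalone sketch of just that check, with made-up values (the helper name and numbers are assumptions, not from the original):

// nodeHasRoom mirrors the comparison used in filterByMemoryUsage above.
func nodeHasRoom(totalBytes, reservedBytes, planBytes int64, maxMemoryRatio float32) bool {
	maxBytes := int64(float64(totalBytes) * float64(maxMemoryRatio))
	return reservedBytes+planBytes <= maxBytes
}

// e.g. nodeHasRoom(8<<30, 6<<30, 1<<30, 0.8) is false: 7 GiB needed, ~6.4 GiB allowed.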
Example #11
func Init() {
	var loggers []Logger
	debug, _ := config.GetBool("debug")
	if logFileName, err := config.GetString("log:file"); err == nil {
		loggers = append(loggers, NewFileLogger(logFileName, debug))
	} else if err == config.ErrMismatchConf {
		panic(fmt.Sprintf("%s please see http://docs.tsuru.io/en/latest/reference/config.html#log-file", err))
	}
	if disableSyslog, _ := config.GetBool("log:disable-syslog"); !disableSyslog {
		tag, _ := config.GetString("log:syslog-tag")
		if tag == "" {
			tag = "tsurud"
		}
		loggers = append(loggers, NewSyslogLogger(tag, debug))
	}
	if useStderr, _ := config.GetBool("log:use-stderr"); useStderr {
		loggers = append(loggers, NewWriterLogger(os.Stderr, debug))
	}
	SetLogger(NewMultiLogger(loggers...))
}
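Init only reads settings that have already been loaded into the config package, so it is normally called right after the configuration file is parsed. A minimal ordering sketch, assuming the tsuru config package (the file path and wrapper function are illustrative only):

func setupLogging() error {
	// config.ReadConfigFile and Init are real calls from this page;
	// the path is an assumed example.
	if err := config.ReadConfigFile("/etc/tsuru/tsuru.conf"); err != nil {
		return err
	}
	Init() // builds loggers from debug, log:file, log:disable-syslog and log:use-stderr
	return nil
}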
Example #12
func getHostAddr(hostID string) string {
	var fullAddress string
	if seg, _ := config.GetBool("docker:segregate"); seg {
		node, _ := segScheduler.GetNode(hostID)
		fullAddress = node.Address
	} else {
		fullAddress = clusterNodes[hostID]
	}
	url, _ := url.Parse(fullAddress)
	host, _, _ := net.SplitHostPort(url.Host)
	return host
}
Example #13
func readConfig(path string) (Config, error) {
	cfg := Config{}
	configFile := filepath.Join(path, "config.yaml")

	err := config.ReadConfigFile(configFile)
	if err != nil {
		return cfg, err
	}

	cfg.Id, err = config.GetString("id")
	if err != nil {
		return cfg, err
	}

	cfg.Hostname, err = config.GetString("hostname")
	if err != nil {
		return cfg, err
	}

	cfg.DiskPath = filepath.Join(path, "disk.qcow")
	cfg.Disk, err = config.GetInt("disk")
	if err != nil {
		return cfg, err
	}

	cfg.Cpu, err = config.GetInt("cpu")
	if err != nil {
		return cfg, err
	}

	cfg.Memory, err = config.GetInt("memory")
	if err != nil {
		return cfg, err
	}

	cfg.DNS, err = config.GetString("dns")
	if err != nil {
		return cfg, err
	}

	cfg.Docker, err = config.GetString("docker")
	if err != nil {
		return cfg, err
	}

	cfg.Extra, err = config.GetString("extra")
	if err != nil {
		return cfg, err
	}

	cfg.Route, err = config.GetBool("route")
	return cfg, err
}
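readConfig treats every key as mandatory: the first entry missing from config.yaml aborts the load, and the partially filled Config is returned alongside the error. A short caller sketch (the directory path and helper name are assumptions, not from the original):

// mustReadConfig is a hypothetical convenience wrapper around readConfig above.
func mustReadConfig(dir string) Config {
	cfg, err := readConfig(dir) // expects <dir>/config.yaml to exist
	if err != nil {
		panic(fmt.Sprintf("loading %s/config.yaml: %s", dir, err))
	}
	return cfg
}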
Example #14
func (c *container) start(p *dockerProvisioner, app provision.App, isDeploy bool) error {
	port, err := getPort()
	if err != nil {
		return err
	}
	sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
	sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
	sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation")
	sharedSalt, _ := config.GetString("docker:sharedfs:salt")
	hostConfig := docker.HostConfig{
		Memory:     app.GetMemory(),
		MemorySwap: app.GetMemory() + app.GetSwap(),
		CPUShares:  int64(app.GetCpuShare()),
	}
	if !isDeploy {
		hostConfig.RestartPolicy = docker.AlwaysRestart()
		hostConfig.PortBindings = map[docker.Port][]docker.PortBinding{
			docker.Port(port + "/tcp"): {{HostIP: "", HostPort: ""}},
		}
		hostConfig.LogConfig = docker.LogConfig{
			Type: "syslog",
			Config: map[string]string{
				"syslog-address": fmt.Sprintf("udp://localhost:%d", getBsSysLogPort()),
			},
		}
	}
	hostConfig.SecurityOpt, _ = config.GetList("docker:security-opts")
	if sharedBasedir != "" && sharedMount != "" {
		if sharedIsolation {
			var appHostDir string
			if sharedSalt != "" {
				h := crypto.SHA1.New()
				io.WriteString(h, sharedSalt+c.AppName)
				appHostDir = fmt.Sprintf("%x", h.Sum(nil))
			} else {
				appHostDir = c.AppName
			}
			hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount))
		} else {
			hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount))
		}
	}
	err = p.getCluster().StartContainer(c.ID, &hostConfig)
	if err != nil {
		return err
	}
	initialStatus := provision.StatusStarting.String()
	if isDeploy {
		initialStatus = provision.StatusBuilding.String()
	}
	return c.setStatus(p, initialStatus, false)
}
Example #15
func legacyAutoScaleRule() *autoScaleRule {
	metadataFilter, _ := config.GetString("docker:auto-scale:metadata-filter")
	maxContainerCount, _ := config.GetInt("docker:auto-scale:max-container-count")
	scaleDownRatio, _ := config.GetFloat("docker:auto-scale:scale-down-ratio")
	preventRebalance, _ := config.GetBool("docker:auto-scale:prevent-rebalance")
	return &autoScaleRule{
		MaxContainerCount: maxContainerCount,
		MetadataFilter:    metadataFilter,
		ScaleDownRatio:    float32(scaleDownRatio),
		PreventRebalance:  preventRebalance,
		Enabled:           true,
	}
}
Example #16
// CreateApp creates a new app.
//
// Creating a new app is a process composed of five steps:
//
//       1. Save the app in the database
//       2. Create IAM credentials for the app
//       3. Create S3 bucket for the app (if the bucket support is enabled)
//       4. Create the git repository using gandalf
//       5. Provision units within the provisioner
func CreateApp(app *App, user *auth.User) error {
	teams, err := user.Teams()
	if err != nil {
		return err
	}
	if len(teams) == 0 {
		return NoTeamsError{}
	}
	if _, err := getPlatform(app.Platform); err != nil {
		return err
	}
	// app.Memory is empty: no custom memory limit was passed from the CLI
	if app.Memory < 1 {
		// get default memory limit from tsuru config
		configMemory, err := config.GetInt("docker:memory")
		if err != nil {
			// no default memory set in config (or error when reading), set it as unlimited (0)
			app.Memory = 0
		} else {
			// default memory set in config, use that.
			app.Memory = configMemory
		}
	}
	if err := app.setTeamOwner(teams); err != nil {
		return err
	}
	app.SetTeams(teams)
	app.Owner = user.Email
	if !app.isValid() {
		msg := "Invalid app name, your app should have at most 63 " +
			"characters, containing only lower case letters, numbers or dashes, " +
			"starting with a letter."
		return &errors.ValidationError{Message: msg}
	}
	actions := []*action.Action{&reserveUserApp, &insertApp}
	useS3, _ := config.GetBool("bucket-support")
	if useS3 {
		actions = append(actions, &createIAMUserAction,
			&createIAMAccessKeyAction,
			&createBucketAction, &createUserPolicyAction)
	}
	actions = append(actions, &exportEnvironmentsAction,
		&createRepository, &provisionApp)
	pipeline := action.NewPipeline(actions...)
	err = pipeline.Execute(app, user)
	if err != nil {
		return &AppCreationError{app: app.Name, Err: err}
	}
	return nil
}
Example #17
// checkScheduler verifies the scheduler configuration and validates related
// settings.
func checkScheduler() error {
	if servers, err := config.Get("docker:servers"); err == nil && servers != nil {
		return fmt.Errorf(`Using docker:servers is deprecated, please remove it from your config and use "tsuru-admin docker-node-add" to add docker nodes.`)
	}
	isSegregate, err := config.GetBool("docker:segregate")
	if err == nil {
		if isSegregate {
			return config.NewWarning(`Setting "docker:segregate" is not necessary anymore, this is the default behavior from now on.`)
		} else {
			return fmt.Errorf(`You must remove "docker:segregate" from your config.`)
		}
	}
	return nil
}
Example #18
func (p *dockerProvisioner) initAutoScaleConfig() *autoScaleConfig {
	enabled, _ := config.GetBool("docker:auto-scale:enabled")
	waitSecondsNewMachine, _ := config.GetInt("docker:auto-scale:wait-new-time")
	runInterval, _ := config.GetInt("docker:auto-scale:run-interval")
	TotalMemoryMetadata, _ := config.GetString("docker:scheduler:total-memory-metadata")
	return &autoScaleConfig{
		TotalMemoryMetadata: TotalMemoryMetadata,
		WaitTimeNewMachine:  time.Duration(waitSecondsNewMachine) * time.Second,
		RunInterval:         time.Duration(runInterval) * time.Second,
		Enabled:             enabled,
		provisioner:         p,
		done:                make(chan bool),
	}
}
Example #19
func Init() {
	debug, err := config.GetBool("debug")
	if err != nil {
		debug = false
	}
	logFileName, err := config.GetString("log:file")
	var logger Logger
	if err != nil {
		logger = NewSyslogLogger("tsr", debug)
	} else {
		logger = NewFileLogger(logFileName, debug)
	}
	SetLogger(logger)
}
Example #20
// getS3Endpoint returns an s3.S3 instance configured with information provided
// by aws:s3:* settings.
func getS3Endpoint() *s3.S3 {
	regionName, _ := config.GetString("aws:s3:region-name")
	endpoint, err := config.GetString("aws:s3:endpoint")
	if err != nil {
		panic("FATAL: aws:s3:endpoint must be defined in configuration file.")
	}
	bucketEndpoint, _ := config.GetString("aws:s3:bucketEndpoint")
	locationConstraint, err := config.GetBool("aws:s3:location-constraint")
	if err != nil {
		panic("FATAL: aws:s3:location-constraint must be defined in configuration file.")
	}
	lowercaseBucket, err := config.GetBool("aws:s3:lowercase-bucket")
	if err != nil {
		panic("FATAL: aws:s3:lowercase-bucket must be defined in configuration file.")
	}
	region := aws.Region{
		Name:                 regionName,
		S3Endpoint:           endpoint,
		S3BucketEndpoint:     bucketEndpoint,
		S3LocationConstraint: locationConstraint,
		S3LowercaseBucket:    lowercaseBucket,
	}
	return s3.New(getAWSAuth(), region)
}
Example #21
func (c *Container) hostConfig(app provision.App, isDeploy bool) (*docker.HostConfig, error) {
	sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir")
	sharedMount, _ := config.GetString("docker:sharedfs:mountpoint")
	sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation")
	sharedSalt, _ := config.GetString("docker:sharedfs:salt")
	hostConfig := docker.HostConfig{
		CPUShares: int64(app.GetCpuShare()),
	}

	if !isDeploy {
		hostConfig.Memory = app.GetMemory()
		hostConfig.MemorySwap = app.GetMemory() + app.GetSwap()
		hostConfig.RestartPolicy = docker.AlwaysRestart()
		hostConfig.PortBindings = map[docker.Port][]docker.PortBinding{
			docker.Port(c.ExposedPort): {{HostIP: "", HostPort: ""}},
		}
		pool := app.GetPool()
		driver, opts, logErr := LogOpts(pool)
		if logErr != nil {
			return nil, logErr
		}
		hostConfig.LogConfig = docker.LogConfig{
			Type:   driver,
			Config: opts,
		}
	} else {
		hostConfig.OomScoreAdj = 1000
	}

	hostConfig.SecurityOpt, _ = config.GetList("docker:security-opts")
	if sharedBasedir != "" && sharedMount != "" {
		if sharedIsolation {
			var appHostDir string
			if sharedSalt != "" {
				h := crypto.SHA1.New()
				io.WriteString(h, sharedSalt+c.AppName)
				appHostDir = fmt.Sprintf("%x", h.Sum(nil))
			} else {
				appHostDir = c.AppName
			}
			hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount))
		} else {
			hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount))
		}
	}
	return &hostConfig, nil
}
Example #22
func index(w http.ResponseWriter, r *http.Request) error {
	host, _ := config.GetString("host")
	userCreate, _ := config.GetBool("auth:user-registration")
	scheme, _ := config.GetString("auth:scheme")
	repoManager, _ := config.GetString("repo-manager")
	data := map[string]interface{}{
		"tsuruTarget": host,
		"userCreate":  userCreate,
		"nativeLogin": scheme == "" || scheme == "native",
		"keysEnabled": repoManager == "" || repoManager == "gandalf",
	}
	template, err := getTemplate()
	if err != nil {
		return err
	}
	return template.Execute(w, data)
}
Example #23
func createRouter(routerName, configPrefix string) (router.Router, error) {
	apiUrl, err := config.GetString(configPrefix + ":api-url")
	if err != nil {
		return nil, err
	}
	username, _ := config.GetString(configPrefix + ":username")
	password, _ := config.GetString(configPrefix + ":password")
	token, _ := config.GetString(configPrefix + ":token")
	tokenHeader, _ := config.GetString(configPrefix + ":token-header")
	if token == "" && (username == "" || password == "") {
		return nil, errors.Errorf("either token or username and password must be set for galeb router")
	}
	domain, err := config.GetString(configPrefix + ":domain")
	if err != nil {
		return nil, err
	}
	environment, _ := config.GetString(configPrefix + ":environment")
	project, _ := config.GetString(configPrefix + ":project")
	balancePolicy, _ := config.GetString(configPrefix + ":balance-policy")
	ruleType, _ := config.GetString(configPrefix + ":rule-type")
	debug, _ := config.GetBool(configPrefix + ":debug")
	waitTimeoutSec, err := config.GetInt(configPrefix + ":wait-timeout")
	if err != nil {
		waitTimeoutSec = 10 * 60
	}
	client := galebClient.GalebClient{
		ApiUrl:        apiUrl,
		Username:      username,
		Password:      password,
		Token:         token,
		TokenHeader:   tokenHeader,
		Environment:   environment,
		Project:       project,
		BalancePolicy: balancePolicy,
		RuleType:      ruleType,
		WaitTimeout:   time.Duration(waitTimeoutSec) * time.Second,
		Debug:         debug,
	}
	r := galebRouter{
		client:     &client,
		domain:     domain,
		prefix:     configPrefix,
		routerName: routerName,
	}
	return &r, nil
}
Example #24
func (c *DBTokenCache) PutToken(t *goauth2.Token) error {
	if t.AccessToken == "" {
		return ErrEmptyAccessToken
	}
	var email string
	if t.Extra == nil || t.Extra["email"] == "" {
		conf, err := c.scheme.loadConfig()
		if err != nil {
			return err
		}
		transport := &goauth2.Transport{Config: &conf}
		transport.Token = t
		client := transport.Client()
		response, err := client.Get(c.scheme.InfoUrl)
		if err != nil {
			return err
		}
		defer response.Body.Close()
		email, err = c.scheme.Parser.Parse(response)
		if err != nil {
			return err
		}
		if email == "" {
			return ErrEmptyUserEmail
		}
		user, err := auth.GetUserByEmail(email)
		if err != nil {
			if err != auth.ErrUserNotFound {
				return err
			}
			registrationEnabled, _ := config.GetBool("auth:user-registration")
			if !registrationEnabled {
				return err
			}
			user = &auth.User{Email: email}
			err := user.Create()
			if err != nil {
				return err
			}
		}
		err = user.CreateOnGandalf()
		if err != nil {
			log.Errorf("Ignored error trying to create user on gandalf: %s", err.Error())
		}
		t.Extra = make(map[string]string)
		t.Extra["email"] = email
	}
	return makeToken(t).save()
}
Example #25
func hostToNodeName(host string) (string, error) {
	var nodes []cluster.Node
	var err error
	if seg, _ := config.GetBool("docker:segregate"); seg {
		nodes, err = segScheduler.Nodes()
		if err != nil {
			return "", err
		}
	} else {
		nodes = getDockerServers()
	}
	for _, node := range nodes {
		if getHostAddr(node.ID) == host {
			return node.ID, nil
		}
	}
	return "", errors.New(fmt.Sprintf("Host `%s` not found", host))
}
Example #26
func (s *OAuthScheme) handleToken(t *oauth2.Token) (*Token, error) {
	if t.AccessToken == "" {
		return nil, ErrEmptyAccessToken
	}
	conf, err := s.loadConfig()
	if err != nil {
		return nil, err
	}
	client := conf.Client(context.Background(), t)
	response, err := client.Get(s.InfoUrl)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	email, err := s.Parser.Parse(response)
	if err != nil {
		return nil, err
	}
	if email == "" {
		return nil, ErrEmptyUserEmail
	}
	user, err := auth.GetUserByEmail(email)
	if err != nil {
		if err != auth.ErrUserNotFound {
			return nil, err
		}
		registrationEnabled, _ := config.GetBool("auth:user-registration")
		if !registrationEnabled {
			return nil, err
		}
		user = &auth.User{Email: email}
		err = user.Create()
		if err != nil {
			return nil, err
		}
	}
	token := Token{*t, email}
	err = token.save()
	if err != nil {
		return nil, err
	}
	return &token, nil
}
Example #27
func createRouter(prefix string) (router.Router, error) {
	apiUrl, err := config.GetString(prefix + ":api-url")
	if err != nil {
		return nil, err
	}
	username, err := config.GetString(prefix + ":username")
	if err != nil {
		return nil, err
	}
	password, err := config.GetString(prefix + ":password")
	if err != nil {
		return nil, err
	}
	domain, err := config.GetString(prefix + ":domain")
	if err != nil {
		return nil, err
	}
	environment, _ := config.GetString(prefix + ":environment")
	project, _ := config.GetString(prefix + ":project")
	balancePolicy, _ := config.GetString(prefix + ":balance-policy")
	ruleType, _ := config.GetString(prefix + ":rule-type")
	targetTypeBackend, _ := config.GetString(prefix + ":target-type-backend")
	targetTypePool, _ := config.GetString(prefix + ":target-type-backend-pool")
	debug, _ := config.GetBool(prefix + ":debug")
	client := galebClient.GalebClient{
		ApiUrl:            apiUrl,
		Username:          username,
		Password:          password,
		Environment:       environment,
		Project:           project,
		BalancePolicy:     balancePolicy,
		RuleType:          ruleType,
		TargetTypeBackend: targetTypeBackend,
		TargetTypePool:    targetTypePool,
		Debug:             debug,
	}
	r := galebRouter{
		client: &client,
		domain: domain,
		prefix: prefix,
	}
	return &r, nil
}
Example #28
func (s *SAMLAuthScheme) Login(params map[string]string) (auth.Token, error) {
	_, err := s.loadConfig()
	if err != nil {
		return nil, err
	}
	if _, ok := params["callback"]; ok {
		return nil, s.callback(params)
	}
	requestId, ok := params["request_id"]
	if !ok {
		return nil, ErrMissingRequestIdError
	}
	req := request{}
	err = req.getById(requestId)
	if err != nil {
		return nil, err
	}
	if !req.Authed {
		return nil, ErrRequestWaitingForCredentials
	}
	user, err := auth.GetUserByEmail(req.Email)
	if err != nil {
		if err != auth.ErrUserNotFound {
			return nil, err
		}
		registrationEnabled, _ := config.GetBool("auth:user-registration")
		if !registrationEnabled {
			return nil, err
		}
		user = &auth.User{Email: req.Email}
		err = user.Create()
		if err != nil {
			return nil, err
		}
	}
	token, err := createToken(user)
	if err != nil {
		return nil, err
	}
	req.Remove()
	return token, nil
}
Example #29
// ReadWriteURL formats the git ssh url and returns it. If the uid or host is
// not configured in gandalf.conf, this method panics.
func (r *Repository) ReadWriteURL() string {
	uid, err := config.GetString("uid")
	if err != nil {
		panic(err.Error())
	}
	remote := uid + "@%s:%s.git"
	if useSSH, _ := config.GetBool("git:ssh:use"); useSSH {
		port, err := config.GetString("git:ssh:port")
		if err == nil {
			remote = "ssh://" + uid + "@%s:" + port + "/%s.git"
		} else {
			remote = "ssh://" + uid + "@%s/%s.git"
		}
	}
	host, err := config.GetString("host")
	if err != nil {
		panic(err.Error())
	}
	return fmt.Sprintf(remote, host, r.Name)
}
Example #30
func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	var clusterStorage cluster.Storage
	if dCluster == nil {
		if redisServer, err := config.GetString("docker:scheduler:redis-server"); err == nil {
			prefix, _ := config.GetString("docker:scheduler:redis-prefix")
			if password, err := config.GetString("docker:scheduler:redis-password"); err == nil {
				clusterStorage = storage.AuthenticatedRedis(redisServer, password, prefix)
			} else {
				clusterStorage = storage.Redis(redisServer, prefix)
			}
		}
		var nodes []cluster.Node
		if segregate, _ := config.GetBool("docker:segregate"); segregate {
			dCluster, _ = cluster.New(segScheduler, clusterStorage)
		} else {
			nodes = getDockerServers()
			dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
		}
	}
	return dCluster
}