func init() { config.ReadConfigFile("settings.yaml") //TODO: Error Checking basePath, _ := config.GetString("TEMPLATES:BASE") layoutsPath, _ := config.GetString("TEMPLATES:LAYOUTS") partialsPath, _ := config.GetString("TEMPLATES:PARTIALS") dir, _ := os.Getwd() templatesPath = filepath.Join(dir, basePath) fmt.Printf("Processing templates in %s\n", templatesPath) if templates == nil { templates = make(map[string]*template.Template) } layouts, err := filepath.Glob(templatesPath + "/" + layoutsPath + "/*") if err != nil { log.Fatal(err) } partials, err := filepath.Glob(templatesPath + "/" + partialsPath + "/*") if err != nil { log.Fatal(err) } for _, layout := range layouts { files := append(partials, layout) templates[filepath.Base(layout)] = template.Must(template.ParseFiles(files...)) } UserRepo = user.NewBaseUserRepository() if _, err := UserRepo.NewUser("jeff", "password"); err != nil { fmt.Println(err) } }
func main() { dry := flag.Bool("dry", false, "dry-run: does not start the server (for testing purpose)") configFile := flag.String("config", "/etc/gandalf.conf", "Gandalf configuration file") gVersion := flag.Bool("version", false, "Print version and exit") flag.Parse() if *gVersion { fmt.Printf("gandalf-webserver version %s\n", version) return } err := config.ReadAndWatchConfigFile(*configFile) if err != nil { msg := `Could not find gandalf config file. Searched on %s. For an example conf check gandalf/etc/gandalf.conf file.\n %s` log.Panicf(msg, *configFile, err) } router := api.SetupRouter() bind, err := config.GetString("bind") if err != nil { var perr error bind, perr = config.GetString("webserver:port") if perr != nil { panic(err) } } if !*dry { log.Fatal(http.ListenAndServe(bind, router)) } }
func buildUrl(command string, params map[string]string) (string, error) { apiKey, err := config.GetString("iaas:cloudstack:api-key") if err != nil { return "", err } secretKey, err := config.GetString("iaas:cloudstack:secret-key") if err != nil { return "", err } params["command"] = command params["response"] = "json" params["apiKey"] = apiKey var sorted_keys []string for k := range params { sorted_keys = append(sorted_keys, k) } sort.Strings(sorted_keys) var string_params []string for _, key := range sorted_keys { queryStringParam := fmt.Sprintf("%s=%s", key, url.QueryEscape(params[key])) string_params = append(string_params, queryStringParam) } queryString := strings.Join(string_params, "&") digest := hmac.New(sha1.New, []byte(secretKey)) digest.Write([]byte(strings.ToLower(queryString))) signature := base64.StdEncoding.EncodeToString(digest.Sum(nil)) cloudstackUrl, err := config.GetString("iaas:cloudstack:url") if err != nil { return "", err } return fmt.Sprintf("%s?%s&signature=%s", cloudstackUrl, queryString, url.QueryEscape(signature)), nil }
func sendGrid() error { username, uerr := config.GetString("username") if uerr != nil { log.Error("Error loading user name") } secretkey, serr := config.GetString("secretkey") if serr != nil { log.Error("Error loading secretkey") } sg := sendgrid.NewSendGridClient(username, secretkey) message := sendgrid.NewMail() email, kerr := config.GetString("addto:email") if kerr != nil { return kerr } message.AddTo(email) name, kerr := config.GetString("addto:name") if kerr != nil { return kerr } message.AddToName(name) message.SetSubject("GoMegam-IoT ") message.SetText("Welcome to Gomegam IoT Project") message.SetFrom("*****@*****.**") if r := sg.Send(message); r != nil { return r } return nil }
func (c *container) start() error { port, err := getPort() if err != nil { return err } sharedBasedir, _ := config.GetString("docker:sharedfs:hostdir") sharedMount, _ := config.GetString("docker:sharedfs:mountpoint") sharedIsolation, _ := config.GetBool("docker:sharedfs:app-isolation") sharedSalt, _ := config.GetString("docker:sharedfs:salt") config := docker.HostConfig{} config.PortBindings = map[docker.Port][]docker.PortBinding{ docker.Port(port + "/tcp"): {{HostIp: "", HostPort: ""}}, docker.Port("22/tcp"): {{HostIp: "", HostPort: ""}}, } if sharedBasedir != "" && sharedMount != "" { if sharedIsolation { var appHostDir string if sharedSalt != "" { h := crypto.SHA1.New() io.WriteString(h, sharedSalt+c.AppName) appHostDir = fmt.Sprintf("%x", h.Sum(nil)) } else { appHostDir = c.AppName } config.Binds = append(config.Binds, fmt.Sprintf("%s/%s:%s:rw", sharedBasedir, appHostDir, sharedMount)) } else { config.Binds = append(config.Binds, fmt.Sprintf("%s:%s:rw", sharedBasedir, sharedMount)) } } err = dockerCluster().StartContainer(c.ID, &config) if err != nil { return err } return nil }
// Queue returns the shared monsterqueue instance, lazily creating it on
// first use from the queue:mongo-url and queue:mongo-database config
// entries (defaulting the URL to localhost:27017). It uses double-checked
// locking: a read-locked fast path, then a write lock to initialize once.
func Queue() (monsterqueue.Queue, error) {
	queueData.RLock()
	if queueData.instance != nil {
		defer queueData.RUnlock()
		return queueData.instance, nil
	}
	queueData.RUnlock()
	queueData.Lock()
	defer queueData.Unlock()
	// Re-check: another goroutine may have created the instance between
	// the RUnlock above and acquiring the write lock.
	if queueData.instance != nil {
		return queueData.instance, nil
	}
	queueMongoUrl, _ := config.GetString("queue:mongo-url")
	if queueMongoUrl == "" {
		queueMongoUrl = "localhost:27017"
	}
	queueMongoDB, _ := config.GetString("queue:mongo-database")
	conf := mongodb.QueueConfig{
		CollectionPrefix: "tsuru",
		Url:              queueMongoUrl,
		Database:         queueMongoDB,
	}
	var err error
	queueData.instance, err = mongodb.NewQueue(conf)
	if err != nil {
		return nil, fmt.Errorf("could not create queue instance, please check queue:mongo-url and queue:mongo-database config entries. error: %s", err)
	}
	// Register for graceful shutdown and start consuming jobs in the
	// background.
	shutdown.Register(&queueData)
	go queueData.instance.ProcessLoop()
	return queueData.instance, nil
}
func (conf *Config) EnvListForEndpoint(dockerEndpoint, poolName string) ([]string, error) { tsuruEndpoint, _ := config.GetString("host") if !strings.HasPrefix(tsuruEndpoint, "http://") && !strings.HasPrefix(tsuruEndpoint, "https://") { tsuruEndpoint = "http://" + tsuruEndpoint } tsuruEndpoint = strings.TrimRight(tsuruEndpoint, "/") + "/" endpoint := dockerEndpoint socket, _ := config.GetString("docker:bs:socket") if socket != "" { endpoint = "unix:///var/run/docker.sock" } token, err := conf.getToken() if err != nil { return nil, err } envList := []string{ "DOCKER_ENDPOINT=" + endpoint, "TSURU_ENDPOINT=" + tsuruEndpoint, "TSURU_TOKEN=" + token, "SYSLOG_LISTEN_ADDRESS=udp://0.0.0.0:" + strconv.Itoa(container.BsSysLogPort()), } envMap := EnvMap{} poolEnvMap := PoolEnvMap{} err = conf.UpdateEnvMaps(envMap, poolEnvMap) if err != nil { return nil, err } for envName, envValue := range envMap { envList = append(envList, fmt.Sprintf("%s=%s", envName, envValue)) } for envName, envValue := range poolEnvMap[poolName] { envList = append(envList, fmt.Sprintf("%s=%s", envName, envValue)) } return envList, nil }
func postnetwork(container *global.Container, ip string) { gulpPort, _ := config.GetInt("docker:gulp_port") url := "http://" + container.SwarmNode + ":" + strconv.Itoa(gulpPort) + "/docker/networks" log.Info("URL:> %s", url) bridge, _ := config.GetString("docker:bridge") gateway, _ := config.GetString("docker:gateway") data := &global.DockerNetworksInfo{Bridge: bridge, ContainerId: container.ContainerID, IpAddr: ip, Gateway: gateway} res2B, _ := json.Marshal(data) req, err := http.NewRequest("POST", url, bytes.NewBuffer(res2B)) req.Header.Set("X-Custom-Header", "myvalue") req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { log.Error("gulpd client was failed : %s", err) } defer resp.Body.Close() log.Info("response Status : %s", resp.Status) log.Info("response Headers : %s", resp.Header) body, _ := ioutil.ReadAll(resp.Body) log.Info("response Body : %s", string(body)) }
func (redismqQFactory) get(name, consumerName string) (*redismqQ, error) { host, err := config.GetString("redis-queue:host") if err != nil { host = "localhost" } port, err := config.GetString("redis-queue:port") if err != nil { if nport, err := config.GetInt("redis-queue:port"); err != nil { port = "6379" } else { port = fmt.Sprintf("%d", nport) } } password, _ := config.GetString("redis-queue:password") db, err := config.GetInt("redis-queue:db") if err != nil { db = 3 } queue := redismq.CreateQueue(host, port, password, int64(db), name) consumer, err := queue.AddConsumer(consumerName) if err != nil { return nil, err } return &redismqQ{name: name, queue: queue, consumer: consumer}, nil }
// dial opens a redis connection using the redis-queue:* configuration
// entries, defaulting to localhost, port 6379 and database 3 when unset.
// It authenticates when a password is configured and selects the database
// before returning the connection.
func (factory *redismqQFactory) dial() (redis.Conn, error) {
	host, err := config.GetString("redis-queue:host")
	if err != nil {
		host = "localhost"
	}
	port, err := config.GetString("redis-queue:port")
	if err != nil {
		// The port may be declared as an integer instead of a string.
		if nport, err := config.GetInt("redis-queue:port"); err != nil {
			port = "6379"
		} else {
			port = fmt.Sprintf("%d", nport)
		}
	}
	password, _ := config.GetString("redis-queue:password")
	db, err := config.GetInt("redis-queue:db")
	if err != nil {
		db = 3
	}
	conn, err := redis.Dial("tcp", host+":"+port)
	if err != nil {
		return nil, err
	}
	if password != "" {
		_, err = conn.Do("AUTH", password)
		if err != nil {
			return nil, err
		}
	}
	// Select the configured database; the SELECT error (if any) is returned
	// along with the already-open connection.
	_, err = conn.Do("SELECT", db)
	return conn, err
}
// main parses the command line flags, loads (and watches) the Gandalf
// configuration file, and starts the HTTP server unless -dry was given.
func main() {
	dry := flag.Bool("dry", false, "dry-run: does not start the server (for testing purpose)")
	configFile := flag.String("config", "/etc/gandalf.conf", "Gandalf configuration file")
	gVersion := flag.Bool("version", false, "Print version and exit")
	flag.Parse()
	if *gVersion {
		fmt.Printf("gandalf-webserver version %s\n", version)
		return
	}
	log.Printf("Opening config file: %s ...\n", *configFile)
	err := config.ReadAndWatchConfigFile(*configFile)
	if err != nil {
		msg := `Could not open gandalf config file at %s (%s).
  For an example, see: gandalf/etc/gandalf.conf
  Note that you can specify a different config file with the --config option -- e.g.: --config=./etc/gandalf.conf`
		log.Fatalf(msg, *configFile, err)
	}
	log.Printf("Successfully read config file: %s\n", *configFile)
	router := api.SetupRouter()
	// The bind address falls back to the legacy "webserver:port" key; if
	// neither is set, panic with the original "bind" lookup error.
	bind, err := config.GetString("bind")
	if err != nil {
		var perr error
		bind, perr = config.GetString("webserver:port")
		if perr != nil {
			panic(err)
		}
	}
	if !*dry {
		log.Fatal(http.ListenAndServe(bind, router))
	}
}
func (c *Container) user() string { user, err := config.GetString("docker:user") if err != nil { user, _ = config.GetString("docker:ssh:user") } return user }
// Run is the function that starts the collector. The dryMode parameter
// indicates whether the collector should loop forever or not.
//
// It assumes the configuration has already been defined (from a config file or
// memory).
func Run(dryMode bool) {
	log.Init()
	// Database settings fall back to the package defaults when unset.
	connString, err := config.GetString("database:url")
	if err != nil {
		connString = db.DefaultDatabaseURL
	}
	dbName, err := config.GetString("database:name")
	if err != nil {
		dbName = db.DefaultDatabaseName
	}
	fmt.Printf("Using the database %q from the server %q.\n\n", dbName, connString)
	if !dryMode {
		provisioner, err := config.GetString("provisioner")
		if err != nil {
			fmt.Println("Warning: configuration didn't declare a provisioner, using default provisioner.")
			provisioner = "juju"
		}
		app.Provisioner, err = provision.Get(provisioner)
		if err != nil {
			fatal(err)
		}
		fmt.Printf("Using %q provisioner.\n\n", provisioner)
		// Collection interval in seconds; defaults to one minute.
		timer, err := config.GetInt("collector:ticker-time")
		if err != nil {
			timer = 60
		}
		ticker := time.Tick(time.Duration(timer) * time.Second)
		fmt.Println("tsuru collector agent started...")
		collect(ticker)
	}
}
// RunAdminServer starts tsuru administrative api func RunAdminServer(dry bool) { log.Init() connString, err := config.GetString("database:url") if err != nil { connString = db.DefaultDatabaseURL } dbName, err := config.GetString("database:name") if err != nil { dbName = db.DefaultDatabaseName } fmt.Printf("Using the database %q from the server %q.\n\n", dbName, connString) if !dry { provisioner, err := getProvisioner() if err != nil { fmt.Printf("Warning: configuration didn't declare a provisioner, using default provisioner.\n") } app.Provisioner, err = provision.Get(provisioner) if err != nil { fatal(err) } fmt.Printf("Using %q provisioner.\n\n", provisioner) listen, err := config.GetString("admin-listen") if err != nil { fatal(err) } listener, err := net.Listen("tcp", listen) if err != nil { fatal(err) } fmt.Printf("tsuru HTTP server listening at %s...\n", listen) http.Handle("/", m) fatal(http.Serve(listener, nil)) } }
// sshCmds returns the commands needed to start a ssh daemon. func sshCmds() ([]string, error) { addKeyCommand, err := config.GetString("docker:ssh:add-key-cmd") if err != nil { return nil, err } keyFile, err := config.GetString("docker:ssh:public-key") if err != nil { if u, err := user.Current(); err == nil { keyFile = path.Join(u.HomeDir, ".ssh", "id_rsa.pub") } else { keyFile = os.ExpandEnv("${HOME}/.ssh/id_rsa.pub") } } f, err := filesystem().Open(keyFile) if err != nil { return nil, err } defer f.Close() keyContent, err := ioutil.ReadAll(f) if err != nil { return nil, err } sshdCommand, err := config.GetString("docker:ssh:sshd-path") if err != nil { sshdCommand = "sudo /usr/sbin/sshd" } return []string{ fmt.Sprintf("%s %s", addKeyCommand, bytes.TrimSpace(keyContent)), sshdCommand + " -D", }, nil }
func createRouter(routerName, configPrefix string) (router.Router, error) { apiUrl, err := config.GetString(configPrefix + ":api-url") if err != nil { return nil, err } scheduler, err := config.GetString(configPrefix + ":scheduler") if err != nil { scheduler = "rr" } mode, err := config.GetString(configPrefix + ":mode") if err != nil { mode = "nat" } client := fusisApi.NewClient(apiUrl) client.HttpClient = tsuruNet.Dial5Full60ClientNoKeepAlive r := &fusisRouter{ apiUrl: apiUrl, client: client, proto: "tcp", port: 80, scheduler: scheduler, mode: mode, } return r, nil }
/* * delete the machine from megam server using knife opennebula plugin */ func (i *MegamIaaS) DeleteMachine(pdc *global.PredefClouds, assembly *global.AssemblyWithComponents) (string, error) { accesskey, err_accesskey := config.GetString("opennebula:access_key") if err_accesskey != nil { return "", err_accesskey } secretkey, err_secretkey := config.GetString("opennebula:secret_key") if err_secretkey != nil { return "", err_secretkey } str, err := buildDelCommand(iaas.GetPlugins("opennebula"), pdc, "delete") if err != nil { return "", err } //str = str + " -P " + " -y " pair, perr := global.ParseKeyValuePair(assembly.Inputs, "domain") if perr != nil { log.Error("Failed to get the domain value : %s", perr) } str = str + " -N " + assembly.Name + "." + pair.Value str = str + " -A " + accesskey str = str + " -K " + secretkey knifePath, kerr := config.GetString("knife:path") if kerr != nil { return "", kerr } str = strings.Replace(str, " -c ", " -c "+knifePath+" ", -1) str = strings.Replace(str, "<node_name>", assembly.Name+"."+pair.Value, -1) return str, nil }
func (p *dockerProvisioner) RegistryAuthConfig() docker.AuthConfiguration { var authConfig docker.AuthConfiguration authConfig.Email, _ = config.GetString("docker:registry-auth:email") authConfig.Username, _ = config.GetString("docker:registry-auth:username") authConfig.Password, _ = config.GetString("docker:registry-auth:password") authConfig.ServerAddress, _ = config.GetString("docker:registry") return authConfig }
func FlagResolve(localAddr *string, serverAddr *string, username *string, password *string) { flag.Parse() if *helpFlag != false { //log.Log("info", "", nil) fmt.Println("Usage:...........!!!!!") os.Exit(0) } if *versionFlag != false { err := config.ReadConfigFile("../client/config/version.yml") if err != nil { fmt.Println(err) os.Exit(0) } version, _ := config.GetString("version") update, _ := config.GetList("update") instruction, _ := config.GetString("instruction") fmt.Printf("CaeserClient version: %s\n", version) fmt.Printf("New speciality contrast to old version: \n") for k, v := range update { fmt.Printf("%d-- %s\n", k+1, v) } fmt.Printf(" %s\n", instruction) os.Exit(0) } if *localFlag != "" { *localAddr = *localFlag log.Log("info", "you set a new addres", log.Fields{"address": *localFlag}) //fmt.Println("--Notice: you have set a new address", *localAddr) } else { //fmt.Println("--Didn't set the start port. Caesar will start at default port.") log.Log("info", "Didn't set the start port. Caesar will start at default port.", log.Fields{"default address": *localAddr}) } if *serverFlag != "" { fp, err := regexp.MatchString(ipPattern, *serverFlag) handleError(err) if !fp { //fmt.Printf("\"%s\" is not a valid address, please check it and try again!\n", *serverFlag) warnMsg := *serverFlag + "is not a valid address, please check it and try again!" log.Log("warn", warnMsg, nil) os.Exit(0) } *serverAddr = *serverFlag log.Log("info", "You have set a new server address", log.Fields{"new address": *serverAddr}) //fmt.Println("--Notice: you have set a new server address", *serverAddr) } else { log.Log("info", "Didn't set the server address.Caesar will connect the default address.", log.Fields{"new address": *serverAddr}) //fmt.Println("--Didn't set the server address. 
Caesar will connect the default address.") } if *userFlag != "" && *passwordFlag != "" { *username = *userFlag *password = *passwordFlag fmt.Println(*username, *password) } else { //fmt.Println("--Anonymous login, can do nothing! Please login with exgist user or register a new user.") log.Log("info", "Anonymous login, can do nothing! Please login with exgist user or register a new user.", nil) } }
// UploadDeploy deploys an application from an uploaded archive. It streams
// the archive into a temporary build container (via "cat > file"), commits
// that container to an image, and then runs the regular archive deploy
// against the committed image, returning the resulting image id.
func (p *dockerProvisioner) UploadDeploy(app provision.App, archiveFile io.ReadCloser, w io.Writer) (string, error) {
	defer archiveFile.Close()
	filePath := "/home/application/archive.tar.gz"
	// Container user: docker:user, falling back to docker:ssh:user.
	user, err := config.GetString("docker:user")
	if err != nil {
		user, _ = config.GetString("docker:ssh:user")
	}
	options := docker.CreateContainerOptions{
		Config: &docker.Config{
			AttachStdout: true,
			AttachStderr: true,
			AttachStdin:  true,
			OpenStdin:    true,
			StdinOnce:    true,
			User:         user,
			Image:        p.getBuildImage(app),
			Cmd:          []string{"/bin/bash", "-c", "cat > " + filePath},
		},
	}
	cluster := p.Cluster()
	_, cont, err := cluster.CreateContainerSchedulerOpts(options, []string{app.GetName(), ""})
	if err != nil {
		return "", err
	}
	// The temporary build container is always removed, even on failure.
	defer cluster.RemoveContainer(docker.RemoveContainerOptions{ID: cont.ID, Force: true})
	err = cluster.StartContainer(cont.ID, nil)
	if err != nil {
		return "", err
	}
	var output bytes.Buffer
	// Attach stdin so the uploaded archive is piped into the container's
	// "cat" command; stdout/stderr are captured for error reporting.
	opts := docker.AttachToContainerOptions{
		Container:    cont.ID,
		OutputStream: &output,
		ErrorStream:  &output,
		InputStream:  archiveFile,
		Stream:       true,
		Stdin:        true,
		Stdout:       true,
		Stderr:       true,
	}
	status, err := container.SafeAttachWaitContainer(p, opts)
	if err != nil {
		return "", err
	}
	if status != 0 {
		log.Errorf("Failed to deploy container from upload: %s", &output)
		return "", fmt.Errorf("container exited with status %d", status)
	}
	image, err := cluster.CommitContainer(docker.CommitContainerOptions{Container: cont.ID})
	if err != nil {
		return "", err
	}
	imageId, err := p.archiveDeploy(app, image.ID, "file://"+filePath, w)
	if err != nil {
		return "", err
	}
	return imageId, p.deployAndClean(app, imageId, w)
}
// InitializeBS configures the default bs node container on first run. It
// atomically stores a freshly created app token in the container config
// (bailing out if another process won the race), then fills in the tsuru
// endpoint, image, syslog port, and host settings and saves the container.
// It returns true when this call performed the initialization.
func InitializeBS() (bool, error) {
	bsNodeContainer, err := LoadNodeContainer("", BsDefaultName)
	if err != nil {
		return false, err
	}
	// Non-empty Env means the container was already initialized.
	if len(bsNodeContainer.Config.Env) > 0 {
		return false, nil
	}
	tokenData, err := app.AuthScheme.AppLogin(app.InternalAppName)
	if err != nil {
		return false, err
	}
	token := tokenData.GetValue()
	conf := configFor(BsDefaultName)
	isSet, _ := conf.SetFieldAtomic("", "Config.Env", []string{
		"TSURU_TOKEN=" + token,
	})
	if !isSet {
		// Already set by someone else, just bail out.
		app.AuthScheme.Logout(token)
		return false, nil
	}
	// Reload to pick up the token that was just stored atomically.
	bsNodeContainer, err = LoadNodeContainer("", BsDefaultName)
	if err != nil {
		return true, err
	}
	tsuruEndpoint, _ := config.GetString("host")
	if !strings.HasPrefix(tsuruEndpoint, "http://") && !strings.HasPrefix(tsuruEndpoint, "https://") {
		tsuruEndpoint = "http://" + tsuruEndpoint
	}
	tsuruEndpoint = strings.TrimRight(tsuruEndpoint, "/") + "/"
	socket, _ := config.GetString("docker:bs:socket")
	image, _ := config.GetString("docker:bs:image")
	if image == "" {
		image = bsDefaultImageName
	}
	bsPort, _ := config.GetInt("docker:bs:syslog-port")
	if bsPort == 0 {
		bsPort = 1514
	}
	bsNodeContainer.Name = BsDefaultName
	bsNodeContainer.Config.Env = append(bsNodeContainer.Config.Env, []string{
		"TSURU_ENDPOINT=" + tsuruEndpoint,
		"HOST_PROC=" + bsHostProc,
		"SYSLOG_LISTEN_ADDRESS=" + fmt.Sprintf("udp://0.0.0.0:%d", bsPort),
	}...)
	bsNodeContainer.Config.Image = image
	bsNodeContainer.HostConfig.RestartPolicy = docker.AlwaysRestart()
	bsNodeContainer.HostConfig.Privileged = true
	bsNodeContainer.HostConfig.NetworkMode = "host"
	bsNodeContainer.HostConfig.Binds = []string{fmt.Sprintf("/proc:%s:ro", bsHostProc)}
	// When a host docker socket is configured, mount it and point bs at it.
	if socket != "" {
		bsNodeContainer.Config.Env = append(bsNodeContainer.Config.Env, "DOCKER_ENDPOINT=unix:///var/run/docker.sock")
		bsNodeContainer.HostConfig.Binds = append(bsNodeContainer.HostConfig.Binds, fmt.Sprintf("%s:/var/run/docker.sock:rw", socket))
	}
	return true, conf.Save("", bsNodeContainer)
}
// runWithAgentCmds returns the list of commands that should be passed when the // provisioner will run a unit using tsuru_unit_agent to start. // // This will only be called for legacy containers that have not been re- // deployed since the introduction of independent units per 'process' in // 0.12.0. func runWithAgentCmds(app provision.App) ([]string, error) { runCmd, err := config.GetString("docker:run-cmd:bin") if err != nil { return nil, err } host, _ := config.GetString("host") token := app.Envs()["TSURU_APP_TOKEN"].Value return []string{"tsuru_unit_agent", host, token, app.GetName(), runCmd}, nil }
// dial opens a redis connection for pub/sub use. Every setting is read
// from the pubsub:* config entries with a fallback to the legacy
// redis-queue:* entries, then to hard defaults (localhost, port 6379,
// db 3, 0.1s dial / 30min read / 0.5s write timeouts). It authenticates
// when a password is configured and selects the database before returning.
func (factory *redisPubSubFactory) dial() (redis.Conn, error) {
	host, err := config.GetString("pubsub:redis-host")
	if err != nil {
		host, err = config.GetString("redis-queue:host")
		if err != nil {
			host = "localhost"
		}
	}
	// config.Get (not GetString) so the port may be either string or int;
	// it is normalized through Sprintf below.
	port, err := config.Get("pubsub:redis-port")
	if err != nil {
		port, err = config.Get("redis-queue:port")
		if err != nil {
			port = "6379"
		}
	}
	port = fmt.Sprintf("%v", port)
	password, err := config.GetString("pubsub:redis-password")
	if err != nil {
		password, _ = config.GetString("redis-queue:password")
	}
	db, err := config.GetInt("pubsub:redis-db")
	if err != nil {
		db, err = config.GetInt("redis-queue:db")
		if err != nil {
			db = 3
		}
	}
	// Timeouts are configured in (fractional) seconds and converted to
	// time.Duration by multiplying with float64(time.Second).
	secondFloat := float64(time.Second)
	dialTimeout, err := config.GetFloat("pubsub:redis-dial-timeout")
	if err != nil {
		dialTimeout = 0.1
	}
	dialTimeout = dialTimeout * secondFloat
	readTimeout, err := config.GetFloat("pubsub:redis-read-timeout")
	if err != nil {
		readTimeout = 30 * 60
	}
	readTimeout = readTimeout * secondFloat
	writeTimeout, err := config.GetFloat("pubsub:redis-write-timeout")
	if err != nil {
		writeTimeout = 0.5
	}
	writeTimeout = writeTimeout * secondFloat
	conn, err := redis.DialTimeout("tcp", fmt.Sprintf("%s:%v", host, port), time.Duration(dialTimeout), time.Duration(readTimeout), time.Duration(writeTimeout))
	if err != nil {
		return nil, err
	}
	if password != "" {
		_, err = conn.Do("AUTH", password)
		if err != nil {
			return nil, err
		}
	}
	_, err = conn.Do("SELECT", db)
	return conn, err
}
func basicImageName() string { parts := make([]string, 0, 2) registry, _ := config.GetString("docker:registry") if registry != "" { parts = append(parts, registry) } repoNamespace, _ := config.GetString("docker:repository-namespace") parts = append(parts, repoNamespace) return strings.Join(parts, "/") }
func assembleImageName(appName string) string { parts := make([]string, 0, 3) registry, _ := config.GetString("docker:registry") if registry != "" { parts = append(parts, registry) } repoNamespace, _ := config.GetString("docker:repository-namespace") parts = append(parts, repoNamespace, appName) return strings.Join(parts, "/") }
func (s *S) TestReadWriteURLUseUidFromConfigFile(c *gocheck.C) { uid, err := config.GetString("uid") c.Assert(err, gocheck.IsNil) host, err := config.GetString("host") c.Assert(err, gocheck.IsNil) config.Set("uid", "test") defer config.Set("uid", uid) remote := (&Repository{Name: "f#"}).ReadWriteURL() c.Assert(remote, gocheck.Equals, fmt.Sprintf("test@%s:f#.git", host)) }
func DbConfig() (string, string) { url, _ := config.GetString("database:url") if url == "" { url = DefaultDatabaseURL } dbname, _ := config.GetString("database:name") if dbname == "" { dbname = DefaultDatabaseName } return url, dbname }
func checkPubSub() error { oldConfig, _ := config.GetString("redis-queue:host") if oldConfig != "" { return config.NewWarning(`Using "redis-queue:*" is deprecated. Please change your tsuru.conf to use "pubsub:*" options. See http://docs.tsuru.io/en/latest/reference/config.html#pubsub for more details.`) } redisHost, _ := config.GetString("pubsub:redis-host") if redisHost == "" { return config.NewWarning(`Config entry "pubsub:redis-host" is not set, default "localhost" will be used. Running "tsuru app-log -f" might not work.`) } return nil }
func (s *S) TestReadWriteURLWithSSH(c *gocheck.C) { config.Set("git:ssh:use", true) defer config.Unset("git:ssh:use") uid, err := config.GetString("uid") c.Assert(err, gocheck.IsNil) host, err := config.GetString("host") c.Assert(err, gocheck.IsNil) remote := (&Repository{Name: "lol"}).ReadWriteURL() expected := fmt.Sprintf("ssh://%s@%s/lol.git", uid, host) c.Assert(remote, gocheck.Equals, expected) }
// conn reads the gandalf config and calls storage.Open to get a database connection. // // Most gandalf packages should probably use this function. storage.Open is intended for // use when supporting more than one database. func conn() (*storage.Storage, error) { url, _ := config.GetString("database:url") if url == "" { url = DefaultDatabaseURL } dbname, _ := config.GetString("database:name") if dbname == "" { dbname = DefaultDatabaseName } return storage.Open(url, dbname) }