func (s *S) TestHealthCheckDockerRegistryV2TLS(c *check.C) {
	var request *http.Request
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		request = r
		w.Write([]byte("{}"))
	}))
	defer server.Close()
	oldTransport := tsuruNet.Dial5Full60ClientNoKeepAlive.Transport
	defer func() { tsuruNet.Dial5Full60ClientNoKeepAlive.Transport = oldTransport }()
	tsuruNet.Dial5Full60ClientNoKeepAlive.Transport = &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	if old, err := config.Get("docker:registry"); err == nil {
		defer config.Set("docker:registry", old)
	} else {
		defer config.Unset("docker:registry")
	}
	config.Set("docker:registry", server.URL+"/")
	err := healthCheckDockerRegistry()
	c.Assert(err, check.IsNil)
	c.Assert(request.URL.Path, check.Equals, "/v2/")
	c.Assert(request.Method, check.Equals, "GET")
}
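// The test above saves, overrides, and restores "docker:registry" by hand,
// branching on whether the key existed beforehand. The same dance recurs in
// several tests below. A minimal sketch of a helper that could factor it out,
// assuming only the config.Get/Set/Unset API these tests already use; the
// name setTempConfig is hypothetical, not part of the codebase:
func setTempConfig(key string, value interface{}) (restore func()) {
	old, err := config.Get(key)
	config.Set(key, value)
	if err == nil {
		// The key existed: put the previous value back on restore.
		return func() { config.Set(key, old) }
	}
	// The key was absent: remove it again on restore.
	return func() { config.Unset(key) }
}

// A test would then need a single line:
//	defer setTempConfig("docker:registry", server.URL+"/")()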
func (s *CheckerSuite) TestCheckBeanstalkdDefinedInQueue(c *check.C) {
	old, _ := config.Get("queue")
	defer config.Set("queue", old)
	config.Set("queue", "beanstalkd")
	err := checkBeanstalkd()
	c.Assert(err.Error(), check.Equals, "beanstalkd is no longer supported, please use redis instead")
}
func (s *S) SetUpSuite(c *check.C) { config.Set("routers:vulcand:domain", "vulcand.example.com") config.Set("routers:vulcand:type", "vulcand") config.Set("routers:vulcand:api-url", "127.0.0.1:8181") config.Set("database:url", "127.0.0.1:27017") config.Set("database:name", "router_vulcand_tests") }
func (s *S) SetUpSuite(c *check.C) { config.Set("database:url", "127.0.0.1:27017") config.Set("database:name", "provision_tests_s") var err error s.storage, err = db.Conn() c.Assert(err, check.IsNil) }
func (s *S) SetUpSuite(c *check.C) {
	err := config.ReadConfigFile("testdata/config.yaml")
	c.Assert(err, check.IsNil)
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_api_base_test")
	app.LogPubSubQueuePrefix = "pubsub:api-base-test:"
}
func (s *HandlersSuite) TestAutoScaleListRulesWithDBConfig(c *check.C) {
	config.Set("docker:auto-scale:scale-down-ratio", 2.0)
	defer config.Unset("docker:auto-scale:scale-down-ratio")
	config.Set("docker:scheduler:total-memory-metadata", "maxmemory")
	defer config.Unset("docker:scheduler:total-memory-metadata")
	rules := []autoScaleRule{
		{MetadataFilter: "", Enabled: true, MaxContainerCount: 10, ScaleDownRatio: 1.2},
		{MetadataFilter: "pool1", Enabled: true, ScaleDownRatio: 1.1, MaxMemoryRatio: 2.0},
	}
	for _, r := range rules {
		err := r.update()
		c.Assert(err, check.IsNil)
	}
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("GET", "/docker/autoscale/rules", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	var reqRules []autoScaleRule
	err = json.Unmarshal(recorder.Body.Bytes(), &reqRules)
	c.Assert(err, check.IsNil)
	c.Assert(reqRules, check.DeepEquals, rules)
}
func (s *HandlersSuite) TestAutoScaleRunHandler(c *check.C) { mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{}, cluster.Node{Address: "localhost:1999", Metadata: map[string]string{ "pool": "pool1", }}, ) config.Set("docker:auto-scale:group-by-metadata", "pool") config.Set("docker:auto-scale:max-container-count", 2) defer config.Unset("docker:auto-scale:max-container-count") defer config.Unset("docker:auto-scale:group-by-metadata") recorder := httptest.NewRecorder() request, err := http.NewRequest("POST", "/docker/autoscale/run", nil) c.Assert(err, check.IsNil) request.Header.Set("Authorization", "bearer "+s.token.GetValue()) server := api.RunServer(true) server.ServeHTTP(recorder, request) c.Assert(recorder.Code, check.Equals, http.StatusOK) body := recorder.Body.String() parts := strings.Split(body, "\n") c.Assert(parts, check.DeepEquals, []string{ `{"Message":"[node autoscale] running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}`, `{"Message":"[node autoscale] nothing to do for \"pool\": \"pool1\"\n"}`, ``, }) }
func (s *S) TestHealerHealNodeWaitAndRegisterError(c *check.C) {
	iaas.RegisterIaasProvider("my-healer-iaas", iaasTesting.NewHealerIaaSConstructor("addr1", nil))
	_, err := iaas.CreateMachineForIaaS("my-healer-iaas", map[string]string{})
	c.Assert(err, check.IsNil)
	iaas.RegisterIaasProvider("my-healer-iaas", iaasTesting.NewHealerIaaSConstructor("addr2", nil))
	config.Set("iaas:node-protocol", "http")
	config.Set("iaas:node-port", 2)
	defer config.Unset("iaas:node-protocol")
	defer config.Unset("iaas:node-port")
	p := provisiontest.ProvisionerInstance
	err = p.AddNode(provision.AddNodeOptions{
		Address:  "http://addr1:1",
		Metadata: map[string]string{"iaas": "my-healer-iaas"},
	})
	c.Assert(err, check.IsNil)
	p.PrepareFailure("AddNode", fmt.Errorf("add node error"))
	healer := newNodeHealer(nodeHealerArgs{
		WaitTimeNewMachine: time.Second,
	})
	healer.Shutdown()
	nodes, err := p.ListNodes(nil)
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Address(), check.Equals, "http://addr1:1")
	created, err := healer.healNode(nodes[0])
	c.Assert(err, check.ErrorMatches, ".*error registering new node: add node error.*")
	c.Assert(created, check.IsNil)
	nodes, err = p.ListNodes(nil)
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Address(), check.Equals, "http://addr1:1")
	c.Assert(nodes[0].Status(), check.Equals, "enabled")
}
func (s *ExternalSuite) TestSwapWithDifferentRouterKinds(c *check.C) {
	config.Set("hipache:redis-server", "127.0.0.1:6379")
	config.Set("hipache:redis-db", 5)
	backend1 := "bb1"
	backend2 := "bb2"
	r1, err := router.Get("fake")
	c.Assert(err, check.IsNil)
	r2, err := router.Get("hipache")
	c.Assert(err, check.IsNil)
	err = r1.AddBackend(backend1)
	c.Assert(err, check.IsNil)
	defer r1.RemoveBackend(backend1)
	addr1, _ := url.Parse("http://127.0.0.1")
	err = r1.AddRoute(backend1, addr1)
	c.Assert(err, check.IsNil)
	defer r1.RemoveRoute(backend1, addr1)
	err = r2.AddBackend(backend2)
	c.Assert(err, check.IsNil)
	defer r2.RemoveBackend(backend2)
	addr2, _ := url.Parse("http://10.10.10.10")
	err = r2.AddRoute(backend2, addr2)
	c.Assert(err, check.IsNil)
	defer r2.RemoveRoute(backend2, addr2)
	err = router.Swap(r1, backend1, backend2, false)
	c.Assert(err, check.ErrorMatches, `swap is only allowed between routers of the same kind. "bb1" uses "fake", "bb2" uses "hipache"`)
	err = router.Swap(r2, backend1, backend2, false)
	c.Assert(err, check.ErrorMatches, `swap is only allowed between routers of the same kind. "bb1" uses "fake", "bb2" uses "hipache"`)
}
func (s *S) SetUpSuite(c *check.C) { config.Set("database:url", "127.0.0.1:27017") config.Set("database:name", "router_fake_tests") config.Set("routers:fake:type", "fake") config.Set("routers:fake-hc:type", "fake-hc") s.localhost, _ = url.Parse("http://127.0.0.1") }
func (s *ActionsSuite) SetUpSuite(c *gocheck.C) {
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_api_actions_test")
	var err error
	s.conn, err = db.Conn()
	c.Assert(err, gocheck.IsNil)
}
func (s *S) TestInitializeBS(c *check.C) {
	config.Set("host", "127.0.0.1:8080")
	config.Set("docker:bs:image", "tsuru/bs:v10")
	defer config.Unset("host")
	defer config.Unset("docker:bs:image")
	initialized, err := InitializeBS()
	c.Assert(err, check.IsNil)
	c.Assert(initialized, check.Equals, true)
	nodeContainer, err := LoadNodeContainer("", BsDefaultName)
	c.Assert(err, check.IsNil)
	// The generated token is random, so match it by shape and strip it before
	// comparing the remaining config with DeepEquals.
	c.Assert(nodeContainer.Config.Env[0], check.Matches, `^TSURU_TOKEN=.{40}$`)
	nodeContainer.Config.Env = nodeContainer.Config.Env[1:]
	c.Assert(nodeContainer, check.DeepEquals, &NodeContainerConfig{
		Name: BsDefaultName,
		Config: docker.Config{
			Image: "tsuru/bs:v10",
			Env: []string{
				"TSURU_ENDPOINT=http://127.0.0.1:8080/",
				"HOST_PROC=/prochost",
				"SYSLOG_LISTEN_ADDRESS=udp://0.0.0.0:1514",
			},
		},
		HostConfig: docker.HostConfig{
			RestartPolicy: docker.AlwaysRestart(),
			Privileged:    true,
			NetworkMode:   "host",
			Binds:         []string{"/proc:/prochost:ro"},
		},
	})
	// A second call must be a no-op once BS is already initialized.
	initialized, err = InitializeBS()
	c.Assert(err, check.IsNil)
	c.Assert(initialized, check.Equals, false)
}
func (s *S) TestAddBackendWithVpc(c *gocheck.C) {
	old, _ := config.Get("juju:elb-avail-zones")
	config.Unset("juju:elb-avail-zones")
	config.Set("juju:elb-use-vpc", true)
	config.Set("juju:elb-vpc-subnets", []string{"subnet-a4a3a2a1", "subnet-002200"})
	config.Set("juju:elb-vpc-secgroups", []string{"sg-0900"})
	defer func() {
		config.Set("juju:elb-avail-zones", old)
		config.Unset("juju:elb-use-vpc")
		config.Unset("juju:elb-vpc-subnets")
		config.Unset("juju:elb-vpc-secgroups")
	}()
	router := elbRouter{}
	err := router.AddBackend("tip")
	c.Assert(err, gocheck.IsNil)
	defer router.RemoveBackend("tip")
	resp, err := s.client.DescribeLoadBalancers("tip")
	c.Assert(err, gocheck.IsNil)
	c.Assert(resp.LoadBalancerDescriptions, gocheck.HasLen, 1)
	lbd := resp.LoadBalancerDescriptions[0]
	c.Assert(lbd.Subnets, gocheck.DeepEquals, []string{"subnet-a4a3a2a1", "subnet-002200"})
	c.Assert(lbd.SecurityGroups, gocheck.DeepEquals, []string{"sg-0900"})
	c.Assert(lbd.Scheme, gocheck.Equals, "internal")
	c.Assert(lbd.AvailZones, gocheck.HasLen, 0)
}
func (s *QuotaSuite) SetUpSuite(c *check.C) {
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_api_quota_test")
	config.Set("admin-team", "superteam")
	config.Set("auth:hash-cost", 4)
	config.Set("repo-manager", "fake")
}
func (s *HandlersSuite) TestAutoScaleRunHandler(c *check.C) { mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{}, cluster.Node{Address: "localhost:1999", Metadata: map[string]string{ "pool": "pool1", }}, ) config.Set("docker:auto-scale:enabled", true) defer config.Unset("docker:auto-scale:enabled") config.Set("docker:auto-scale:max-container-count", 2) defer config.Unset("docker:auto-scale:max-container-count") recorder := httptest.NewRecorder() request, err := http.NewRequest("POST", "/docker/autoscale/run", nil) c.Assert(err, check.IsNil) request.Header.Set("Authorization", "bearer "+s.token.GetValue()) server := api.RunServer(true) server.ServeHTTP(recorder, request) c.Assert(recorder.Code, check.Equals, http.StatusOK) c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream") body := recorder.Body.String() parts := strings.Split(body, "\n") c.Assert(parts, check.DeepEquals, []string{ `{"Message":"running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}`, `{"Message":"nothing to do for \"pool\": \"pool1\"\n"}`, ``, }) c.Assert(eventtest.EventDesc{ Target: event.Target{Type: event.TargetTypePool}, Owner: s.token.GetUserName(), Kind: "node.autoscale.update.run", }, eventtest.HasEvent) }
func (s *HandlersSuite) TestListNodeHandlerWithoutCluster(c *gocheck.C) {
	var result []node
	config.Set("docker:segregate", true)
	defer config.Unset("docker:segregate")
	config.Set("docker:scheduler:redis-server", "127.0.0.1:6379")
	defer config.Unset("docker:scheduler:redis-server")
	dCluster = nil
	err := s.conn.Collection(schedulerCollection).Insert(node{Address: "host.com:4243", ID: "server01"})
	c.Assert(err, gocheck.IsNil)
	defer s.conn.Collection(schedulerCollection).RemoveId("server01")
	err = s.conn.Collection(schedulerCollection).Insert(node{Address: "host.com:4243", ID: "server02"})
	c.Assert(err, gocheck.IsNil)
	defer s.conn.Collection(schedulerCollection).RemoveId("server02")
	req, err := http.NewRequest("GET", "/node/", nil)
	c.Assert(err, gocheck.IsNil)
	rec := httptest.NewRecorder()
	err = listNodeHandler(rec, req, nil)
	c.Assert(err, gocheck.IsNil)
	body, err := ioutil.ReadAll(rec.Body)
	c.Assert(err, gocheck.IsNil)
	err = json.Unmarshal(body, &result)
	c.Assert(err, gocheck.IsNil)
	c.Assert(result[0].ID, gocheck.Equals, "server01")
	c.Assert(result[0].Address, gocheck.DeepEquals, "host.com:4243")
	c.Assert(result[1].ID, gocheck.Equals, "server02")
	c.Assert(result[1].Address, gocheck.DeepEquals, "host.com:4243")
}
func (s *HandlersSuite) TestAutoScaleListRulesWithLegacyConfig(c *check.C) {
	config.Set("docker:auto-scale:metadata-filter", "mypool")
	config.Set("docker:auto-scale:max-container-count", 4)
	config.Set("docker:auto-scale:scale-down-ratio", 1.5)
	config.Set("docker:auto-scale:prevent-rebalance", true)
	config.Set("docker:scheduler:max-used-memory", 0.9)
	defer config.Unset("docker:auto-scale:metadata-filter")
	defer config.Unset("docker:auto-scale:max-container-count")
	defer config.Unset("docker:auto-scale:scale-down-ratio")
	defer config.Unset("docker:auto-scale:prevent-rebalance")
	defer config.Unset("docker:scheduler:max-used-memory")
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("GET", "/docker/autoscale/rules", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	var rules []autoScaleRule
	err = json.Unmarshal(recorder.Body.Bytes(), &rules)
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.DeepEquals, []autoScaleRule{
		{MetadataFilter: "mypool", Enabled: true, MaxContainerCount: 4, ScaleDownRatio: 1.5, PreventRebalance: true, MaxMemoryRatio: 0.9},
	})
}
func (s *S) TestContainerCommitWithRegistry(c *check.C) {
	config.Set("docker:registry-max-try", 1)
	config.Set("docker:registry", "localhost:3030")
	defer config.Unset("docker:registry")
	cont, err := s.newContainer(nil, nil)
	c.Assert(err, check.IsNil)
	defer s.removeTestContainer(cont)
	buf := bytes.Buffer{}
	nextImgName, err := appNewImageName(cont.AppName)
	c.Assert(err, check.IsNil)
	cont.BuildingImage = nextImgName
	calls := 0
	s.server.SetHook(func(r *http.Request) {
		if ok, _ := regexp.MatchString("/images/.*?/push", r.URL.Path); ok {
			calls++
		}
	})
	defer s.server.SetHook(nil)
	imageId, err := cont.commit(s.p, &buf)
	c.Assert(err, check.IsNil)
	repoNamespace, _ := config.GetString("docker:repository-namespace")
	repository := "localhost:3030/" + repoNamespace + "/app-" + cont.AppName + ":v1"
	c.Assert(imageId, check.Equals, repository)
	c.Assert(calls, check.Equals, 1)
}
func (s *InstanceSuite) SetUpSuite(c *check.C) {
	var err error
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_service_instance_test")
	s.conn, err = db.Conn()
	c.Assert(err, check.IsNil)
}
func (s *S) TestContainerCommitRetryShouldNotBeLessThanOne(c *check.C) {
	s.server.PrepareMultiFailures("i/o timeout", "/images/.*?/push")
	s.server.PrepareMultiFailures("i/o timeout", "/images/.*?/push")
	defer s.server.ResetMultiFailures()
	// A registry-max-try below 1 must be normalized upward: with two prepared
	// push failures, the commit still succeeds and three attempts are observed.
	config.Set("docker:registry-max-try", -1)
	config.Set("docker:registry", "localhost:3030")
	defer config.Unset("docker:registry")
	cont, err := s.newContainer(nil, nil)
	c.Assert(err, check.IsNil)
	defer s.removeTestContainer(cont)
	buf := bytes.Buffer{}
	nextImgName, err := appNewImageName(cont.AppName)
	c.Assert(err, check.IsNil)
	cont.BuildingImage = nextImgName
	calls := 0
	s.server.SetHook(func(r *http.Request) {
		if ok, _ := regexp.MatchString("/images/.*?/push", r.URL.Path); ok {
			calls++
		}
	})
	defer s.server.SetHook(nil)
	_, err = cont.commit(s.p, &buf)
	c.Assert(err, check.IsNil)
	c.Assert(calls, check.Equals, 3)
}
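// Both commit tests above count registry pushes the same way: a hook on the
// fake Docker server matching push requests by URL path. A sketch of that hook
// factored into a helper, assuming only the SetHook API the tests already use;
// countPushes is a hypothetical name:
func countPushes(server interface {
	SetHook(func(*http.Request))
}, calls *int) {
	server.SetHook(func(r *http.Request) {
		// The fake Docker API exposes pushes as /images/<name>/push.
		if ok, _ := regexp.MatchString("/images/.*?/push", r.URL.Path); ok {
			*calls++
		}
	})
}

// Installed before the commit and cleared afterwards:
//	calls := 0
//	countPushes(s.server, &calls)
//	defer s.server.SetHook(nil)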
func (s *HandlersSuite) TestListNodeHandlerWithoutCluster(c *gocheck.C) {
	var result []map[string]string
	p1 := Pool{Name: "pool1", Nodes: []string{"host.com:4243"}}
	p2 := Pool{Name: "pool2", Nodes: []string{"host.com:4243"}}
	err := s.conn.Collection(schedulerCollection).Insert(p1, p2)
	c.Assert(err, gocheck.IsNil)
	defer s.conn.Collection(schedulerCollection).RemoveId(p1.Name)
	defer s.conn.Collection(schedulerCollection).RemoveId(p2.Name)
	config.Set("docker:segregate", true)
	defer config.Unset("docker:segregate")
	config.Set("docker:scheduler:redis-server", "127.0.0.1:6379")
	defer config.Unset("docker:scheduler:redis-server")
	dCluster = nil
	req, err := http.NewRequest("GET", "/node/", nil)
	c.Assert(err, gocheck.IsNil)
	rec := httptest.NewRecorder()
	err = listNodeHandler(rec, req, nil)
	c.Assert(err, gocheck.IsNil)
	body, err := ioutil.ReadAll(rec.Body)
	c.Assert(err, gocheck.IsNil)
	err = json.Unmarshal(body, &result)
	c.Assert(err, gocheck.IsNil)
	c.Assert(result[0]["ID"], gocheck.DeepEquals, "host.com:4243")
	c.Assert(result[0]["Address"], gocheck.DeepEquals, "host.com:4243")
	c.Assert(result[1]["ID"], gocheck.DeepEquals, "host.com:4243")
	c.Assert(result[1]["Address"], gocheck.DeepEquals, "host.com:4243")
}
func init() {
	suite := &routertest.RouterSuite{
		SetUpSuiteFunc: func(c *check.C) {
			config.Set("routers:fusis:domain", "fusis.com")
			config.Set("routers:fusis:type", "fusis")
			config.Set("database:url", "127.0.0.1:27017")
			config.Set("database:name", "router_fusis_tests")
		},
	}
	var fakeServer *fusisTesting.FakeFusisServer
	suite.SetUpTestFunc = func(c *check.C) {
		var err error
		fakeServer = fusisTesting.NewFakeFusisServer()
		config.Set("routers:fusis:api-url", fakeServer.URL)
		fRouter, err := createRouter("fusis", "routers:fusis")
		c.Assert(err, check.IsNil)
		suite.Router = fRouter
		conn, err := db.Conn()
		c.Assert(err, check.IsNil)
		defer conn.Close()
		dbtest.ClearAllCollections(conn.Collection("router_fusis_tests").Database)
	}
	suite.TearDownTestFunc = func(c *check.C) {
		fakeServer.Close()
	}
	check.Suite(suite)
}
func (s *ConsumptionSuite) SetUpTest(c *check.C) {
	repositorytest.Reset()
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_api_consumption_test")
	config.Set("auth:hash-cost", 4)
	config.Set("repo-manager", "fake")
	var err error
	s.conn, err = db.Conn()
	c.Assert(err, check.IsNil)
	dbtest.ClearAllCollections(s.conn.Apps().Database)
	s.team = &auth.Team{Name: "tsuruteam"}
	err = s.conn.Teams().Insert(s.team)
	c.Assert(err, check.IsNil)
	s.token = customUserWithPermission(c, "consumption-master-user", permission.Permission{
		Scheme:  permission.PermServiceInstance,
		Context: permission.Context(permission.CtxTeam, s.team.Name),
	}, permission.Permission{
		Scheme:  permission.PermServiceRead,
		Context: permission.Context(permission.CtxTeam, s.team.Name),
	})
	s.user, err = s.token.User()
	c.Assert(err, check.IsNil)
	app.AuthScheme = nativeScheme
	s.provisioner = provisiontest.NewFakeProvisioner()
	app.Provisioner = s.provisioner
}
func (s *S) SetUpSuite(c *gocheck.C) {
	var err error
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "router_fake_tests")
	s.conn, err = db.Conn()
	c.Assert(err, gocheck.IsNil)
}
func (s *LogSuite) SetUpSuite(c *check.C) {
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_log_api_tests")
	config.Set("auth:hash-cost", 4)
	config.Set("repo-manager", "fake")
	app.LogPubSubQueuePrefix = "pubsub:api-log-test:"
}
func (s *S) TestRegisterAndGet(c *check.C) {
	var r Router
	var prefixes []string
	var names []string
	routerCreator := func(name, prefix string) (Router, error) {
		names = append(names, name)
		prefixes = append(prefixes, prefix)
		return r, nil
	}
	Register("router", routerCreator)
	config.Set("routers:mine:type", "router")
	defer config.Unset("routers:mine:type")
	got, err := Get("mine")
	c.Assert(err, check.IsNil)
	c.Assert(got, check.DeepEquals, r)
	c.Assert(names, check.DeepEquals, []string{"mine"})
	c.Assert(prefixes, check.DeepEquals, []string{"routers:mine"})
	_, err = Get("unknown-router")
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(err.Error(), check.Equals, "config key 'routers:unknown-router:type' not found")
	config.Set("routers:mine-unknown:type", "unknown")
	defer config.Unset("routers:mine-unknown:type")
	_, err = Get("mine-unknown")
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(err.Error(), check.Equals, `unknown router: "unknown".`)
}
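// TestRegisterAndGet above pins down the registry contract: Register stores a
// creator under a type name, and Get resolves "routers:<name>:type" from the
// config before invoking the matching creator with the router name and its
// config prefix. A minimal sketch that satisfies those assertions, offered as
// an illustration rather than the package's actual implementation:
var routerCreators = map[string]func(name, prefix string) (Router, error){}

func registerSketch(typeName string, creator func(name, prefix string) (Router, error)) {
	routerCreators[typeName] = creator
}

func getSketch(name string) (Router, error) {
	routerType, err := config.GetString("routers:" + name + ":type")
	if err != nil {
		// Surfaces as "config key 'routers:<name>:type' not found".
		return nil, err
	}
	creator, ok := routerCreators[routerType]
	if !ok {
		return nil, fmt.Errorf("unknown router: %q.", routerType)
	}
	return creator(name, "routers:"+name)
}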
func (s *WriterSuite) SetUpSuite(c *gocheck.C) {
	var err error
	config.Set("database:url", "127.0.0.1:27017")
	config.Set("database:name", "tsuru_api_writer_test")
	s.conn, err = db.Conn()
	c.Assert(err, gocheck.IsNil)
}
func (s *S) TestDockerCluster(c *gocheck.C) {
	config.Set("docker:servers", []string{"http://localhost:4243", "http://10.10.10.10:4243"})
	defer config.Unset("docker:servers")
	nodes, err := dCluster.Nodes()
	c.Assert(err, gocheck.IsNil)
	cmutex.Lock()
	dCluster = nil
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		defer cmutex.Unlock()
		dCluster, err = cluster.New(nil, &cluster.MapStorage{}, nodes...)
		c.Assert(err, gocheck.IsNil)
	}()
	config.Set("docker:cluster:redis-server", "127.0.0.1:6379")
	defer config.Unset("docker:cluster:redis-server")
	clus := dockerCluster()
	c.Assert(clus, gocheck.NotNil)
	currentNodes, err := clus.Nodes()
	c.Assert(err, gocheck.IsNil)
	sortedNodes := NodeList(currentNodes)
	sort.Sort(sortedNodes)
	c.Assert(sortedNodes, gocheck.DeepEquals, NodeList([]cluster.Node{
		{Address: "http://10.10.10.10:4243", Metadata: map[string]string{}},
		{Address: "http://localhost:4243", Metadata: map[string]string{}},
	}))
}
func (s *S) SetUpTest(c *check.C) { config.Set("database:url", "127.0.0.1:27017") config.Set("database:name", "tsuru_events_migrate_tests") conn, err := db.Conn() c.Assert(err, check.IsNil) defer conn.Close() err = dbtest.ClearAllCollections(conn.Events().Database) c.Assert(err, check.IsNil) config.Set("routers:fake:type", "fake") err = (&app.Plan{Name: "default", Router: "fake", CpuShare: 100, Default: true}).Save() c.Assert(err, check.IsNil) nativeScheme := auth.ManagedScheme(native.NativeScheme{}) app.AuthScheme = nativeScheme s.user = &auth.User{Email: "*****@*****.**", Password: "******"} _, err = nativeScheme.Create(s.user) c.Assert(err, check.IsNil) s.team = &auth.Team{Name: "angra"} err = conn.Teams().Insert(s.team) c.Assert(err, check.IsNil) provision.DefaultProvisioner = "fake" provisiontest.ProvisionerInstance.Reset() opts := provision.AddPoolOptions{Name: "test1", Default: true} err = provision.AddPool(opts) c.Assert(err, check.IsNil) }
func (s *S) TestGetPortInteger(c *gocheck.C) {
	old, _ := config.Get("docker:run-cmd:port")
	defer config.Set("docker:run-cmd:port", old)
	config.Set("docker:run-cmd:port", 8888)
	port, err := getPort()
	c.Assert(err, gocheck.IsNil)
	c.Assert(port, gocheck.Equals, "8888")
}