func dockerCluster() *cluster.Cluster {
	cmutex.Lock()
	defer cmutex.Unlock()
	if dCluster == nil {
		debug, _ := config.GetBool("debug")
		clusterLog.SetDebug(debug)
		clusterLog.SetLogger(log.GetStdLogger())
		clusterStorage, err := buildClusterStorage()
		if err != nil {
			panic(err.Error())
		}
		var nodes []cluster.Node
		if isSegregateScheduler() {
			dCluster, _ = cluster.New(&segregatedScheduler{}, clusterStorage)
		} else {
			nodes = getDockerServers()
			dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
		}
		autoHealing, _ := config.GetBool("docker:auto-healing")
		if autoHealing {
			healer := Healer{}
			dCluster.SetHealer(&healer)
		}
		activeMonitoring, _ := config.GetBool("docker:active-monitoring")
		if activeMonitoring {
			dCluster.StartActiveMonitoring(1 * time.Minute)
		}
	}
	return dCluster
}
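For reference, the snippet below is a standalone sketch (plain Go, no tsuru imports) of the same lazily-initialized, mutex-guarded singleton pattern that dockerCluster uses above. The Cluster type, newCluster constructor, and node address are placeholders, not the real cluster package.

package main

import (
	"fmt"
	"sync"
)

// Cluster and newCluster stand in for the real cluster package.
type Cluster struct{ nodes []string }

func newCluster(nodes ...string) *Cluster { return &Cluster{nodes: nodes} }

var (
	cmu      sync.Mutex
	instance *Cluster
)

// getCluster builds the cluster on first use and returns the same instance
// afterwards; holding the mutex for the whole check-and-build makes
// concurrent first calls safe, just like cmutex in dockerCluster above.
func getCluster() *Cluster {
	cmu.Lock()
	defer cmu.Unlock()
	if instance == nil {
		instance = newCluster("http://localhost:2375")
	}
	return instance
}

func main() {
	fmt.Println(getCluster() == getCluster()) // true: both calls see the same instance
}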
Example #2
func (s *S) SetUpTest(c *check.C) {
	iaas.ResetAll()
	repositorytest.Reset()
	queue.ResetQueue()
	s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
	err := s.p.Initialize()
	c.Assert(err, check.IsNil)
	queue.ResetQueue()
	app.Provisioner = s.p
	s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.p.cluster, err = cluster.New(nil, s.p.storage,
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-fallback"}},
	)
	c.Assert(err, check.IsNil)
	mainDockerProvisioner = s.p
	coll := s.p.collection()
	defer coll.Close()
	err = dbtest.ClearAllCollectionsExcept(coll.Database, []string{"users", "tokens", "teams"})
	c.Assert(err, check.IsNil)
	err = clearClusterStorage(s.clusterSess)
	c.Assert(err, check.IsNil)
	routertest.FakeRouter.Reset()
	opts := provision.AddPoolOptions{Name: "test-fallback"}
	err = provision.AddPool(opts)
	c.Assert(err, check.IsNil)
}
func (s *S) TestPushImage(c *gocheck.C) {
	var request *http.Request
	server, err := dtesting.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		request = r
	})
	c.Assert(err, gocheck.IsNil)
	defer server.Stop()
	config.Set("docker:registry", "localhost:3030")
	defer config.Unset("docker:registry")
	var storage cluster.MapStorage
	storage.StoreImage("localhost:3030/base", server.URL())
	cmutex.Lock()
	oldDockerCluster := dCluster
	dCluster, _ = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL()})
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		defer cmutex.Unlock()
		dCluster = oldDockerCluster
	}()
	err = newImage("localhost:3030/base", "http://index.docker.io")
	c.Assert(err, gocheck.IsNil)
	err = pushImage("localhost:3030/base")
	c.Assert(err, gocheck.IsNil)
	c.Assert(request.URL.Path, gocheck.Matches, ".*/images/localhost:3030/base/push$")
}
func (s *S) TestContainerNetworkInfoNotFound(c *gocheck.C) {
	inspectOut := `{
	"NetworkSettings": {
		"IpAddress": "10.10.10.10",
		"IpPrefixLen": 8,
		"Gateway": "10.65.41.1",
		"Ports": {}
	}
}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/containers/") {
			w.Write([]byte(inspectOut))
		}
	}))
	defer server.Close()
	var storage cluster.MapStorage
	storage.StoreContainer("c-01", server.URL)
	oldCluster := dockerCluster()
	var err error
	dCluster, err = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL},
	)
	c.Assert(err, gocheck.IsNil)
	defer func() {
		dCluster = oldCluster
	}()
	container := container{ID: "c-01"}
	info, err := container.networkInfo()
	c.Assert(info.IP, gocheck.Equals, "10.10.10.10")
	c.Assert(info.SSHHostPort, gocheck.Equals, "")
	c.Assert(info.HTTPHostPort, gocheck.Equals, "")
	c.Assert(err, gocheck.NotNil)
	c.Assert(err.Error(), gocheck.Equals, "Container port 8888 is not mapped to any host port")
}
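The error in this test comes from the empty Ports map in the inspect payload. The standalone sketch below (stdlib only) decodes a payload shaped like the fixture above and shows why a missing binding for port 8888 reads as "not mapped"; the struct definitions and the 8888/tcp key are assumptions taken from the test, not the real networkInfo implementation.

package main

import (
	"encoding/json"
	"fmt"
)

type portBinding struct {
	HostIP   string `json:"HostIp"`
	HostPort string `json:"HostPort"`
}

type networkSettings struct {
	IPAddress string                   `json:"IpAddress"`
	Ports     map[string][]portBinding `json:"Ports"`
}

type inspectResult struct {
	NetworkSettings networkSettings `json:"NetworkSettings"`
}

func main() {
	payload := `{"NetworkSettings":{"IpAddress":"10.10.10.10","Ports":{}}}`
	var out inspectResult
	if err := json.Unmarshal([]byte(payload), &out); err != nil {
		panic(err)
	}
	fmt.Println("IP:", out.NetworkSettings.IPAddress)
	// An empty Ports map means nothing is bound for 8888/tcp, which is what
	// makes the test above expect a "not mapped to any host port" error.
	if bindings := out.NetworkSettings.Ports["8888/tcp"]; len(bindings) == 0 {
		fmt.Println("container port 8888 is not mapped to any host port")
	}
}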
Example #5
func (s *S) TestContainerNetworkInfo(c *check.C) {
	inspectOut := `{
	"NetworkSettings": {
		"IpAddress": "10.10.10.10",
		"IpPrefixLen": 8,
		"Gateway": "10.65.41.1",
		"Ports": {}
	}
}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.URL.Path, "/containers/") {
			w.Write([]byte(inspectOut))
		}
	}))
	defer server.Close()
	var storage cluster.MapStorage
	storage.StoreContainer("c-01", server.URL)
	p, err := newFakeDockerProvisioner(server.URL)
	c.Assert(err, check.IsNil)
	p.cluster, err = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL},
	)
	c.Assert(err, check.IsNil)
	container := Container{ID: "c-01"}
	info, err := container.NetworkInfo(p)
	c.Assert(err, check.IsNil)
	c.Assert(info.IP, check.Equals, "10.10.10.10")
	c.Assert(info.HTTPHostPort, check.Equals, "")
}
Example #6
func (s *S) TestGetHostAddr(c *gocheck.C) {
	cmutex.Lock()
	old := dCluster
	var err error
	dCluster, err = cluster.New(nil, &mapStorage{},
		cluster.Node{ID: "server0", Address: "http://localhost:8081"},
		cluster.Node{ID: "server20", Address: "http://localhost:3234"},
		cluster.Node{ID: "server21", Address: "http://10.10.10.10:4243"},
	)
	c.Assert(err, gocheck.IsNil)
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		dCluster = old
		cmutex.Unlock()
	}()
	var tests = []struct {
		input    string
		expected string
	}{
		{"server0", "localhost"},
		{"server20", "localhost"},
		{"server21", "10.10.10.10"},
		{"server33", ""},
	}
	for _, t := range tests {
		c.Check(getHostAddr(t.input), gocheck.Equals, t.expected)
	}
}
Example #7
func (s *HandlersSuite) TestAutoScaleRunHandler(c *check.C) {
	mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{},
		cluster.Node{Address: "localhost:1999", Metadata: map[string]string{
			"pool": "pool1",
		}},
	)
	config.Set("docker:auto-scale:group-by-metadata", "pool")
	config.Set("docker:auto-scale:max-container-count", 2)
	defer config.Unset("docker:auto-scale:max-container-count")
	defer config.Unset("docker:auto-scale:group-by-metadata")
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("POST", "/docker/autoscale/run", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	body := recorder.Body.String()
	parts := strings.Split(body, "\n")
	c.Assert(parts, check.DeepEquals, []string{
		`{"Message":"[node autoscale] running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}`,
		`{"Message":"[node autoscale] nothing to do for \"pool\": \"pool1\"\n"}`,
		``,
	})
}
Example #8
func (s *HandlersSuite) TestListContainersByHostHandler(c *check.C) {
	var result []container
	var err error
	mainDockerProvisioner.cluster, err = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{})
	c.Assert(err, check.IsNil)
	coll := mainDockerProvisioner.collection()
	defer coll.Close()
	err = coll.Insert(container{ID: "blabla", Type: "python", HostAddr: "http://cittavld1182.globoi.com"})
	c.Assert(err, check.IsNil)
	defer coll.Remove(bson.M{"id": "blabla"})
	err = coll.Insert(container{ID: "bleble", Type: "java", HostAddr: "http://cittavld1182.globoi.com"})
	c.Assert(err, check.IsNil)
	defer coll.Remove(bson.M{"id": "bleble"})
	req, err := http.NewRequest("GET", "/node/cittavld1182.globoi.com/containers?:address=http://cittavld1182.globoi.com", nil)
	rec := httptest.NewRecorder()
	err = listContainersHandler(rec, req, nil)
	c.Assert(err, check.IsNil)
	body, err := ioutil.ReadAll(rec.Body)
	c.Assert(err, check.IsNil)
	err = json.Unmarshal(body, &result)
	c.Assert(err, check.IsNil)
	c.Assert(result[0].ID, check.DeepEquals, "blabla")
	c.Assert(result[0].Type, check.DeepEquals, "python")
	c.Assert(result[0].HostAddr, check.DeepEquals, "http://cittavld1182.globoi.com")
	c.Assert(result[1].ID, check.DeepEquals, "bleble")
	c.Assert(result[1].Type, check.DeepEquals, "java")
	c.Assert(result[1].HostAddr, check.DeepEquals, "http://cittavld1182.globoi.com")
}
Example #9
func (s *S) TestCollectStatusFixContainer(c *gocheck.C) {
	coll := collection()
	defer coll.Close()
	err := coll.Insert(
		container{
			ID:       "9930c24f1c4x",
			AppName:  "makea",
			Type:     "python",
			Status:   provision.StatusStarted.String(),
			IP:       "127.0.0.4",
			HostPort: "9025",
			HostAddr: "127.0.0.1",
		},
	)
	c.Assert(err, gocheck.IsNil)
	defer coll.RemoveAll(bson.M{"appname": "makea"})
	cleanup, server := startDocker()
	defer cleanup()
	var storage mapStorage
	storage.StoreContainer("9930c24f1c4x", "server0")
	cmutex.Lock()
	dCluster, err = cluster.New(nil, &storage,
		cluster.Node{ID: "server0", Address: server.URL},
	)
	cmutex.Unlock()
	c.Assert(err, gocheck.IsNil)
	var p dockerProvisioner
	err = p.CollectStatus()
	c.Assert(err, gocheck.IsNil)
	cont, err := getContainer("9930c24f1c4x")
	c.Assert(err, gocheck.IsNil)
	c.Assert(cont.IP, gocheck.Equals, "127.0.0.9")
	c.Assert(cont.HostPort, gocheck.Equals, "9999")
}
Example #10
func (s *S) TestHealerHealNodeWithoutIaaS(c *check.C) {
	node1, err := testing.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	cluster, err := cluster.New(nil, &cluster.MapStorage{},
		cluster.Node{Address: node1.URL()},
	)
	c.Assert(err, check.IsNil)
	var p dockerProvisioner
	err = p.Initialize()
	c.Assert(err, check.IsNil)
	p.cluster = cluster
	healer := nodeHealer{
		locks:                 make(map[string]*sync.Mutex),
		provisioner:           &p,
		disabledTime:          0,
		failuresBeforeHealing: 1,
		waitTimeNewMachine:    1 * time.Second,
	}
	nodes, err := p.getCluster().UnfilteredNodes()
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	created, err := healer.healNode(&nodes[0])
	c.Assert(err, check.ErrorMatches, ".*error creating new machine.*")
	c.Assert(created.Address, check.Equals, "")
	nodes, err = p.getCluster().UnfilteredNodes()
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(urlPort(nodes[0].Address), check.Equals, urlPort(node1.URL()))
	c.Assert(urlToHost(nodes[0].Address), check.Equals, "127.0.0.1")
}
Example #11
func (s *S) startMultipleServersClusterSeggregated() (*dockerProvisioner, error) {
	var err error
	s.extraServer, err = dtesting.NewServer("localhost:0", nil, nil)
	if err != nil {
		return nil, err
	}
	otherURL := strings.Replace(s.extraServer.URL(), "127.0.0.1", "localhost", 1)
	var p dockerProvisioner
	err = p.Initialize()
	if err != nil {
		return nil, err
	}
	opts := provision.AddPoolOptions{Name: "pool1", Public: true}
	err = provision.AddPool(opts)
	if err != nil {
		return nil, err
	}
	opts = provision.AddPoolOptions{Name: "pool2", Public: true}
	err = provision.AddPool(opts)
	if err != nil {
		return nil, err
	}
	p.storage = &cluster.MapStorage{}
	sched := segregatedScheduler{provisioner: &p}
	p.cluster, err = cluster.New(&sched, p.storage,
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "pool1"}},
		cluster.Node{Address: otherURL, Metadata: map[string]string{"pool": "pool2"}},
	)
	if err != nil {
		return nil, err
	}
	return &p, nil
}
Example #12
func (s *S) SetUpTest(c *check.C) {
	config.Set("docker:api-timeout", 2)
	iaas.ResetAll()
	repositorytest.Reset()
	queue.ResetQueue()
	repository.Manager().CreateUser(s.user.Email)
	s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
	err := s.p.Initialize()
	c.Assert(err, check.IsNil)
	queue.ResetQueue()
	s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.p.cluster, err = cluster.New(nil, s.p.storage,
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
	)
	c.Assert(err, check.IsNil)
	mainDockerProvisioner = s.p
	err = dbtest.ClearAllCollectionsExcept(s.storage.Apps().Database, []string{"users", "tokens", "teams"})
	c.Assert(err, check.IsNil)
	err = clearClusterStorage(s.clusterSess)
	c.Assert(err, check.IsNil)
	routertest.FakeRouter.Reset()
	opts := provision.AddPoolOptions{Name: "test-default", Default: true}
	err = provision.AddPool(opts)
	c.Assert(err, check.IsNil)
	s.storage.Tokens().Remove(bson.M{"appname": bson.M{"$ne": ""}})
	s.logBuf = safe.NewBuffer(nil)
	log.SetLogger(log.NewWriterLogger(s.logBuf, true))
	s.token = createTokenForUser(s.user, "*", string(permission.CtxGlobal), "", c)
}
Example #13
func (s *S) TestGetHostAddrWithSegregatedScheduler(c *gocheck.C) {
	conn, err := db.Conn()
	c.Assert(err, gocheck.IsNil)
	defer conn.Close()
	coll := conn.Collection(schedulerCollection)
	err = coll.Insert(
		node{ID: "server0", Address: "http://remotehost:8080", Teams: []string{"tsuru"}},
		node{ID: "server20", Address: "http://remotehost:8081", Teams: []string{"tsuru"}},
		node{ID: "server21", Address: "http://10.10.10.1:8082", Teams: []string{"tsuru"}},
	)
	defer coll.RemoveAll(bson.M{"_id": bson.M{"$in": []string{"server0", "server1", "server2"}}})
	cmutex.Lock()
	old := dCluster
	dCluster, err = cluster.New(segScheduler, &mapStorage{})
	c.Assert(err, gocheck.IsNil)
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		dCluster = old
		cmutex.Unlock()
	}()
	var tests = []struct {
		input    string
		expected string
	}{
		{"server0", "remotehost"},
		{"server20", "remotehost"},
		{"server21", "10.10.10.1"},
		{"server33", ""},
	}
	for _, t := range tests {
		c.Check(getHostAddr(t.input), gocheck.Equals, t.expected)
	}
}
Example #14
func (s *S) TestFixContainersEmptyPortDoesNothing(c *check.C) {
	cleanup, server, p := startDocker("")
	defer cleanup()
	coll := p.collection()
	defer coll.Close()
	err := coll.Insert(
		container{
			ID:       "9930c24f1c4x",
			AppName:  "makea",
			Type:     "python",
			Status:   provision.StatusStarted.String(),
			IP:       "",
			HostPort: "",
			HostAddr: "127.0.0.1",
		},
	)
	c.Assert(err, check.IsNil)
	defer coll.RemoveAll(bson.M{"appname": "makea"})
	var storage cluster.MapStorage
	storage.StoreContainer("9930c24f1c4x", server.URL)
	p.cluster, err = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL},
	)
	c.Assert(err, check.IsNil)
	err = p.fixContainers()
	c.Assert(err, check.IsNil)
	cont, err := p.getContainer("9930c24f1c4x")
	c.Assert(err, check.IsNil)
	c.Assert(cont.IP, check.Equals, "")
	c.Assert(cont.HostPort, check.Equals, "")
}
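Taken together with TestCollectStatusFixContainer above, this test pins down the fix rule only by its observable effect: a container whose recorded IP/port no longer match the daemon gets rewritten, while a container that never had a port mapped is left alone. The sketch below encodes that rule as a guess; it is illustrative only, not the tsuru implementation.

package main

import "fmt"

// storedContainer mirrors just the fields the rule cares about.
type storedContainer struct {
	ID       string
	IP       string
	HostPort string
}

// needsFix is a guess at the decision the fix logic makes: skip containers
// that never had a mapping, update the ones whose stored values drifted.
func needsFix(c storedContainer, inspectedIP, inspectedPort string) bool {
	if c.IP == "" && c.HostPort == "" {
		return false // nothing was ever mapped; leave it untouched
	}
	return c.IP != inspectedIP || c.HostPort != inspectedPort
}

func main() {
	drifted := storedContainer{ID: "9930c24f1c4x", IP: "127.0.0.4", HostPort: "9025"}
	empty := storedContainer{ID: "9930c24f1c4x"}
	fmt.Println(needsFix(drifted, "127.0.0.9", "9999")) // true: gets fixed
	fmt.Println(needsFix(empty, "127.0.0.9", "9999"))   // false: does nothing
}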
Example #15
func (s *S) TestCluster(c *check.C) {
	var p FakeDockerProvisioner
	cluster, err := cluster.New(nil, &cluster.MapStorage{})
	c.Assert(err, check.IsNil)
	p.cluster = cluster
	c.Assert(p.Cluster(), check.Equals, cluster)
}
Example #16
func (s *HandlersSuite) TestAddNodeHandlerCreatingAnIaasMachineExplicit(c *check.C) {
	server, waitQueue := s.startFakeDockerNode(c)
	defer server.Stop()
	iaas.RegisterIaasProvider("test-iaas", newTestIaaS)
	iaas.RegisterIaasProvider("another-test-iaas", newTestIaaS)
	mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{})
	opts := provision.AddPoolOptions{Name: "pool1"}
	err := provision.AddPool(opts)
	defer provision.RemovePool("pool1")
	b := bytes.NewBufferString(`{"pool": "pool1", "id": "test1", "iaas": "another-test-iaas"}`)
	req, err := http.NewRequest("POST", "/docker/node?register=false", b)
	c.Assert(err, check.IsNil)
	rec := httptest.NewRecorder()
	err = addNodeHandler(rec, req, nil)
	c.Assert(err, check.IsNil)
	waitQueue()
	nodes, err := mainDockerProvisioner.getCluster().Nodes()
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Address, check.Equals, strings.TrimRight(server.URL(), "/"))
	c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
		"id":      "test1",
		"pool":    "pool1",
		"iaas":    "another-test-iaas",
		"iaas-id": "test1",
	})
}
Example #17
func (s *HandlersSuite) TestListNodeHandler(c *check.C) {
	var result struct {
		Nodes    []cluster.Node `json:"nodes"`
		Machines []iaas.Machine `json:"machines"`
	}
	var err error
	mainDockerProvisioner.cluster, err = cluster.New(nil, &cluster.MapStorage{})
	c.Assert(err, check.IsNil)
	_, err = mainDockerProvisioner.getCluster().Register("host1.com:2375", map[string]string{"pool": "pool1"})
	c.Assert(err, check.IsNil)
	_, err = mainDockerProvisioner.getCluster().Register("host2.com:2375", map[string]string{"pool": "pool2", "foo": "bar"})
	c.Assert(err, check.IsNil)
	req, err := http.NewRequest("GET", "/node/", nil)
	rec := httptest.NewRecorder()
	err = listNodeHandler(rec, req, nil)
	c.Assert(err, check.IsNil)
	body, err := ioutil.ReadAll(rec.Body)
	c.Assert(err, check.IsNil)
	err = json.Unmarshal(body, &result)
	c.Assert(err, check.IsNil)
	c.Assert(result.Nodes[0].Address, check.Equals, "host1.com:2375")
	c.Assert(result.Nodes[0].Metadata, check.DeepEquals, map[string]string{"pool": "pool1"})
	c.Assert(result.Nodes[1].Address, check.Equals, "host2.com:2375")
	c.Assert(result.Nodes[1].Metadata, check.DeepEquals, map[string]string{"pool": "pool2", "foo": "bar"})
}
Example #18
func (s *S) TestGetNodeByHost(c *check.C) {
	var p dockerProvisioner
	err := p.Initialize()
	c.Assert(err, check.IsNil)
	nodes := []cluster.Node{{
		Address: "http://h1:80",
	}, {
		Address: "http://h2:90",
	}, {
		Address: "http://h3",
	}, {
		Address: "h4",
	}, {
		Address: "h5:30123",
	}}
	p.cluster, err = cluster.New(nil, &cluster.MapStorage{}, nodes...)
	c.Assert(err, check.IsNil)
	tests := [][]string{
		{"h1", nodes[0].Address},
		{"h2", nodes[1].Address},
		{"h3", nodes[2].Address},
		{"h4", nodes[3].Address},
		{"h5", nodes[4].Address},
	}
	for _, t := range tests {
		var n cluster.Node
		n, err = p.GetNodeByHost(t[0])
		c.Assert(err, check.IsNil)
		c.Assert(n.Address, check.DeepEquals, t[1])
	}
	_, err = p.GetNodeByHost("h6")
	c.Assert(err, check.ErrorMatches, `node with host "h6" not found`)
}
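The addresses in this test mix schemes, ports, and bare hostnames, so whatever GetNodeByHost does has to normalize them before comparing. The helper below is one plausible way to do that with the standard library; hostOf is a hypothetical name and only a guess at the normalization, not the provisioner's code.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// hostOf returns just the host part of a node address, tolerating addresses
// with or without a scheme or port ("http://h1:80", "h4", "h5:30123").
func hostOf(address string) string {
	if !strings.Contains(address, "://") {
		// url.Parse would read "h5:30123" as scheme "h5", so force a scheme.
		address = "fake://" + address
	}
	u, err := url.Parse(address)
	if err != nil {
		return ""
	}
	return u.Hostname()
}

func main() {
	for _, addr := range []string{"http://h1:80", "http://h3", "h4", "h5:30123"} {
		fmt.Printf("%-14s -> %s\n", addr, hostOf(addr))
	}
}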
Example #19
func (s *HandlersSuite) TestUpdateNodeHandler(c *check.C) {
	mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{},
		cluster.Node{Address: "localhost:1999", Metadata: map[string]string{
			"m1": "v1",
			"m2": "v2",
		}},
	)
	opts := provision.AddPoolOptions{Name: "pool1"}
	err := provision.AddPool(opts)
	defer provision.RemovePool("pool1")
	json := `{"address": "localhost:1999", "m1": "", "m2": "v9", "m3": "v8"}`
	b := bytes.NewBufferString(json)
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("PUT", "/docker/node", b)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	nodes, err := mainDockerProvisioner.getCluster().Nodes()
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Metadata, check.DeepEquals, map[string]string{
		"m2": "v9",
		"m3": "v8",
	})
}
Example #20
func (s *S) TestPushImage(c *check.C) {
	var requests []*http.Request
	server, err := testing.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		requests = append(requests, r)
	})
	c.Assert(err, check.IsNil)
	defer server.Stop()
	config.Set("docker:registry", "localhost:3030")
	defer config.Unset("docker:registry")
	var p dockerProvisioner
	err = p.Initialize()
	c.Assert(err, check.IsNil)
	p.cluster, err = cluster.New(nil, &cluster.MapStorage{},
		cluster.Node{Address: server.URL()})
	c.Assert(err, check.IsNil)
	err = s.newFakeImage(&p, "localhost:3030/base/img", nil)
	c.Assert(err, check.IsNil)
	err = p.PushImage("localhost:3030/base/img", "")
	c.Assert(err, check.IsNil)
	c.Assert(requests, check.HasLen, 3)
	c.Assert(requests[0].URL.Path, check.Equals, "/images/create")
	c.Assert(requests[1].URL.Path, check.Equals, "/images/localhost:3030/base/img/json")
	c.Assert(requests[2].URL.Path, check.Equals, "/images/localhost:3030/base/img/push")
	c.Assert(requests[2].URL.RawQuery, check.Equals, "")
	err = s.newFakeImage(&p, "localhost:3030/base/img:v2", nil)
	c.Assert(err, check.IsNil)
	err = p.PushImage("localhost:3030/base/img", "v2")
	c.Assert(err, check.IsNil)
	c.Assert(requests, check.HasLen, 6)
	c.Assert(requests[3].URL.Path, check.Equals, "/images/create")
	c.Assert(requests[4].URL.Path, check.Equals, "/images/localhost:3030/base/img:v2/json")
	c.Assert(requests[5].URL.Path, check.Equals, "/images/localhost:3030/base/img/push")
	c.Assert(requests[5].URL.RawQuery, check.Equals, "tag=v2")
}
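The last assertions show the convention the push call relies on: the tag is never part of the push path, it travels in the tag query parameter, and a ":" in the image name only counts as a tag separator when it comes after the last "/" (so registry ports like localhost:3030 survive). Below is a small sketch of that split, using only the standard library and a hypothetical splitRepoTag name.

package main

import (
	"fmt"
	"strings"
)

// splitRepoTag separates "repo[:tag]", treating ":" as a tag separator only
// when it appears after the last "/", so a registry port is not mistaken
// for a tag.
func splitRepoTag(image string) (repo, tag string) {
	lastColon := strings.LastIndex(image, ":")
	lastSlash := strings.LastIndex(image, "/")
	if lastColon > lastSlash {
		return image[:lastColon], image[lastColon+1:]
	}
	return image, ""
}

func main() {
	repo, tag := splitRepoTag("localhost:3030/base/img:v2")
	fmt.Printf("repo=%s tag=%q\n", repo, tag) // repo=localhost:3030/base/img tag="v2"
	repo, tag = splitRepoTag("localhost:3030/base/img")
	fmt.Printf("repo=%s tag=%q\n", repo, tag) // repo=localhost:3030/base/img tag=""
}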
Example #21
func (s *HandlersSuite) TestListContainersByAppHandler(c *gocheck.C) {
	var result []container
	coll := collection()
	dCluster, _ = cluster.New(segScheduler, nil)
	err := coll.Insert(container{ID: "blabla", AppName: "appbla", HostAddr: "http://cittavld1182.globoi.com"})
	c.Assert(err, gocheck.IsNil)
	defer coll.Remove(bson.M{"id": "blabla"})
	err = coll.Insert(container{ID: "bleble", AppName: "appbla", HostAddr: "http://cittavld1180.globoi.com"})
	c.Assert(err, gocheck.IsNil)
	defer coll.Remove(bson.M{"id": "bleble"})
	req, err := http.NewRequest("GET", "/node/appbla/containers?:appname=appbla", nil)
	rec := httptest.NewRecorder()
	err = listContainersHandler(rec, req, nil)
	c.Assert(err, gocheck.IsNil)
	body, err := ioutil.ReadAll(rec.Body)
	c.Assert(err, gocheck.IsNil)
	err = json.Unmarshal(body, &result)
	c.Assert(err, gocheck.IsNil)
	c.Assert(result[0].ID, gocheck.DeepEquals, "blabla")
	c.Assert(result[0].AppName, gocheck.DeepEquals, "appbla")
	c.Assert(result[0].HostAddr, gocheck.DeepEquals, "http://cittavld1182.globoi.com")
	c.Assert(result[1].ID, gocheck.DeepEquals, "bleble")
	c.Assert(result[1].AppName, gocheck.DeepEquals, "appbla")
	c.Assert(result[1].HostAddr, gocheck.DeepEquals, "http://cittavld1180.globoi.com")
}
Example #22
func (s *HandlersSuite) TestAutoScaleRunHandler(c *check.C) {
	mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{},
		cluster.Node{Address: "localhost:1999", Metadata: map[string]string{
			"pool": "pool1",
		}},
	)
	config.Set("docker:auto-scale:enabled", true)
	defer config.Unset("docker:auto-scale:enabled")
	config.Set("docker:auto-scale:max-container-count", 2)
	defer config.Unset("docker:auto-scale:max-container-count")
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("POST", "/docker/autoscale/run", nil)
	c.Assert(err, check.IsNil)
	request.Header.Set("Authorization", "bearer "+s.token.GetValue())
	server := api.RunServer(true)
	server.ServeHTTP(recorder, request)
	c.Assert(recorder.Code, check.Equals, http.StatusOK)
	c.Assert(recorder.Header().Get("Content-Type"), check.Equals, "application/x-json-stream")
	body := recorder.Body.String()
	parts := strings.Split(body, "\n")
	c.Assert(parts, check.DeepEquals, []string{
		`{"Message":"running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}`,
		`{"Message":"nothing to do for \"pool\": \"pool1\"\n"}`,
		``,
	})
	c.Assert(eventtest.EventDesc{
		Target: event.Target{Type: event.TargetTypePool},
		Owner:  s.token.GetUserName(),
		Kind:   "node.autoscale.update.run",
	}, eventtest.HasEvent)
}
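The application/x-json-stream body asserted here is just newline-delimited JSON objects with a Message field. A standalone sketch of how a client could consume such a stream with encoding/json (the streamMessage type is an assumption for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type streamMessage struct {
	Message string `json:"Message"`
}

func main() {
	// Two newline-delimited objects, shaped like the response body above.
	body := `{"Message":"running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}` + "\n" +
		`{"Message":"nothing to do for \"pool\": \"pool1\"\n"}` + "\n"
	dec := json.NewDecoder(strings.NewReader(body))
	for {
		var m streamMessage
		if err := dec.Decode(&m); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Print(m.Message)
	}
}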
Example #23
func (s *S) TestGetHostAddrWithSegregatedScheduler(c *gocheck.C) {
	conn, err := db.Conn()
	c.Assert(err, gocheck.IsNil)
	defer conn.Close()
	coll := conn.Collection(schedulerCollection)
	p := Pool{Name: "pool1", Nodes: []string{
		"http://remotehost:8080",
		"http://remotehost:8081",
		"http://10.10.10.1:8082",
	}}
	err = coll.Insert(p)
	defer coll.RemoveAll(bson.M{"_id": p.Name})
	cmutex.Lock()
	old := dCluster
	dCluster, err = cluster.New(segScheduler, &mapStorage{})
	c.Assert(err, gocheck.IsNil)
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		dCluster = old
		cmutex.Unlock()
	}()
	var tests = []struct {
		input    string
		expected string
	}{
		{"http://remotehost:8080", "remotehost"},
		{"http://remotehost:8081", "remotehost"},
		{"http://10.10.10.1:8082", "10.10.10.1"},
		{"server33", ""},
	}
	for _, t := range tests {
		c.Check(getHostAddr(t.input), gocheck.Equals, t.expected)
	}
}
func (s *HandlersSuite) TestAddNodeHandlerCreatingAnIaasMachine(c *gocheck.C) {
	iaas.RegisterIaasProvider("test-iaas", TestIaaS{})
	dCluster, _ = cluster.New(segregatedScheduler{}, &cluster.MapStorage{})
	p := Pool{Name: "pool1"}
	s.conn.Collection(schedulerCollection).Insert(p)
	defer s.conn.Collection(schedulerCollection).RemoveId("pool1")
	b := bytes.NewBufferString(`{"pool": "pool1", "id": "test1"}`)
	req, err := http.NewRequest("POST", "/docker/node?register=false", b)
	c.Assert(err, gocheck.IsNil)
	rec := httptest.NewRecorder()
	err = addNodeHandler(rec, req, nil)
	c.Assert(err, gocheck.IsNil)
	var result map[string]string
	err = json.NewDecoder(rec.Body).Decode(&result)
	c.Assert(err, gocheck.IsNil)
	c.Assert(result, gocheck.DeepEquals, map[string]string{"description": "my iaas description"})
	nodes, err := dCluster.Nodes()
	c.Assert(err, gocheck.IsNil)
	c.Assert(nodes, gocheck.HasLen, 1)
	c.Assert(nodes[0].Address, gocheck.Equals, "http://test1.fake.host:1234")
	c.Assert(nodes[0].Metadata, gocheck.DeepEquals, map[string]string{
		"id":   "test1",
		"pool": "pool1",
		"iaas": "test-iaas",
	})
}
Example #25
func (s *S) TestProvisionerPlatformAdd(c *gocheck.C) {
	var requests []*http.Request
	server, err := dtesting.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		requests = append(requests, r)
	})
	c.Assert(err, gocheck.IsNil)
	defer server.Stop()
	config.Set("docker:registry", "localhost:3030")
	defer config.Unset("docker:registry")
	var storage mapStorage
	storage.StoreImage("localhost:3030/base", "server0")
	cmutex.Lock()
	oldDockerCluster := dCluster
	dCluster, _ = cluster.New(nil, &storage,
		cluster.Node{ID: "server0", Address: server.URL()})
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		dCluster = oldDockerCluster
		cmutex.Unlock()
	}()
	args := make(map[string]string)
	args["dockerfile"] = "http://localhost/Dockerfile"
	p := dockerProvisioner{}
	err = p.PlatformAdd("test", args, bytes.NewBuffer(nil))
	c.Assert(err, gocheck.IsNil)
	c.Assert(requests, gocheck.HasLen, 2)
	queryString := requests[0].URL.Query()
	c.Assert(queryString.Get("t"), gocheck.Equals, assembleImageName("test"))
	c.Assert(queryString.Get("remote"), gocheck.Equals, "http://localhost/Dockerfile")
}
func (s *HandlersSuite) TestFixContainerHandler(c *gocheck.C) {
	coll := collection()
	defer coll.Close()
	err := coll.Insert(
		container{
			ID:       "9930c24f1c4x",
			AppName:  "makea",
			Type:     "python",
			Status:   provision.StatusStarted.String(),
			IP:       "127.0.0.4",
			HostPort: "9025",
			HostAddr: "127.0.0.1",
		},
	)
	c.Assert(err, gocheck.IsNil)
	defer coll.RemoveAll(bson.M{"appname": "makea"})
	cleanup, server := startDocker()
	defer cleanup()
	var storage cluster.MapStorage
	storage.StoreContainer("9930c24f1c4x", server.URL)
	cmutex.Lock()
	dCluster, err = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL},
	)
	cmutex.Unlock()
	request, err := http.NewRequest("POST", "/fix-containers", nil)
	c.Assert(err, gocheck.IsNil)
	recorder := httptest.NewRecorder()
	err = fixContainersHandler(recorder, request, nil)
	c.Assert(err, gocheck.IsNil)
	cont, err := getContainer("9930c24f1c4x")
	c.Assert(err, gocheck.IsNil)
	c.Assert(cont.IP, gocheck.Equals, "127.0.0.9")
	c.Assert(cont.HostPort, gocheck.Equals, "9999")
}
func (s *S) TestDockerCluster(c *gocheck.C) {
	config.Set("docker:servers", []string{"http://localhost:4243", "http://10.10.10.10:4243"})
	defer config.Unset("docker:servers")
	nodes, err := dCluster.Nodes()
	c.Assert(err, gocheck.IsNil)
	cmutex.Lock()
	dCluster = nil
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		defer cmutex.Unlock()
		dCluster, err = cluster.New(nil, &cluster.MapStorage{}, nodes...)
		c.Assert(err, gocheck.IsNil)
	}()
	config.Set("docker:cluster:redis-server", "127.0.0.1:6379")
	defer config.Unset("docker:cluster:redis-server")
	clus := dockerCluster()
	c.Assert(clus, gocheck.NotNil)
	currentNodes, err := clus.Nodes()
	c.Assert(err, gocheck.IsNil)
	sortedNodes := NodeList(currentNodes)
	sort.Sort(sortedNodes)
	c.Assert(sortedNodes, gocheck.DeepEquals, NodeList([]cluster.Node{
		{Address: "http://10.10.10.10:4243", Metadata: map[string]string{}},
		{Address: "http://localhost:4243", Metadata: map[string]string{}},
	}))
}
Example #28
func (s *S) TestSchedulerNoNodesWithDefaultPool(c *check.C) {
	provision.RemovePool("test-default")
	app := app.App{Name: "bill", Teams: []string{"jean"}}
	err := s.storage.Apps().Insert(app)
	c.Assert(err, check.IsNil)
	defer s.storage.Apps().Remove(bson.M{"name": app.Name})
	scheduler := segregatedScheduler{provisioner: s.p}
	clusterInstance, err := cluster.New(&scheduler, &cluster.MapStorage{})
	c.Assert(err, check.IsNil)
	o := provision.AddPoolOptions{Name: "mypool"}
	err = provision.AddPool(o)
	c.Assert(err, check.IsNil)
	o = provision.AddPoolOptions{Name: "mypool2"}
	err = provision.AddPool(o)
	c.Assert(err, check.IsNil)
	defer provision.RemovePool("mypool")
	defer provision.RemovePool("mypool2")
	provision.AddTeamsToPool("mypool", []string{"jean"})
	provision.AddTeamsToPool("mypool2", []string{"jean"})
	opts := docker.CreateContainerOptions{}
	schedOpts := []string{app.Name, "web"}
	node, err := scheduler.Schedule(clusterInstance, opts, schedOpts)
	c.Assert(node.Address, check.Equals, "")
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), check.Matches, "No nodes found with one of the following metadata: pool=mypool, pool=mypool2")
}
Example #29
func (s *S) TestSchedulerScheduleByTeamOwner(c *check.C) {
	a1 := app.App{Name: "impius", Teams: []string{}, TeamOwner: "tsuruteam"}
	cont1 := container.Container{ID: "1", Name: "impius1", AppName: a1.Name}
	err := s.storage.Apps().Insert(a1)
	c.Assert(err, check.IsNil)
	defer s.storage.Apps().RemoveAll(bson.M{"name": a1.Name})
	p := provision.Pool{Name: "pool1", Teams: []string{"tsuruteam"}}
	o := provision.AddPoolOptions{Name: p.Name}
	err = provision.AddPool(o)
	c.Assert(err, check.IsNil)
	defer provision.RemovePool(p.Name)
	err = provision.AddTeamsToPool(p.Name, p.Teams)
	c.Assert(err, check.IsNil)
	contColl := s.p.Collection()
	defer contColl.Close()
	err = contColl.Insert(cont1)
	c.Assert(err, check.IsNil)
	defer contColl.RemoveAll(bson.M{"name": cont1.Name})
	scheduler := segregatedScheduler{provisioner: s.p}
	clusterInstance, err := cluster.New(&scheduler, &cluster.MapStorage{})
	s.p.cluster = clusterInstance
	c.Assert(err, check.IsNil)
	err = clusterInstance.Register(cluster.Node{
		Address:  s.server.URL(),
		Metadata: map[string]string{"pool": "pool1"},
	})
	c.Assert(err, check.IsNil)
	opts := docker.CreateContainerOptions{Name: cont1.Name}
	node, err := scheduler.Schedule(clusterInstance, opts, []string{a1.Name, "web"})
	c.Assert(err, check.IsNil)
	c.Check(node.Address, check.Equals, s.server.URL())
}
Example #30
func (s *S) TestProvisionerPlatformRemoveReturnsStorageError(c *gocheck.C) {
	registryServer := httptest.NewServer(nil)
	u, _ := url.Parse(registryServer.URL)
	config.Set("docker:registry", u.Host)
	defer config.Unset("docker:registry")
	var requests []*http.Request
	server, err := dtesting.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
		requests = append(requests, r)
	})
	c.Assert(err, gocheck.IsNil)
	defer server.Stop()
	var storage cluster.MapStorage
	cmutex.Lock()
	oldDockerCluster := dCluster
	dCluster, _ = cluster.New(nil, &storage,
		cluster.Node{Address: server.URL()})
	cmutex.Unlock()
	defer func() {
		cmutex.Lock()
		dCluster = oldDockerCluster
		cmutex.Unlock()
	}()
	p := dockerProvisioner{}
	err = p.PlatformRemove("test")
	c.Assert(err, gocheck.NotNil)
	c.Assert(err, gocheck.DeepEquals, dstorage.ErrNoSuchImage)
}