Example #1
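// TestHealerHealNodeDestroyError checks that healNode still replaces a failing node ("addr1" -> "addr2")
// when destroying the old IaaS machine fails, and that the destroy error is only logged.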
func (s *S) TestHealerHealNodeDestroyError(c *check.C) {
	factory, iaasInst := iaasTesting.NewHealerIaaSConstructorWithInst("addr1")
	iaasInst.DelErr = fmt.Errorf("my destroy error")
	iaas.RegisterIaasProvider("my-healer-iaas", factory)
	_, err := iaas.CreateMachineForIaaS("my-healer-iaas", map[string]string{})
	c.Assert(err, check.IsNil)
	iaasInst.Addr = "addr2"
	config.Set("iaas:node-protocol", "http")
	config.Set("iaas:node-port", 2)
	defer config.Unset("iaas:node-protocol")
	defer config.Unset("iaas:node-port")
	p := provisiontest.ProvisionerInstance
	err = p.AddNode(provision.AddNodeOptions{
		Address:  "http://addr1:1",
		Metadata: map[string]string{"iaas": "my-healer-iaas"},
	})
	c.Assert(err, check.IsNil)

	healer := newNodeHealer(nodeHealerArgs{
		WaitTimeNewMachine: time.Minute,
	})
	healer.Shutdown()
	nodes, err := p.ListNodes(nil)
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Address(), check.Equals, "http://addr1:1")

	machines, err := iaas.ListMachines()
	c.Assert(err, check.IsNil)
	c.Assert(machines, check.HasLen, 1)
	c.Assert(machines[0].Address, check.Equals, "addr1")

	buf := bytes.Buffer{}
	log.SetLogger(log.NewWriterLogger(&buf, false))
	defer log.SetLogger(nil)
	created, err := healer.healNode(nodes[0])
	c.Assert(err, check.IsNil)
	c.Assert(created.Address, check.Equals, "http://addr2:2")
	c.Assert(buf.String(), check.Matches, "(?s).*my destroy error.*")

	nodes, err = p.ListNodes(nil)
	c.Assert(err, check.IsNil)
	c.Assert(nodes, check.HasLen, 1)
	c.Assert(nodes[0].Address(), check.Equals, "http://addr2:2")

	machines, err = iaas.ListMachines()
	c.Assert(err, check.IsNil)
	c.Assert(machines, check.HasLen, 1)
	c.Assert(machines[0].Address, check.Equals, "addr2")
}
Example #2
File: suite_test.go Project: tsuru/tsuru
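// SetUpTest resets IaaS, repository, queue, and router state, starts a fake Docker server and cluster,
// clears the test database, and installs a writer logger backed by a safe buffer.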
func (s *S) SetUpTest(c *check.C) {
	config.Set("docker:api-timeout", 2)
	iaas.ResetAll()
	repositorytest.Reset()
	queue.ResetQueue()
	repository.Manager().CreateUser(s.user.Email)
	s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
	err := s.p.Initialize()
	c.Assert(err, check.IsNil)
	queue.ResetQueue()
	s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	s.p.cluster, err = cluster.New(nil, s.p.storage,
		cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
	)
	c.Assert(err, check.IsNil)
	mainDockerProvisioner = s.p
	err = dbtest.ClearAllCollectionsExcept(s.storage.Apps().Database, []string{"users", "tokens", "teams"})
	c.Assert(err, check.IsNil)
	err = clearClusterStorage(s.clusterSess)
	c.Assert(err, check.IsNil)
	routertest.FakeRouter.Reset()
	opts := provision.AddPoolOptions{Name: "test-default", Default: true}
	err = provision.AddPool(opts)
	c.Assert(err, check.IsNil)
	s.storage.Tokens().Remove(bson.M{"appname": bson.M{"$ne": ""}})
	s.logBuf = safe.NewBuffer(nil)
	log.SetLogger(log.NewWriterLogger(s.logBuf, true))
	s.token = createTokenForUser(s.user, "*", string(permission.CtxGlobal), "", c)
}
Example #3
File: event_test.go Project: tsuru/tsuru
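// TestEventDoneLogError points the database at an unreachable address and asserts that evt.Done
// returns the connection error and also logs it through the registered writer logger.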
func (s *S) TestEventDoneLogError(c *check.C) {
	logBuf := safe.NewBuffer(nil)
	log.SetLogger(log.NewWriterLogger(logBuf, false))
	defer log.SetLogger(nil)
	evt, err := New(&Opts{
		Target:  Target{Type: "app", Value: "myapp"},
		Kind:    permission.PermAppUpdateEnvSet,
		Owner:   s.token,
		Allowed: Allowed(permission.PermAppReadEvents),
	})
	c.Assert(err, check.IsNil)
	config.Set("database:url", "127.0.0.1:99999")
	err = evt.Done(nil)
	c.Assert(err, check.ErrorMatches, "no reachable servers")
	c.Assert(logBuf.String(), check.Matches, `(?s).*\[events\] error marking event as done - .*: no reachable servers.*`)
}
Example #4
File: suite_test.go Project: tsuru/tsuru
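// TearDownTest unregisters the logger and stops the fake Docker servers started for the test.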
func (s *S) TearDownTest(c *check.C) {
	log.SetLogger(nil)
	s.server.Stop()
	if s.extraServer != nil {
		s.extraServer.Stop()
		s.extraServer = nil
	}
}
Example #5
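// TestSchedulerScheduleWithMemoryAwarenessWithAutoScale schedules containers for two apps across two nodes
// with a memory-aware scheduler and asserts that a warning is logged once no node has enough memory left for "oblivion".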
func (s *S) TestSchedulerScheduleWithMemoryAwarenessWithAutoScale(c *check.C) {
	config.Set("docker:auto-scale:enabled", true)
	defer config.Unset("docker:auto-scale:enabled")
	logBuf := bytes.NewBuffer(nil)
	log.SetLogger(log.NewWriterLogger(logBuf, false))
	defer log.SetLogger(nil)
	app1 := app.App{Name: "skyrim", Plan: app.Plan{Memory: 60000}, Pool: "mypool"}
	err := s.storage.Apps().Insert(app1)
	c.Assert(err, check.IsNil)
	defer s.storage.Apps().Remove(bson.M{"name": app1.Name})
	app2 := app.App{Name: "oblivion", Plan: app.Plan{Memory: 20000}, Pool: "mypool"}
	err = s.storage.Apps().Insert(app2)
	c.Assert(err, check.IsNil)
	defer s.storage.Apps().Remove(bson.M{"name": app2.Name})
	segSched := segregatedScheduler{
		maxMemoryRatio:      0.8,
		TotalMemoryMetadata: "totalMemory",
		provisioner:         s.p,
	}
	o := provision.AddPoolOptions{Name: "mypool"}
	err = provision.AddPool(o)
	c.Assert(err, check.IsNil)
	defer provision.RemovePool("mypool")
	server1, err := testing.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	defer server1.Stop()
	server2, err := testing.NewServer("127.0.0.1:0", nil, nil)
	c.Assert(err, check.IsNil)
	defer server2.Stop()
	localURL := strings.Replace(server2.URL(), "127.0.0.1", "localhost", -1)
	clusterInstance, err := cluster.New(&segSched, &cluster.MapStorage{},
		cluster.Node{Address: server1.URL(), Metadata: map[string]string{
			"totalMemory": "100000",
			"pool":        "mypool",
		}},
		cluster.Node{Address: localURL, Metadata: map[string]string{
			"totalMemory": "100000",
			"pool":        "mypool",
		}},
	)
	c.Assert(err, check.Equals, nil)
	s.p.cluster = clusterInstance
	cont1 := container.Container{ID: "pre1", Name: "existingUnit1", AppName: "skyrim", HostAddr: "127.0.0.1"}
	contColl := s.p.Collection()
	defer contColl.Close()
	defer contColl.RemoveAll(bson.M{"appname": "skyrim"})
	defer contColl.RemoveAll(bson.M{"appname": "oblivion"})
	err = contColl.Insert(cont1)
	c.Assert(err, check.Equals, nil)
	for i := 0; i < 5; i++ {
		// Use a decimal string for the ID; string(i) on an int would yield a control character, not "0".."4".
		cont := container.Container{ID: fmt.Sprintf("%d", i), Name: fmt.Sprintf("unit%d", i), AppName: "oblivion"}
		err = contColl.Insert(cont)
		c.Assert(err, check.IsNil)
		opts := docker.CreateContainerOptions{
			Name: cont.Name,
		}
		node, err := segSched.Schedule(clusterInstance, opts, []string{cont.AppName, "web"})
		c.Assert(err, check.IsNil)
		c.Assert(node, check.NotNil)
	}
	n, err := contColl.Find(bson.M{"hostaddr": "127.0.0.1"}).Count()
	c.Assert(err, check.Equals, nil)
	c.Check(n, check.Equals, 2)
	n, err = contColl.Find(bson.M{"hostaddr": "localhost"}).Count()
	c.Assert(err, check.Equals, nil)
	c.Check(n, check.Equals, 4)
	n, err = contColl.Find(bson.M{"hostaddr": "127.0.0.1", "appname": "oblivion"}).Count()
	c.Assert(err, check.Equals, nil)
	c.Check(n, check.Equals, 1)
	n, err = contColl.Find(bson.M{"hostaddr": "localhost", "appname": "oblivion"}).Count()
	c.Assert(err, check.Equals, nil)
	c.Check(n, check.Equals, 4)
	cont := container.Container{ID: "post-error", Name: "post-error-1", AppName: "oblivion"}
	err = contColl.Insert(cont)
	c.Assert(err, check.IsNil)
	opts := docker.CreateContainerOptions{
		Name: cont.Name,
	}
	node, err := segSched.Schedule(clusterInstance, opts, []string{cont.AppName, "web"})
	c.Assert(err, check.IsNil)
	c.Assert(node, check.NotNil)
	c.Assert(logBuf.String(), check.Matches, `(?s).*WARNING: no nodes found with enough memory for container of "oblivion": 0.0191MB.*`)
}
Example #6
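// NewFakeLogger installs a FakeLogger backed by a safe.Buffer as the global logger and returns it
// so callers can inspect what was logged.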
func NewFakeLogger() log.Logger {
	var buf safe.Buffer
	l := &FakeLogger{Buf: buf}
	log.SetLogger(l)
	return l
}