func (s *IntegrationTestSuite) assertContainerStartsAndExits(c *chk.C, start time.Time, id containers.Identifier) {
	hasStarted := func() bool {
		_, inactiveEnd, activeStart, _ := s.unitTimes(id)
		if inactiveEnd.IsZero() || activeStart.IsZero() {
			c.Logf("Unit timestamps are not yet populated")
		}
		if inactiveEnd.Before(start) || activeStart.Before(start) {
			return false
		}
		return true
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, hasStarted) {
		c.Errorf("The service did not start in the allotted time")
		c.FailNow()
	}

	hasCompleted := func() bool {
		switch active, _ := s.unitState(id); active {
		case "active", "activating", "deactivating":
			return false
		}
		return true
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, hasCompleted) {
		c.Errorf("The service did not finish in the allotted time")
		c.FailNow()
	}
}
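// Many of the assertions in this suite rely on an `until` polling helper that is
// not shown in this section. The following is only a minimal sketch of what such
// a helper might look like, assuming the signature
// until(timeout, interval time.Duration, f func() bool) bool implied by its call
// sites (the only import it needs is "time"): it invokes f on every interval tick
// and returns true as soon as f does, or false once the timeout elapses.
func until(timeout, interval time.Duration, f func() bool) bool {
	deadline := time.After(timeout)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if f() {
				return true
			}
		case <-deadline:
			return false
		}
	}
}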
func (s *IntegrationTestSuite) TestLongContainerName(c *chk.C) {
	id, err := containers.NewIdentifier("IntTest006xxxxxxxxxxxxxx")
	c.Assert(err, chk.IsNil)
	s.containerIds = append(s.containerIds, id)
	hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)

	cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId, "--start", "--ports=8080:0", "--isolate")
	data, err := cmd.CombinedOutput()
	c.Log(string(data))
	c.Assert(err, chk.IsNil)

	s.assertContainerStarts(c, id)
	s.assertFilePresent(c, id.UnitPathFor(), 0664, true)
	s.assertFilePresent(c, filepath.Join(id.RunPathFor(), "container-init.sh"), 0700, false)

	ports, err := containers.GetExistingPorts(id)
	c.Assert(err, chk.IsNil)
	c.Assert(len(ports), chk.Equals, 1)

	httpAlive := func() bool {
		resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v", ports[0].External))
		if err == nil {
			c.Assert(resp.StatusCode, chk.Equals, 200)
			return true
		}
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalHttpCheck, httpAlive) {
		c.Errorf("Unable to retrieve a 200 status code from port %d", ports[0].External)
		c.FailNow()
	}
}
func (s *XMouseAreaTestSuite) Test_connectMotionOut(c *C.C) {
	ch := make(chan struct{})
	xmouseArea.connectMotionOut(func(_, _ int32, id string) {
		close(ch)
	})
	xmouseArea.RegisterFullScreen()
	mockXMouseArea.emitMotionOut(0, 0, "0")
	select {
	case <-ch:
	case <-time.After(time.Second):
		c.FailNow()
	}
}
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}

	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to races inside of the daemon.
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
func (s *S) TestStresstest(c *gocheck.C) {
	if skipStress {
		c.Skip("Skipped stresstest")
	}
	LogLevel = 1

	p := NewParser(strings.NewReader(backgroundSuite), "background")
	background, err := p.ReadSuite()
	if err != nil {
		c.Fatalf("Cannot read suite: %s", err)
	}

	p = NewParser(strings.NewReader(stressSuite), "suite")
	suite, err := p.ReadSuite()
	if err != nil {
		c.Fatalf("Cannot read suite: %s", err)
	}

	r0 := suite.Stresstest(background, 0, 3, 100)
	r10 := suite.Stresstest(background, 10, 3, 100)
	r30 := suite.Stresstest(background, 30, 2, 100)
	time.Sleep(100 * time.Millisecond)
	r60 := suite.Stresstest(background, 60, 1, 100)
	time.Sleep(100 * time.Millisecond)
	r100 := suite.Stresstest(background, 100, 1, 100)
	time.Sleep(200 * time.Millisecond)
	r150 := suite.Stresstest(background, 150, 1, 100)
	time.Sleep(200 * time.Millisecond)
	r200 := suite.Stresstest(background, 200, 5, 100)
	time.Sleep(200 * time.Millisecond)

	testPrintStResult("Load 0", r0)
	testPrintStResult("Load 10", r10)
	testPrintStResult("Load 30", r30)
	testPrintStResult("Load 60", r60)
	testPrintStResult("Load 100", r100)
	testPrintStResult("Load 150", r150)
	testPrintStResult("Load 200", r200)

	if r0.Total <= 0 || r0.N <= 0 {
		c.Error("No tests run without load")
		c.FailNow()
	}
	if r0.Fail > 0 || r0.Err > 0 {
		c.Error("Failures without load")
		c.FailNow()
	}
}
func (s *FoundationS) TestFailNow(c *gocheck.C) {
	defer (func() {
		if !c.Failed() {
			c.Error("FailNow() didn't fail the test")
		} else {
			c.Succeed()
			if c.GetTestLog() != "" {
				c.Error("Something got logged:\n" + c.GetTestLog())
			}
		}
	})()

	c.FailNow()
	c.Log("FailNow() didn't stop the test")
}
func assertEntitiesEqual(c *gc.C, got, want []params.EntityInfo) {
	if len(got) == 0 {
		got = nil
	}
	if len(want) == 0 {
		want = nil
	}
	if reflect.DeepEqual(got, want) {
		return
	}
	c.Errorf("entity mismatch; got len %d; want %d", len(got), len(want))
	c.Logf("got:")
	for _, e := range got {
		c.Logf("\t%T %#v", e, e)
	}
	c.Logf("expected:")
	for _, e := range want {
		c.Logf("\t%T %#v", e, e)
	}
	c.FailNow()
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, CONTAINER_CHECK_INTERVAL, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}

	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
	}
}
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}

	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}

	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
		//c.Errorf("Container %s is not running %+v", id, container)
		//c.FailNow()
	}
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to races inside of the daemon.
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
func (s *IntegrationTestSuite) assertContainerStops(c *chk.C, id containers.Identifier, allowFail bool) {
	active, _ := s.unitState(id)
	switch active {
	case "active", "activating":
		c.Errorf("Container %s stop not properly queued, service is still active - %s", id, active)
		c.FailNow()
		return
	}

	isStopped := func() bool {
		active, sub := s.unitState(id)
		if active == "inactive" {
			return true
		}
		if allowFail && active == "failed" {
			return true
		}
		if active == "deactivating" {
			return false
		}
		c.Errorf("Unit %s stop failed (%s) with state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStopped) {
		c.Errorf("Timeout during stop of %s, never got to 'inactive' state", id)
		c.FailNow()
	}

	_, err := s.dockerClient.InspectContainer(id.ContainerFor())
	if err == nil {
		c.Errorf("Container %s is still active in docker, should be stopped and removed", id.ContainerFor())
		c.FailNow()
	}
}
// writeTempFile creates a temporary file and writes the given text to it,
// returning the os.File for further use.
func writeTempFile(text string, c *gc.C) *os.File {
	file, err := ioutil.TempFile("", "parser_test-")
	if err != nil {
		c.Fatal(err.Error())
		c.FailNow()
	}
	_, err = file.WriteString(text)
	if err != nil {
		c.Fatal(err.Error())
		c.FailNow()
	}
	err = file.Close()
	if err != nil {
		c.Fatal(err.Error())
		c.FailNow()
	}
	return file
}
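// A hypothetical usage of writeTempFile, sketched here for illustration only:
// the caller is responsible for removing the file, since writeTempFile creates
// and closes it but never deletes it. The suite name, test name, and file
// contents below are assumptions, not part of the original code.
func (s *parserSuite) TestReadsTempFile(c *gc.C) {
	file := writeTempFile("key: value\n", c)
	defer os.Remove(file.Name())

	data, err := ioutil.ReadFile(file.Name())
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), gc.Equals, "key: value\n")
}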
func (s *IntegrationTestSuite) assertContainerState(c *chk.C, id containers.Identifier, expectedState ContainerState) {
	var (
		curState   ContainerState
		didStop    bool
		didRestart bool
		ticker     *time.Ticker
	)
	ticker = time.NewTicker(time.Second / 10)
	defer ticker.Stop()

	cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	c.Assert(err, chk.IsNil)
	switch cInfo["SubState"] {
	case "running":
		curState = CONTAINER_STARTED
	case "dead", "failed", "stop-sigterm", "stop":
		didStop = true
		curState = CONTAINER_STOPPED
	}
	c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)

	if curState != expectedState {
		for {
			select {
			case <-ticker.C:
				cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
				c.Assert(err, chk.IsNil)
				switch cInfo["SubState"] {
				case "running":
					curState = CONTAINER_STARTED
					if didStop {
						didRestart = true
					}
				case "dead", "failed", "stop-sigterm", "stop":
					didStop = true
					curState = CONTAINER_STOPPED
				}
				c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)
			case <-time.After(CONTAINER_STATE_CHANGE_TIMEOUT):
				c.Logf("%v %v", didStop, didRestart)
				c.Log("Timed out during state change")
				c.Assert(1, chk.Equals, 2)
			}
			if (curState == expectedState) || (expectedState == CONTAINER_RESTARTED && didRestart) {
				break
			}
		}
	}

	switch {
	case expectedState == CONTAINER_STOPPED:
		for {
			select {
			case <-ticker.C:
				_, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
				if err != nil {
					return
				}
			case <-time.After(DOCKER_STATE_CHANGE_TIMEOUT):
				c.Log("Timed out waiting for docker container to stop")
				c.FailNow()
			}
		}
	case expectedState == CONTAINER_STARTED || expectedState == CONTAINER_RESTARTED:
		for {
			select {
			case <-ticker.C:
				container, err := s.dockerClient.GetContainer(id.ContainerFor(), true)
				if err != nil {
					continue
				}
				c.Logf("Container state: %v. Info: %v", container.State.Running, container.State)
				if container.State.Running {
					return
				}
			case <-time.After(DOCKER_STATE_CHANGE_TIMEOUT):
				c.Log("Timed out waiting for docker container to start")
				c.FailNow()
			}
		}
	}
}
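// assertContainerState refers to a ContainerState type and the constants
// CONTAINER_STARTED, CONTAINER_STOPPED, and CONTAINER_RESTARTED, which are
// defined elsewhere in the suite. A minimal sketch of the assumed declarations,
// included here only to make the function above readable on its own; the actual
// type and ordering may differ.
type ContainerState int

const (
	CONTAINER_STOPPED ContainerState = iota
	CONTAINER_STARTED
	CONTAINER_RESTARTED
)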
func (s *IntegrationTestSuite) TestInstallVolume(c *chk.C) {
	id, err := containers.NewIdentifier("TestInstallVolume")
	c.Assert(err, chk.IsNil)
	s.containerIds = append(s.containerIds, id)
	hostContainerId := fmt.Sprintf("%v/%v", s.daemonURI, id)

	mountPath, err := ioutil.TempDir("/tmp", "bind-rw")
	c.Assert(err, chk.IsNil)
	roMountPath, err := ioutil.TempDir("/tmp", "bind-ro")
	c.Assert(err, chk.IsNil)
	roTestFilePath := path.Join(roMountPath, "ro-test")
	ioutil.WriteFile(roTestFilePath, []byte{}, 0664)

	cmd := exec.Command("/usr/bin/gear", "install", TestImage, hostContainerId,
		fmt.Sprintf("--volumes=/test-volume,%s:/test-bind-ro:ro,%s:/test-bind-rw", roMountPath, mountPath),
		"--ports=8080:0", "--start")
	data, err := cmd.CombinedOutput()
	c.Log(string(data))
	c.Assert(err, chk.IsNil)

	s.assertContainerStarts(c, id)
	oldPid := s.getContainerPid(id)

	ports, err := containers.GetExistingPorts(id)
	c.Assert(err, chk.IsNil)
	c.Assert(len(ports), chk.Equals, 1)

	httpAlive := func() bool {
		resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%v", ports[0].External))
		if err == nil {
			c.Assert(resp.StatusCode, chk.Equals, 200)
			return true
		}
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalHttpCheck, httpAlive) {
		c.Errorf("Unable to retrieve a 200 status code from port %d", ports[0].External)
		c.FailNow()
	}

	// The read-only bind mount should be readable but not writable.
	exitCode, err := namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "ls", "/test-bind-ro/ro-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "touch", "/test-bind-ro/rw-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Not(chk.Equals), 0)

	// The read-write bind mount, the container volume, and the container's own
	// filesystem should all be writable.
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "touch", "/test-bind-rw/rw-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "touch", "/test-volume/rw-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "touch", "/tmp/transient-file"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)

	// Restart the container and verify it comes back with a new pid.
	cmd = exec.Command("/usr/bin/gear", "restart", hostContainerId)
	data, err = cmd.CombinedOutput()
	c.Log(string(data))
	c.Assert(err, chk.IsNil)
	s.assertContainerRestarts(c, id)
	newPid := s.getContainerPid(id)
	c.Assert(oldPid, chk.Not(chk.Equals), newPid)

	// Data on the bind mount and the volume should survive the restart; files on
	// the container's transient filesystem should not.
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "ls", "/test-bind-rw/rw-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "ls", "/test-volume/rw-test"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Equals, 0)
	exitCode, err = namespace.RunCommandInContainer(s.dockerClient, "TestInstallVolume", []string{"/bin/busybox", "ls", "/tmp/transient-file"}, []string{})
	c.Assert(err, chk.IsNil)
	c.Assert(exitCode, chk.Not(chk.Equals), 0)
}