// assertContainerRestarts waits for the unit to transition back to the
// "active" state after a restart, then verifies the docker container exists.
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, CONTAINER_CHECK_INTERVAL, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}
	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
	}
}
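The `until` helper is called above but not defined in this section. A minimal sketch consistent with its call sites (a timeout, a polling interval, and a predicate; a false return means the timeout expired), assuming only the standard library `time` package:

// until polls condition every interval until it returns true or the timeout
// elapses. This is a hypothetical sketch inferred from the call sites in this
// section; the real helper may differ.
func until(timeout, interval time.Duration, condition func() bool) bool {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	deadline := time.After(timeout)
	for {
		if condition() {
			return true
		}
		select {
		case <-deadline:
			return false
		case <-ticker.C:
			// fall through and re-check the condition
		}
	}
}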
// switchnsExec runs the given command inside the container that belongs to
// the invoking user. Root executes directly against the named container;
// other users are mapped to their container identity and dropped into a
// login shell.
func switchnsExec(args []string) {
	var err error
	client, err := docker.GetConnection("unix:///var/run/docker.sock")
	if err != nil {
		fmt.Printf("Unable to connect to server\n")
		os.Exit(3)
	}
	uid := os.Getuid()
	if uid == 0 {
		runCommandInContainer(client, containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier
		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			fmt.Printf("Couldn't lookup uid %d\n", uid)
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			fmt.Printf("Couldn't get identifier from user: %v\n", u)
			os.Exit(2)
		}
		runCommandInContainer(client, containerId.ContainerFor(), []string{"/bin/sh", "-l"}, []string{})
	}
}
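runCommandInContainer is not shown in this section. A hypothetical sketch of what it could do, resolving the container's init PID through the docker client and then replacing the current process with nsenter so the command runs inside the container's namespaces; the real implementation may instead enter the namespaces directly with setns(2), and the nsenter path is an assumption (assumed imports: fmt, os, strconv, syscall):

// runCommandInContainer: hypothetical sketch. Looks up the running container,
// then execs nsenter targeting its PID so cmd runs inside the container's
// mount, UTS, IPC, network, and PID namespaces.
func runCommandInContainer(client *docker.DockerClient, name string, cmd []string, env []string) {
	container, err := client.GetContainer(name, true) // same call used by the test helpers in this section
	if err != nil || !container.State.Running {
		fmt.Printf("Unable to locate running container %s\n", name)
		os.Exit(3)
	}
	args := []string{"nsenter", "-t", strconv.Itoa(container.State.Pid),
		"-m", "-u", "-i", "-n", "-p", "--"}
	args = append(args, cmd...)
	// Exec never returns on success; the current process is replaced.
	if err := syscall.Exec("/usr/bin/nsenter", args, env); err != nil {
		fmt.Printf("Unable to enter container: %v\n", err)
		os.Exit(3)
	}
}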
func (s *IntegrationTestSuite) getContainerPid(id containers.Identifier) int {
	container, err := s.dockerClient.InspectContainer(id.ContainerFor())
	if err != nil {
		return 0
	}
	return container.State.Pid
}
func InitPostStart(dockerSocket string, id containers.Identifier) error {
	var (
		u         *user.User
		container *dc.Container
		err       error
		d         *docker.DockerClient
	)
	if u, err = user.Lookup(id.LoginFor()); err == nil {
		if err := ssh.GenerateAuthorizedKeysFor(u, true, false); err != nil {
			log.Print(err.Error())
		}
	} else {
		log.Print(err.Error())
	}
	if d, err = docker.GetConnection(dockerSocket); err != nil {
		return err
	}
	if file, err := os.Open(id.NetworkLinksPathFor()); err == nil {
		defer file.Close()
		const ContainerInterval = time.Second / 3
		const ContainerWait = time.Second * 12
		for i := 0; i < int(ContainerWait/ContainerInterval); i++ {
			if container, err = d.GetContainer(id.ContainerFor(), true); err != nil {
				return err
			}
			if container.State.Running {
				break
			} else {
				log.Printf("Waiting for container to run.")
				time.Sleep(ContainerInterval)
			}
		}
		pid, err := d.ChildProcessForContainer(container)
		if err != nil {
			return err
		}
		if pid < 2 {
			return errors.New("support: child PID is not correct")
		}
		log.Printf("Updating network namespaces for %d", pid)
		if err := updateNamespaceNetworkLinks(pid, file); err != nil {
			return err
		}
	}
	return nil
}
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}
	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to
	// races inside of the daemon
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
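isContainerAvailable is also not defined in this section. A plausible sketch, reusing the GetContainer call and the docker.ErrNoSuchContainer sentinel seen elsewhere in these snippets; a container that does not exist yet is treated as "keep polling" rather than a hard error:

// isContainerAvailable reports whether docker sees the named container as
// running. Hypothetical sketch; the real helper may distinguish more error
// kinds.
func isContainerAvailable(client *docker.DockerClient, name string) (bool, error) {
	container, err := client.GetContainer(name, true)
	if err == docker.ErrNoSuchContainer {
		return false, nil // not created yet; caller keeps polling
	}
	if err != nil {
		return false, err
	}
	return container.State.Running, nil
}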
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}
	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}
	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
		//c.Errorf("Container %s is not running %+v", id, container)
		//c.FailNow()
	}
}
func switchnsExec(cmd *cobra.Command, args []string) {
	var err error
	uid := os.Getuid()
	if uid == 0 {
		runCommand(containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier
		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			os.Exit(2)
		}
		runCommand(containerId.ContainerFor(), []string{"/bin/bash", "-l"}, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
	}
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to
	// races inside of the daemon
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
func switchnsExec(args []string) {
	var err error
	uid := os.Getuid()
	if uid == 0 {
		runCommandInContainer(containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier
		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			fmt.Printf("Couldn't lookup uid %d\n", uid)
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			fmt.Printf("Couldn't get identifier from user: %v\n", u)
			os.Exit(2)
		}
		runCommandInContainer(containerId.ContainerFor(), []string{"/bin/bash", "-l"}, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
	}
}
func (s *IntegrationTestSuite) assertContainerStops(c *chk.C, id containers.Identifier, allowFail bool) {
	active, _ := s.unitState(id)
	switch active {
	case "active", "activating":
		c.Errorf("Container %s stop not properly queued, service is still active - %s", id, active)
		c.FailNow()
		return
	}
	isStopped := func() bool {
		active, sub := s.unitState(id)
		if active == "inactive" {
			return true
		}
		if allowFail && active == "failed" {
			return true
		}
		if active == "deactivating" {
			return false
		}
		c.Errorf("Unit %s stop failed (%s) with state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStopped) {
		c.Errorf("Timeout during stop of %s, never got to 'inactive' state", id)
		c.FailNow()
	}
	_, err := s.dockerClient.InspectContainer(id.ContainerFor())
	if err == nil {
		c.Errorf("Container %s is still active in docker, should be stopped and removed", id.ContainerFor())
		c.FailNow()
	}
}
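The unitState helper used throughout these assertions is not shown either. A sketch based on the sdconn.GetUnitProperties usage in assertContainerState below, returning the standard systemd ActiveState and SubState unit properties:

// unitState returns the systemd ActiveState and SubState for the container's
// unit. Hypothetical sketch; error handling is reduced to returning empty
// strings so callers fall into their "unexpected state" branches.
func (s *IntegrationTestSuite) unitState(id containers.Identifier) (active, sub string) {
	props, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	if err != nil {
		return "", ""
	}
	active, _ = props["ActiveState"].(string)
	sub, _ = props["SubState"].(string)
	return active, sub
}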
func InitPostStart(dockerSocket string, id containers.Identifier) error {
	var (
		u         *user.User
		container *dc.Container
		err       error
		d         *docker.DockerClient
	)
	if u, err = user.Lookup(id.LoginFor()); err == nil {
		if err := ssh.GenerateAuthorizedKeysFor(u, true, false); err != nil {
			log.Print(err.Error())
		}
	} else {
		log.Print(err.Error())
	}
	if d, err = docker.GetConnection(dockerSocket); err != nil {
		return err
	}
	if file, err := os.Open(id.NetworkLinksPathFor()); err == nil {
		defer file.Close()
		const ContainerInterval = time.Second / 10
		const ContainerWait = time.Second * 15
		for i := 0; i < int(ContainerWait/ContainerInterval); i++ {
			if container, err = d.InspectContainer(id.ContainerFor()); err != nil {
				if err == docker.ErrNoSuchContainer {
					//log.Printf("Waiting for container to be available.")
					time.Sleep(ContainerInterval)
					continue
				}
				return err
			}
			if container.State.Running && container.State.Pid != 0 {
				break
			} else {
				//log.Printf("Waiting for container to report available.")
				time.Sleep(ContainerInterval)
			}
		}
		if container == nil {
			return fmt.Errorf("container %s was not visible through Docker before timeout", id.ContainerFor())
		}
		pid, err := d.ChildProcessForContainer(container)
		if err != nil {
			return err
		}
		if pid <= 1 {
			return errors.New("child PID is not correct")
		}
		log.Printf("Updating network namespaces for %d", pid)
		if err := updateNamespaceNetworkLinks(pid, file); err != nil {
			return err
		}
	}
	return nil
}
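The polling loop above appears in both versions of InitPostStart and could be factored out. A hypothetical helper under the same assumptions (the dc.Container type, the docker wrapper, and the docker.ErrNoSuchContainer sentinel used above):

// waitForContainerPid waits until the named container is running with a real
// PID, or gives up after the timeout. Hypothetical refactoring of the loop in
// InitPostStart, not the actual implementation.
func waitForContainerPid(d *docker.DockerClient, name string, timeout, interval time.Duration) (*dc.Container, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		container, err := d.InspectContainer(name)
		if err == docker.ErrNoSuchContainer {
			time.Sleep(interval) // not created yet
			continue
		}
		if err != nil {
			return nil, err
		}
		if container.State.Running && container.State.Pid != 0 {
			return container, nil
		}
		time.Sleep(interval) // created but not yet running
	}
	return nil, fmt.Errorf("container %s was not visible through Docker before timeout", name)
}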
func (s *IntegrationTestSuite) assertContainerState(c *chk.C, id containers.Identifier, expectedState ContainerState) {
	var (
		curState   ContainerState
		didStop    bool
		didRestart bool
		ticker     *time.Ticker
	)
	ticker = time.NewTicker(time.Second / 10)
	defer ticker.Stop()

	cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	c.Assert(err, chk.IsNil)
	switch cInfo["SubState"] {
	case "running":
		curState = CONTAINER_STARTED
	case "dead", "failed", "stop-sigterm", "stop":
		didStop = true
		curState = CONTAINER_STOPPED
	}
	c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)

	if curState != expectedState {
		// Create the timeout channel once, outside the loop: a time.After
		// inside the select would be recreated on every ticker fire and
		// could never expire.
		timeout := time.After(CONTAINER_STATE_CHANGE_TIMEOUT)
		for {
			select {
			case <-ticker.C:
				cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
				c.Assert(err, chk.IsNil)
				switch cInfo["SubState"] {
				case "running":
					curState = CONTAINER_STARTED
					if didStop {
						didRestart = true
					}
				case "dead", "failed", "stop-sigterm", "stop":
					didStop = true
					curState = CONTAINER_STOPPED
				}
				c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)
			case <-timeout:
				c.Logf("%v %v", didStop, didRestart)
				c.Log("Timed out during state change")
				c.FailNow()
			}
			if (curState == expectedState) || (expectedState == CONTAINER_RESTARTED && didRestart) {
				break
			}
		}
	}

	switch {
	case expectedState == CONTAINER_STOPPED:
		timeout := time.After(DOCKER_STATE_CHANGE_TIMEOUT)
		for {
			select {
			case <-ticker.C:
				_, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
				if err != nil {
					return
				}
			case <-timeout:
				c.Log("Timed out waiting for docker container to stop")
				c.FailNow()
			}
		}
	case expectedState == CONTAINER_STARTED || expectedState == CONTAINER_RESTARTED:
		timeout := time.After(DOCKER_STATE_CHANGE_TIMEOUT)
		for {
			select {
			case <-ticker.C:
				container, err := s.dockerClient.GetContainer(id.ContainerFor(), true)
				if err != nil {
					continue
				}
				c.Logf("Container state: %v. Info: %v", container.State.Running, container.State)
				if container.State.Running {
					return
				}
			case <-timeout:
				c.Log("Timed out waiting for docker container to start")
				c.FailNow()
			}
		}
	}
}
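Note the design choice in the loops above: each timeout channel is created once before its loop starts. If time.After were evaluated inside the select, a fresh timer would be created on every iteration; since the ticker fires far more often than the timeout duration, the timeout case could then never be chosen, and a container stuck in the wrong state would hang the test indefinitely.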
func (s *IntegrationTestSuite) getContainerPid(id containers.Identifier) int {
	container, err := s.dockerClient.GetContainer(id.ContainerFor(), true)
	if err != nil {
		// Mirror the InspectContainer variant above: report 0 rather than
		// dereferencing a nil container.
		return 0
	}
	return container.State.Pid
}
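One way these tests could use getContainerPid is to prove that a restart actually replaced the container process. A hypothetical assertion in the same style as the helpers above (the function name and semantics are illustrative, not from the original suite):

// assertContainerPidChanged: hypothetical usage sketch. Capture the PID with
// getContainerPid before triggering a restart, then assert it changed.
func (s *IntegrationTestSuite) assertContainerPidChanged(c *chk.C, id containers.Identifier, oldPid int) {
	newPid := s.getContainerPid(id)
	if newPid == 0 || newPid == oldPid {
		c.Errorf("Container %s was not restarted (pid %d -> %d)", id, oldPid, newPid)
		c.FailNow()
	}
}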