func (s *IntegrationTestSuite) unitState(id containers.Identifier) (string, string) {
	props, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	if props == nil || err != nil {
		return "", ""
	}
	return props["ActiveState"].(string), props["SubState"].(string)
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, CONTAINER_CHECK_INTERVAL, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}

	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
	}
}
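// The assertions in this suite poll through an "until" helper that is not
// defined in this section. A minimal sketch of what it presumably does,
// assuming the signature until(timeout, interval time.Duration, f func() bool) bool:
// re-run f every interval until it returns true or the timeout elapses.
func until(timeout, interval time.Duration, f func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if f() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}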
func (s *IntegrationTestSuite) getContainerPid(id containers.Identifier) int {
	container, err := s.dockerClient.InspectContainer(id.ContainerFor())
	if err != nil {
		return 0
	}
	return container.State.Pid
}
func switchnsExec(args []string) {
	var err error

	client, err := docker.GetConnection("unix:///var/run/docker.sock")
	if err != nil {
		fmt.Printf("Unable to connect to server\n")
		os.Exit(3)
	}

	uid := os.Getuid()
	if uid == 0 {
		runCommandInContainer(client, containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier

		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			fmt.Printf("Couldn't lookup uid %d\n", uid)
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			fmt.Printf("Couldn't get identifier from user: %v\n", u)
			os.Exit(2)
		}
		runCommandInContainer(client, containerId.ContainerFor(), []string{"/bin/sh", "-l"}, []string{})
	}
}
// GetSocketActivation reports whether the container's unit file requests
// socket activation and, if so, which activation type.
func GetSocketActivation(id containers.Identifier) (bool, string, error) {
	var err error
	var existing *os.File

	if existing, err = os.Open(id.UnitPathFor()); err != nil {
		return false, "disabled", err
	}
	defer existing.Close()
	return readSocketActivationFromUnitFile(existing)
}
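// readSocketActivationFromUnitFile is called above but not shown in this
// section. A hypothetical sketch of its behavior: scan the unit file for a
// socket-activation directive and return whether activation is enabled plus
// its type. The "X-SocketActivated" key name is an assumption made for
// illustration, not confirmed by this code; assumes bufio, io, and strings.
func readSocketActivationFromUnitFile(r io.Reader) (bool, string, error) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, "X-SocketActivated=") {
			value := strings.TrimPrefix(line, "X-SocketActivated=")
			return value != "" && value != "disabled", value, nil
		}
	}
	if err := scanner.Err(); err != nil {
		return false, "disabled", err
	}
	return false, "disabled", nil
}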
func (s *IntegrationTestSuite) unitTimes(id containers.Identifier) (inactiveStart time.Time, inactiveEnd time.Time, activeStart time.Time, activeEnd time.Time) {
	props, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	if props == nil || err != nil {
		return
	}
	// The *TimestampMonotonic properties are microseconds since boot, not Unix
	// epoch seconds, so the time.Time values built here are only meaningful
	// for ordering comparisons, not as absolute times.
	inactiveStart = time.Unix(int64(props["InactiveEnterTimestampMonotonic"].(uint64)), 0)
	inactiveEnd = time.Unix(int64(props["InactiveExitTimestampMonotonic"].(uint64)), 0)
	activeStart = time.Unix(int64(props["ActiveEnterTimestampMonotonic"].(uint64)), 0)
	activeEnd = time.Unix(int64(props["ActiveExitTimestampMonotonic"].(uint64)), 0)
	return
}
// SetUnitStartOnBoot enables or disables starting the unit at boot by creating
// or removing a symlink in the container-active target's wants directory.
// Both directions are idempotent: an existing link or a missing link is not an error.
func SetUnitStartOnBoot(i containers.Identifier, active bool) error {
	if active {
		if err := os.Symlink(i.UnitPathFor(), activeUnitPathFor(i)); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		if err := os.Remove(activeUnitPathFor(i)); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	return nil
}
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}

	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to
	// races inside of the daemon
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
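// isContainerAvailable is used by the start and restart assertions but is not
// defined in this section. A minimal sketch, assuming it asks Docker whether
// the named container is running and treats "not found yet" as not-ready
// rather than as a hard error (docker.ErrNoSuchContainer appears in
// InitPostStart below):
func isContainerAvailable(client *docker.DockerClient, name string) (bool, error) {
	container, err := client.InspectContainer(name)
	if err == docker.ErrNoSuchContainer {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return container.State.Running && container.State.Pid != 0, nil
}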
func (s *IntegrationTestSuite) assertContainerStarts(c *chk.C, id containers.Identifier) {
	active, _ := s.unitState(id)
	switch active {
	case "active":
		return
	case "activating":
		break
	default:
		c.Errorf("Container %s failed to start - %s", id, active)
		c.FailNow()
		return
	}

	isRunning := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "activating" {
			return false
		}
		c.Errorf("Unit %s start failed with state %s", id, sub)
		c.FailNow()
		return false
	}
	if !until(CONTAINER_STATE_CHANGE_TIMEOUT, time.Second/20, isRunning) {
		c.Errorf("Timeout during start of %s, never got to 'active' state", id)
		c.FailNow()
	}

	container, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
	if err != nil {
		c.Error("Can't check container "+id, err)
		c.FailNow()
	}
	if !container.State.Running {
		c.Logf("Container %s exists, but is not running - race condition %+v", id, container.State)
		//c.Errorf("Container %s is not running %+v", id, container)
		//c.FailNow()
	}
}
func switchnsExec(cmd *cobra.Command, args []string) {
	var err error

	uid := os.Getuid()
	if uid == 0 {
		runCommand(containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier

		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			os.Exit(2)
		}
		runCommand(containerId.ContainerFor(), []string{"/bin/bash", "-l"}, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
	}
}
func (s *IntegrationTestSuite) assertContainerRestarts(c *chk.C, id containers.Identifier) {
	isStarted := func() bool {
		active, sub := s.unitState(id)
		if active == "active" {
			return true
		}
		if active == "deactivating" || active == "activating" {
			return false
		}
		c.Errorf("Unit %s restart failed (%s) in unexpected state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStarted) {
		active, sub := s.unitState(id)
		c.Errorf("Timeout during restart of %s, never got back to 'active' state (%s/%s)", id, active, sub)
		c.FailNow()
	}

	// Docker does not immediately return container status - possibly due to
	// races inside of the daemon
	failed := false
	isContainerUp := func() bool {
		done, err := isContainerAvailable(s.dockerClient, id.ContainerFor())
		if err != nil {
			failed = true
			c.Error("Docker couldn't return container info", err)
			c.FailNow()
		}
		return done
	}
	if !until(TimeoutDockerWait, IntervalHttpCheck, isContainerUp) {
		if !failed {
			c.Errorf("Docker never reported the container running %s", id)
		}
		c.FailNow()
	}
}
func switchnsExec(args []string) {
	var err error

	uid := os.Getuid()
	if uid == 0 {
		runCommandInContainer(containerName, args, envs)
	} else {
		var u *user.User
		var containerId containers.Identifier

		if u, err = user.LookupId(strconv.Itoa(uid)); err != nil {
			fmt.Printf("Couldn't lookup uid %d\n", uid)
			os.Exit(2)
		}
		if containerId, err = containers.NewIdentifierFromUser(u); err != nil {
			fmt.Printf("Couldn't get identifier from user: %v\n", u)
			os.Exit(2)
		}
		runCommandInContainer(containerId.ContainerFor(), []string{"/bin/bash", "-l"}, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
	}
}
func createUser(id containers.Identifier) error {
	cmd := exec.Command("/usr/sbin/useradd", id.LoginFor(), "-m", "-d", id.HomePath(), "-c", "Container user")
	if out, err := cmd.CombinedOutput(); err != nil {
		// CombinedOutput returns []byte; convert so the output logs as text
		log.Println(string(out))
		return err
	}
	selinux.RestoreCon(id.HomePath(), true)
	return nil
}
func (s *IntegrationTestSuite) assertContainerStops(c *chk.C, id containers.Identifier, allowFail bool) {
	active, _ := s.unitState(id)
	switch active {
	case "active", "activating":
		c.Errorf("Container %s stop not properly queued, service is still active - %s", id, active)
		c.FailNow()
		return
	}

	isStopped := func() bool {
		active, sub := s.unitState(id)
		if active == "inactive" {
			return true
		}
		if allowFail && active == "failed" {
			return true
		}
		if active == "deactivating" {
			return false
		}
		c.Errorf("Unit %s stop failed (%s) with state %s", id, active, sub)
		c.FailNow()
		return false
	}
	if !until(TimeoutContainerStateChange, IntervalContainerCheck, isStopped) {
		c.Errorf("Timeout during stop of %s, never got to 'inactive' state", id)
		c.FailNow()
	}

	_, err := s.dockerClient.InspectContainer(id.ContainerFor())
	if err == nil {
		c.Errorf("Container %s is still active in docker, should be stopped and removed", id.ContainerFor())
		c.FailNow()
	}
}
func InitPostStart(dockerSocket string, id containers.Identifier) error {
	var (
		u         *user.User
		container *dc.Container
		err       error
		d         *docker.DockerClient
	)

	if u, err = user.Lookup(id.LoginFor()); err == nil {
		if err := ssh.GenerateAuthorizedKeysFor(u, true, false); err != nil {
			log.Print(err.Error())
		}
	} else {
		log.Print(err.Error())
	}

	if d, err = docker.GetConnection(dockerSocket); err != nil {
		return err
	}

	if file, err := os.Open(id.NetworkLinksPathFor()); err == nil {
		defer file.Close()

		const ContainerInterval = time.Second / 3
		const ContainerWait = time.Second * 12
		for i := 0; i < int(ContainerWait/ContainerInterval); i++ {
			if container, err = d.GetContainer(id.ContainerFor(), true); err != nil {
				return err
			}
			if container.State.Running {
				break
			}
			log.Printf("Waiting for container to run.")
			time.Sleep(ContainerInterval)
		}

		pid, err := d.ChildProcessForContainer(container)
		if err != nil {
			return err
		}
		if pid < 2 {
			return errors.New("support: child PID is not correct")
		}
		log.Printf("Updating network namespaces for %d", pid)
		if err := updateNamespaceNetworkLinks(pid, file); err != nil {
			return err
		}
	}
	return nil
}
func (idler *Idler) idleContainer(id containers.Identifier) bool {
	portPairs, err := containers.GetExistingPorts(id)
	if err != nil {
		fmt.Printf("idler.idleContainer: Error retrieving ports for container %v: %v\n", id, err)
		return false
	}
	iptablePorts, err := iptables.GetIdlerRules(id, false)
	if err != nil {
		fmt.Printf("idler.idleContainer: Error retrieving ports from iptables for container %v: %v\n", id, err)
		return false
	}

	// Only idle if at least one external port is missing its iptables idler rule.
	shouldRecreateRules := false
	for _, portPair := range portPairs {
		extPort := strconv.Itoa(int(portPair.External))
		shouldRecreateRules = shouldRecreateRules || !iptablePorts[extPort]
	}
	if !shouldRecreateRules {
		return false
	}

	//TODO: Ask geard to idle container
	f, err := os.Create(id.IdleUnitPathFor())
	if err != nil {
		fmt.Printf("idler.idleContainer: Could not create idle marker for %s: %v\n", id.UnitNameFor(), err)
		return false
	}
	f.Close()

	if err := systemd.Connection().StopUnitJob(id.UnitNameFor(), "fail"); err != nil {
		fmt.Printf("idler.idleContainer: Could not stop container %s: %v\n", id.UnitNameFor(), err)
		return false
	}
	iptables.IdleContainer(id, idler.hostIp)
	return true
}
func (idler *Idler) unidleContainer(id containers.Identifier, p netfilter.NFPacket) {
	newChanId, wasAlreadyAssigned := idler.getAvailableWaiter(id)
	if newChanId == 0 {
		fmt.Println("unidle: Error while finding wait channel")
		return
	}

	if !wasAlreadyAssigned {
		//TODO: Ask geard to unidle container
		if err := os.Remove(id.IdleUnitPathFor()); err != nil {
			fmt.Printf("unidle: Could not remove idle marker for %s: %v\n", id.UnitNameFor(), err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			return
		}
		if err := systemd.Connection().StartUnitJob(id.UnitNameFor(), "fail"); err != nil {
			fmt.Printf("unidle: Could not start container %s: %v\n", id.UnitNameFor(), err)
			p.SetVerdict(netfilter.NF_ACCEPT)
			return
		}
	}
	p.SetRequeueVerdict(newChanId)
}
func (s *IntegrationTestSuite) assertContainerState(c *chk.C, id containers.Identifier, expectedState ContainerState) {
	var (
		curState   ContainerState
		didStop    bool
		didRestart bool
		ticker     *time.Ticker
	)
	ticker = time.NewTicker(time.Second / 10)
	defer ticker.Stop()

	cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
	c.Assert(err, chk.IsNil)
	switch cInfo["SubState"] {
	case "running":
		curState = CONTAINER_STARTED
	case "dead", "failed", "stop-sigterm", "stop":
		didStop = true
		curState = CONTAINER_STOPPED
	}
	c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)

	if curState != expectedState {
		for {
			select {
			case <-ticker.C:
				cInfo, err := s.sdconn.GetUnitProperties(id.UnitNameFor())
				c.Assert(err, chk.IsNil)
				switch cInfo["SubState"] {
				case "running":
					curState = CONTAINER_STARTED
					if didStop {
						didRestart = true
					}
				case "dead", "failed", "stop-sigterm", "stop":
					didStop = true
					curState = CONTAINER_STOPPED
				}
				c.Logf("Current state: %v, interpreted as %v", cInfo["SubState"], curState)
			case <-time.After(CONTAINER_STATE_CHANGE_TIMEOUT):
				c.Logf("%v %v", didStop, didRestart)
				c.Log("Timed out during state change")
				c.FailNow()
			}
			if (curState == expectedState) || (expectedState == CONTAINER_RESTARTED && didRestart) {
				break
			}
		}
	}

	switch {
	case expectedState == CONTAINER_STOPPED:
		for {
			select {
			case <-ticker.C:
				_, err := s.dockerClient.GetContainer(id.ContainerFor(), false)
				if err != nil {
					return
				}
			case <-time.After(DOCKER_STATE_CHANGE_TIMEOUT):
				c.Log("Timed out waiting for docker container to stop")
				c.FailNow()
			}
		}
	case expectedState == CONTAINER_STARTED || expectedState == CONTAINER_RESTARTED:
		for {
			select {
			case <-ticker.C:
				container, err := s.dockerClient.GetContainer(id.ContainerFor(), true)
				if err != nil {
					continue
				}
				c.Logf("Container state: %v. Info: %v", container.State.Running, container.State)
				if container.State.Running {
					return
				}
			case <-time.After(DOCKER_STATE_CHANGE_TIMEOUT):
				c.Log("Timed out waiting for docker container to start")
				c.FailNow()
			}
		}
	}
}
func InitPreStart(dockerSocket string, id containers.Identifier, imageName string) error {
	var (
		err     error
		imgInfo *dc.Image
		d       *docker.DockerClient
	)

	_, socketActivationType, err := containers.GetSocketActivation(id)
	if err != nil {
		fmt.Printf("init_pre_start: Error while parsing unit file: %v\n", err)
		return err
	}

	if _, err = user.Lookup(id.LoginFor()); err != nil {
		if _, ok := err.(user.UnknownUserError); !ok {
			return err
		}
		if err = createUser(id); err != nil {
			return err
		}
	}
	if d, err = docker.GetConnection(dockerSocket); err != nil {
		return err
	}
	if imgInfo, err = d.GetImage(imageName); err != nil {
		return err
	}
	if err := os.MkdirAll(id.HomePath(), 0700); err != nil {
		return err
	}

	u, _ := user.Lookup(id.LoginFor())
	volumes := make([]string, 0, 10)
	for volPath := range imgInfo.Config.Volumes {
		volumes = append(volumes, volPath)
	}

	imageUser := imgInfo.Config.User
	if imageUser == "" {
		imageUser = "******"
	}

	ports, err := containers.GetExistingPorts(id)
	if err != nil {
		fmt.Printf("container init pre-start: Unable to retrieve port mapping\n")
		return err
	}

	containerData := containers.ContainerInitScript{
		imgInfo.Config.User == "",
		imageUser,
		u.Uid,
		u.Gid,
		strings.Join(imgInfo.Config.Cmd, " "),
		len(volumes) > 0,
		strings.Join(volumes, " "),
		ports,
		socketActivationType == "proxied",
	}

	file, _, err := utils.OpenFileExclusive(path.Join(id.RunPathFor(), "container-init.sh"), 0700)
	if err != nil {
		fmt.Printf("container init pre-start: Unable to open script file: %v\n", err)
		return err
	}
	defer file.Close()

	if erre := containers.ContainerInitTemplate.Execute(file, containerData); erre != nil {
		fmt.Printf("container init pre-start: Unable to output template: %v\n", erre)
		return erre
	}
	if err := file.Close(); err != nil {
		return err
	}

	file, _, err = utils.OpenFileExclusive(path.Join(id.RunPathFor(), "container-cmd.sh"), 0705)
	if err != nil {
		fmt.Printf("container init pre-start: Unable to open cmd script file: %v\n", err)
		return err
	}
	defer file.Close()

	if erre := containers.ContainerCmdTemplate.Execute(file, containerData); erre != nil {
		fmt.Printf("container init pre-start: Unable to output cmd template: %v\n", erre)
		return erre
	}
	if err := file.Close(); err != nil {
		return err
	}

	return nil
}
func activeUnitPathFor(i containers.Identifier) string {
	return filepath.Join("/etc/systemd/system/container-active.target.wants", i.UnitNameFor())
}
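// Usage sketch for SetUnitStartOnBoot and activeUnitPathFor: enabling drops a
// symlink into the container-active.target wants directory, so systemd starts
// the unit when that target is reached at boot; disabling removes it. The
// identifier value below is hypothetical.
func exampleStartOnBoot() error {
	id := containers.Identifier("0001")
	if err := SetUnitStartOnBoot(id, true); err != nil {
		return err
	}
	// ... later, stop starting the container at boot
	return SetUnitStartOnBoot(id, false)
}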
// FIXME: Refactor into separate responsibilities for file creation, templating, and disk access
func generateAuthorizedKeys(id containers.Identifier, u *user.User, forceCreate, printToStdOut bool) error {
	var (
		err      error
		sshKeys  []string
		destFile *os.File
		srcFile  *os.File
		w        *bufio.Writer
	)

	var authorizedKeysPortSpec string
	ports, err := containers.GetExistingPorts(id)
	if err != nil {
		fmt.Printf("container init pre-start: Unable to retrieve port mapping\n")
		return err
	}
	for _, port := range ports {
		authorizedKeysPortSpec += fmt.Sprintf("permitopen=\"127.0.0.1:%v\",", port.External)
	}

	sshKeys, err = filepath.Glob(path.Join(SshAccessBasePath(id), "*"))

	if !printToStdOut {
		os.MkdirAll(id.HomePath(), 0700)
		os.Mkdir(path.Join(id.HomePath(), ".ssh"), 0700)
		authKeysPath := id.AuthKeysPathFor()
		if _, err = os.Stat(authKeysPath); err != nil {
			if !os.IsNotExist(err) {
				return err
			}
		} else {
			if forceCreate {
				os.Remove(authKeysPath)
			} else {
				return nil
			}
		}
		if destFile, err = os.Create(authKeysPath); err != nil {
			return err
		}
		defer destFile.Close()
		w = bufio.NewWriter(destFile)
	} else {
		w = bufio.NewWriter(os.Stdout)
	}

	for _, keyFile := range sshKeys {
		s, err := os.Stat(keyFile)
		if err != nil || s.IsDir() {
			continue
		}
		if srcFile, err = os.Open(keyFile); err != nil {
			continue
		}
		w.WriteString(fmt.Sprintf("command=\"/usr/bin/switchns\",%vno-agent-forwarding,no-X11-forwarding ", authorizedKeysPortSpec))
		io.Copy(w, srcFile)
		w.WriteString("\n")
		srcFile.Close()
	}
	w.Flush()

	if !printToStdOut {
		uid, _ := strconv.Atoi(u.Uid)
		gid, _ := strconv.Atoi(u.Gid)

		for _, path := range []string{
			id.HomePath(),
			filepath.Join(id.HomePath(), ".ssh"),
			filepath.Join(id.HomePath(), ".ssh", "authorized_keys"),
		} {
			if err := os.Chown(path, uid, gid); err != nil {
				return err
			}
		}
		if err := selinux.RestoreCon(id.BaseHomePath(), true); err != nil {
			return err
		}
	}
	return nil
}
func InitPostStart(dockerSocket string, id containers.Identifier) error {
	var (
		u         *user.User
		container *dc.Container
		err       error
		d         *docker.DockerClient
	)

	if u, err = user.Lookup(id.LoginFor()); err == nil {
		if err := ssh.GenerateAuthorizedKeysFor(u, true, false); err != nil {
			log.Print(err.Error())
		}
	} else {
		log.Print(err.Error())
	}

	if d, err = docker.GetConnection(dockerSocket); err != nil {
		return err
	}

	if file, err := os.Open(id.NetworkLinksPathFor()); err == nil {
		defer file.Close()

		const ContainerInterval = time.Second / 10
		const ContainerWait = time.Second * 15
		for i := 0; i < int(ContainerWait/ContainerInterval); i++ {
			if container, err = d.InspectContainer(id.ContainerFor()); err != nil {
				if err == docker.ErrNoSuchContainer {
					//log.Printf("Waiting for container to be available.")
					time.Sleep(ContainerInterval)
					continue
				}
				return err
			}
			if container.State.Running && container.State.Pid != 0 {
				break
			}
			//log.Printf("Waiting for container to report available.")
			time.Sleep(ContainerInterval)
		}
		if container == nil {
			return fmt.Errorf("container %s was not visible through Docker before timeout", id.ContainerFor())
		}

		pid, err := d.ChildProcessForContainer(container)
		if err != nil {
			return err
		}
		if pid <= 1 {
			return errors.New("child PID is not correct")
		}
		log.Printf("Updating network namespaces for %d", pid)
		if err := updateNamespaceNetworkLinks(pid, file); err != nil {
			return err
		}
	}
	return nil
}
func (s *IntegrationTestSuite) getContainerPid(id containers.Identifier) int {
	container, err := s.dockerClient.GetContainer(id.ContainerFor(), true)
	if err != nil {
		// Match the InspectContainer variant above: report PID 0 rather than
		// dereferencing a nil container on error.
		return 0
	}
	return container.State.Pid
}