// afterContainerReady waits until the cluster is ready and then sends struct{}
// on the returned channel. Readiness detection is heuristic: it simply checks
// that the number of containers needed to run the cluster is present.
func afterContainerReady(c dockerclient.Client) chan struct{} {
	doneCh := make(chan struct{})

	// Marshal the filter so it can be posted as part of the API request.
	filterLocalMasterStr, err := json.Marshal(FilterLocalMaster)
	if err != nil {
		// Should not reach here.
		panic(fmt.Sprintf(
			"Failed to marshal FilterLocalMaster: %s", err))
	}

	ticker := time.NewTicker(CheckInterval)
	go func() {
		fmt.Fprintf(os.Stderr, "Wait until containers are ready")
		for range ticker.C {
			fmt.Fprintf(os.Stderr, ".")

			// Get container info from the daemon based on the filter.
			localMasters, err := c.ListContainers(true, false, string(filterLocalMasterStr))
			if err != nil {
				// Just ignore the error.
				continue
			}

			if len(localMasters) > 3 {
				fmt.Fprintf(os.Stderr, "\n")
				doneCh <- struct{}{}
				ticker.Stop()
			}
		}
	}()

	return doneCh
}
// BindingAddr provides the address for the container and binding.
func BindingAddr(d dockerclient.Client, name, binding string) (string, error) {
	ci, err := d.InspectContainer(name)
	if err != nil {
		return "", stackerr.Wrap(err)
	}

	ip, err := dockerIP(d)
	if err != nil {
		return "", err
	}

	hostname, err := etcHostsName(ip)
	if err != nil {
		return "", err
	}
	if hostname == "" {
		hostname = ip.String()
	}

	addr := fmt.Sprintf(
		"%s:%s",
		hostname,
		ci.NetworkSettings.Ports[binding][0].HostPort,
	)
	return addr, nil
}
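// printBindingAddr is a usage sketch for BindingAddr above (not from the
// original sources): the container name and the "8080/tcp" binding key are
// illustrative assumptions, following the Docker API's NetworkSettings.Ports
// map key format.
func printBindingAddr(d dockerclient.Client) {
	addr, err := BindingAddr(d, "my-container", "8080/tcp")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("service reachable at", addr)
}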
// removeContainers removes all containers in parallel.
// It returns a result channel and an error channel; if something goes wrong,
// the error is sent on the latter.
func removeContainers(client dockerclient.Client, containers []dockerclient.Container, force, delVolume bool) (chan dockerclient.Container, chan error) {
	var wg sync.WaitGroup
	resultCh, errCh := make(chan dockerclient.Container), make(chan error)

	for _, container := range containers {
		wg.Add(1)
		go func(c dockerclient.Container) {
			defer wg.Done()
			if err := client.RemoveContainer(c.Id, force, delVolume); err != nil {
				errCh <- fmt.Errorf(
					"failed to remove %s (%s): %s", c.Names[0], c.Id, err)
				return
			}
			resultCh <- c
		}(container)
	}

	go func() {
		// Wait until all removal tasks finish, then close the channels.
		wg.Wait()
		close(resultCh)
		close(errCh)
	}()

	return resultCh, errCh
}
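// drainRemoveResults is a hypothetical caller sketch (not from the original
// sources) showing one way to consume both channels returned by
// removeContainers above until each has been closed.
func drainRemoveResults(client dockerclient.Client, containers []dockerclient.Container) {
	resultCh, errCh := removeContainers(client, containers, true, true)
	for resultCh != nil || errCh != nil {
		select {
		case c, ok := <-resultCh:
			if !ok {
				// channel closed; stop selecting on it
				resultCh = nil
				continue
			}
			fmt.Fprintf(os.Stderr, "removed %s\n", c.Names[0])
		case err, ok := <-errCh:
			if !ok {
				// channel closed; stop selecting on it
				errCh = nil
				continue
			}
			fmt.Fprintf(os.Stderr, "remove error: %s\n", err)
		}
	}
}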
func PullImage(client dockerclient.Client, service *Service, image string) error {
	taglessRemote, tag := parsers.ParseRepositoryTag(image)
	if tag == "" {
		image = utils.ImageReference(taglessRemote, tags.DEFAULTTAG)
	}

	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
	if err != nil {
		return err
	}

	authConfig := cliconfig.AuthConfig{}
	if service.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil {
		authConfig = registry.ResolveAuthConfig(service.context.ConfigFile, repoInfo.Index)
	}

	err = client.PullImage(image, &dockerclient.AuthConfig{
		Username: authConfig.Username,
		Password: authConfig.Password,
		Email:    authConfig.Email,
	})
	if err != nil {
		logrus.Errorf("Failed to pull image %s: %v", image, err)
	}

	return err
}
func run(client dockerclient.Client, args []string, input string) (int, error) {
	image := "drone/drone-exec:latest"
	entrypoint := []string{"/bin/drone-exec"}
	args = append(args, "--", input)

	conf := &dockerclient.ContainerConfig{
		Image:      image,
		Entrypoint: entrypoint,
		Cmd:        args,
		HostConfig: dockerclient.HostConfig{
			Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	info, err := docker.Run(client, conf, false)
	if err != nil {
		return 0, err
	}

	client.StopContainer(info.Id, 15)
	client.RemoveContainer(info.Id, true, true)

	return info.State.ExitCode, err
}
func checkContainers(mappings []Mapping, docker dockerclient.Client, etc *etcd.Client) {
	for {
		pointers := map[string]Server{}

		containers, err := docker.ListContainers(false, true, "")
		logFatalIf(err)

		for _, container := range containers {
			name := extractContainerName(container)

			for _, mapping := range mappings {
				if isMappedName(mapping, name) {
					ports := extractContainerPorts(mapping, container)

					for _, port := range ports {
						key := fmt.Sprintf("/nginx/servers/%s/%s/%s", CLUSTER, mapping.Upstream, container.Id[0:12])
						pointers[key] = newServer(HOST, port)
					}
				}
			}
		}

		for key, server := range pointers {
			go announce(key, server, etc)
		}

		time.Sleep(5 * time.Second)
	}
}
// Stop a container
func Stop(docker dockerclient.Client, id string) string {
	// Stop the container (with a 5 second timeout)
	err := docker.StopContainer(id, 5)
	if err != nil {
		log.Fatal(err)
	}
	return "OK"
}
// Start a container
func Start(docker dockerclient.Client, id string) string {
	// Start the container
	hostConfig := &dockerclient.HostConfig{}
	err := docker.StartContainer(id, hostConfig)
	if err != nil {
		log.Fatal(err)
	}
	return "OK"
}
// List the containers
func List(docker dockerclient.Client) []dockerclient.Container {
	// Get all containers, including stopped ones
	containers, err := docker.ListContainers(true, false, "")
	if err != nil {
		log.Fatal(err)
	}
	return containers
}
// createContainer creates a new container using the specified options. Per the
// docker API, the created container is not running and must be started
// explicitly.
func createContainer(client dockerclient.Client, config dockerclient.ContainerConfig) (*Container, error) {
	id, err := client.CreateContainer(&config, "")
	if err != nil {
		return nil, err
	}
	return &Container{
		ID:            id,
		containerInfo: dockerclient.ContainerInfo{Id: id},
		client:        client,
	}, nil
}
func waitFor(once *sync.Once, client dockerclient.Client, endpoint string) {
	once.Do(func() {
		err := ClientOK(endpoint, func() bool {
			_, err := client.Info()
			return err == nil
		})
		if err != nil {
			panic(err.Error())
		}
	})
}
func GetContainerByName(client dockerclient.Client, name string) (*dockerclient.Container, error) {
	containers, err := client.ListContainers(true, false, NAME.Eq(name))
	if err != nil {
		return nil, err
	}
	if len(containers) == 0 {
		return nil, nil
	}
	return &containers[0], nil
}
func GetContainersByFilter(client dockerclient.Client, filter ...string) ([]dockerclient.Container, error) {
	filterResult := ""
	for _, value := range filter {
		if filterResult == "" {
			filterResult = value
		} else {
			filterResult = And(filterResult, value)
		}
	}
	return client.ListContainers(true, false, filterResult)
}
// Create a container
func Create(docker dockerclient.Client, name string, image string) string {
	// Create a container
	containerConfig := &dockerclient.ContainerConfig{
		Image: image,
		// Cmd: []string{"bash"},
		AttachStdin: true,
		Tty:         true,
	}

	id, err := docker.CreateContainer(containerConfig, name)
	if err != nil {
		log.Fatal(err)
	}
	return id
}
// Wait blocks until the named container exits, returning the exit information.
func Wait(client dockerclient.Client, name string) (*dockerclient.ContainerInfo, error) {
	defer func() {
		client.StopContainer(name, 5)
		client.KillContainer(name, "9")
	}()

	for attempts := 0; attempts < 5; attempts++ {
		done := client.Wait(name)
		<-done

		info, err := client.InspectContainer(name)
		if err != nil {
			return nil, err
		}
		if !info.State.Running {
			return info, nil
		}

		log.Debugf("attempting to resume waiting after %d attempts.\n", attempts)
	}

	return nil, errors.New("reached maximum wait attempts")
}
func imageIDFromList(d dockerclient.Client, imageName string) (string, error) {
	images, err := d.ListImages()
	if err != nil {
		return "", stackerr.Wrap(err)
	}
	for _, i := range images {
		for _, t := range i.RepoTags {
			if t == imageName {
				return i.Id, nil
			}
		}
	}
	return "", nil
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {
		// blocks and waits for the container to finish
		// by streaming the logs (to /dev/null). Ideally
		// we could use the `wait` function instead
		rc, err := client.ContainerLogs(info.Id, logOptsTail)
		if err != nil {
			log.Errorf("Error tailing %s. %s\n", conf.Image, err)
			errc <- err
			return
		}
		defer rc.Close()
		StdCopy(outw, errw, rc)

		// fetches the container information
		info, err := client.InspectContainer(info.Id)
		if err != nil {
			log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
			errc <- err
			return
		}
		infoc <- info
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
// Build a container
func Build(docker dockerclient.Client, repoName string, context string) {
	// Build a docker image.
	// some.tar contains the build context (Dockerfile and any files it needs to add/copy)
	dockerBuildContext, err := os.Open(context)
	if err != nil {
		log.Fatal(err)
	}
	defer dockerBuildContext.Close()

	buildImageConfig := &dockerclient.BuildImage{
		Context:        dockerBuildContext,
		RepoName:       repoName,
		SuppressOutput: false,
	}

	reader, err := docker.BuildImage(buildImageConfig)
	if err != nil {
		log.Fatal(err)
	}
	log.Print(reader)
}
func GetContainerById(client dockerclient.Client, id string) (*dockerclient.Container, error) {
	containers, err := client.ListContainers(true, false, "")
	if err != nil {
		return nil, err
	}
	if len(containers) == 0 {
		return nil, nil
	}
	for _, c := range containers {
		if c.Id == id {
			return &c, nil
		}
	}
	return nil, nil
}
func (e *engine) runJobNotify(r *Task, client dockerclient.Client) error {
	name := fmt.Sprintf("drone_build_%d_notify", r.Build.ID)

	defer func() {
		client.KillContainer(name, "9")
		client.RemoveContainer(name, true, true)
	}()

	// encode the build payload to write to stdin
	// when launching the build container
	in, err := encodeToLegacyFormat(r)
	if err != nil {
		log.Errorf("failure to marshal work. %s", err)
		return err
	}

	args := DefaultNotifyArgs
	args = append(args, "--")
	args = append(args, string(in))

	conf := &dockerclient.ContainerConfig{
		Image:      DefaultAgent,
		Entrypoint: DefaultEntrypoint,
		Cmd:        args,
		Env:        e.envs,
		HostConfig: dockerclient.HostConfig{
			Binds:            []string{"/var/run/docker.sock:/var/run/docker.sock"},
			MemorySwappiness: -1,
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	log.Infof("preparing container %s", name)
	info, err := docker.Run(client, conf, name)
	if err != nil {
		log.Errorf("Error starting notification container %s. %s", name, err)
	}

	// for debugging purposes we print a failed notification execution's
	// output to the logs. Otherwise we have no way to troubleshoot failed
	// notifications. This is temporary code until I've come up with
	// a better solution.
	if info != nil && info.State.ExitCode != 0 && log.GetLevel() >= log.InfoLevel {
		var buf bytes.Buffer
		rc, err := client.ContainerLogs(name, docker.LogOpts)
		if err == nil {
			defer rc.Close()
			stdcopy.StdCopy(&buf, &buf, io.LimitReader(rc, 50000))
		}

		log.Infof("Notification container %s exited with %d", name, info.State.ExitCode)
		log.Infoln(buf.String())
	}

	return err
}
// ImageID returns the image ID for the given image name. If the imageName is
// not known locally, it will attempt to pull the image as well.
func ImageID(d dockerclient.Client, imageName string, auth *dockerclient.AuthConfig) (string, error) {
	id, err := imageIDFromList(d, imageName)
	if err != nil {
		return "", err
	}
	if id != "" {
		return id, nil
	}

	if err := d.PullImage(imageName, auth); err != nil {
		return "", stackerr.Wrap(err)
	}

	id, err = imageIDFromList(d, imageName)
	if err != nil {
		return "", err
	}
	if id != "" {
		return id, nil
	}

	return "", stackerr.Newf("image named %q could not be identified", imageName)
}
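// printImageID is a usage sketch for ImageID above (not from the original
// sources): the image name is illustrative, and a nil AuthConfig is assumed to
// be acceptable for public images, matching how PullImage is called with nil
// elsewhere in these helpers.
func printImageID(d dockerclient.Client) {
	id, err := ImageID(d, "busybox:latest", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("image id: %s", id)
}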
// Wait blocks until the named container exits, returning the exit information.
func Wait(client dockerclient.Client, name string) (*dockerclient.ContainerInfo, error) {
	defer func() {
		client.StopContainer(name, 5)
		client.KillContainer(name, "9")
	}()

	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {
		// blocks and waits for the container to finish
		// by streaming the logs (to /dev/null). Ideally
		// we could use the `wait` function instead
		rc, err := client.ContainerLogs(name, LogOptsTail)
		if err != nil {
			errc <- err
			return
		}
		io.Copy(ioutil.Discard, rc)
		rc.Close()

		info, err := client.InspectContainer(name)
		if err != nil {
			errc <- err
			return
		}
		infoc <- info
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return nil, err
	}
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {
		// It's possible that the docker logs endpoint returns before the
		// container is done; we naively resume up to 5 times if the container
		// is still reported to be running when the log stream unblocks.
		var total int64
		for attempts := 0; attempts < 5; attempts++ {
			// blocks and waits for the container to finish
			// by streaming the logs (to /dev/null). Ideally
			// we could use the `wait` function instead
			rc, err := client.ContainerLogs(info.Id, logOptsTail)
			if err != nil {
				log.Errorf("Error tailing %s. %s\n", conf.Image, err)
				errc <- err
				return
			}
			defer rc.Close()

			if total != 0 {
				// Discard the total bytes we've received so far.
				// io.LimitReader returns EOF once it has read the specified
				// number of bytes, per https://golang.org/pkg/io/#LimitReader.
				r := io.LimitReader(rc, total)
				_, err := io.Copy(ioutil.Discard, r)
				if err != nil && err != io.EOF {
					log.Errorf("Error resuming streaming docker logs for %s. %s\n", conf.Image, err)
					errc <- err
					return
				}
			}

			rcv, err := StdCopy(outw, errw, rc)
			if err != nil {
				log.Errorf("Error streaming docker logs for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			// fetches the container information
			info, err := client.InspectContainer(info.Id)
			if err != nil {
				log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			if !info.State.Running {
				// The container is no longer running; there should be no more logs to tail.
				infoc <- info
				return
			}

			total += rcv
			log.Debugf("Attempting to resume log tailing after receiving %d bytes. Attempts %d.\n", total, attempts)
		}
		errc <- errors.New("Maximum number of attempts made while tailing logs.")
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
func (e *engine) runJob(c context.Context, r *Task, updater *updater, client dockerclient.Client) error {
	name := fmt.Sprintf("drone_build_%d_job_%d", r.Build.ID, r.Job.ID)

	defer func() {
		if r.Job.Status == model.StatusRunning {
			r.Job.Status = model.StatusError
			r.Job.Finished = time.Now().UTC().Unix()
			r.Job.ExitCode = 255
		}
		if r.Job.Status == model.StatusPending {
			r.Job.Status = model.StatusError
			r.Job.Started = time.Now().UTC().Unix()
			r.Job.Finished = time.Now().UTC().Unix()
			r.Job.ExitCode = 255
		}
		updater.SetJob(c, r)

		client.KillContainer(name, "9")
		client.RemoveContainer(name, true, true)
	}()

	// marks the task as running
	r.Job.Status = model.StatusRunning
	r.Job.Started = time.Now().UTC().Unix()

	// encode the build payload to write to stdin
	// when launching the build container
	in, err := encodeToLegacyFormat(r)
	if err != nil {
		log.Errorf("failure to marshal work. %s", err)
		return err
	}

	// CREATE AND START BUILD
	args := DefaultBuildArgs
	if r.Build.Event == model.EventPull {
		args = DefaultPullRequestArgs
	}
	args = append(args, "--")
	args = append(args, string(in))

	conf := &dockerclient.ContainerConfig{
		Image:      DefaultAgent,
		Entrypoint: DefaultEntrypoint,
		Cmd:        args,
		Env:        e.envs,
		HostConfig: dockerclient.HostConfig{
			Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	log.Infof("preparing container %s", name)
	client.PullImage(conf.Image, nil)

	_, err = docker.RunDaemon(client, conf, name)
	if err != nil {
		log.Errorf("error starting build container. %s", err)
		return err
	}

	// UPDATE STATUS
	err = updater.SetJob(c, r)
	if err != nil {
		log.Errorf("error updating job status as running. %s", err)
		return err
	}

	// WAIT FOR OUTPUT
	info, builderr := docker.Wait(client, name)

	switch {
	case info.State.ExitCode == 128:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusKilled
	case info.State.ExitCode == 130:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusKilled
	case builderr != nil:
		r.Job.Status = model.StatusError
	case info.State.ExitCode != 0:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusFailure
	default:
		r.Job.Status = model.StatusSuccess
	}

	// send the logs to the datastore
	var buf bytes.Buffer
	rc, err := client.ContainerLogs(name, docker.LogOpts)
	if err != nil && builderr != nil {
		buf.WriteString("Error launching build")
		buf.WriteString(builderr.Error())
	} else if err != nil {
		buf.WriteString("Error launching build")
		buf.WriteString(err.Error())
		log.Errorf("error opening connection to logs. %s", err)
		return err
	} else {
		defer rc.Close()
		stdcopy.StdCopy(&buf, &buf, io.LimitReader(rc, 5000000))
	}

	// update the task in the datastore
	r.Job.Finished = time.Now().UTC().Unix()
	err = updater.SetJob(c, r)
	if err != nil {
		log.Errorf("error updating job after completion. %s", err)
		return err
	}

	err = updater.SetLogs(c, r, ioutil.NopCloser(&buf))
	if err != nil {
		log.Errorf("error updating logs. %s", err)
		return err
	}

	log.Debugf("completed job %d with status %s.", r.Job.ID, r.Job.Status)
	return nil
}
// RunDaemon creates the docker container, pulling images if necessary, starts
// the container and returns the container information. It does not wait for
// the container to exit.
func RunDaemon(client dockerclient.Client, conf *dockerclient.ContainerConfig, name string) (*dockerclient.ContainerInfo, error) {
	// attempts to create the container
	id, err := client.CreateContainer(conf, name)
	if err != nil {
		// and pulls the image and re-creates the container if that fails
		err = client.PullImage(conf.Image, nil)
		if err != nil {
			return nil, err
		}
		id, err = client.CreateContainer(conf, name)
		if err != nil {
			client.RemoveContainer(id, true, true)
			return nil, err
		}
	}

	// fetches the container information
	info, err := client.InspectContainer(id)
	if err != nil {
		client.RemoveContainer(id, true, true)
		return nil, err
	}

	// starts the container
	err = client.StartContainer(id, &conf.HostConfig)
	if err != nil {
		client.RemoveContainer(id, true, true)
		return nil, err
	}

	return info, err
}
func Start(client dockerclient.Client, conf *dockerclient.ContainerConfig, pull bool) (*dockerclient.ContainerInfo, error) {
	// force-pull the image if specified.
	if pull {
		log.Debugf("Pulling image %s", conf.Image)
		client.PullImage(conf.Image, nil)
	}

	// attempts to create the container
	id, err := client.CreateContainer(conf, "")
	if err != nil {
		// and pulls the image and re-creates the container if that fails
		err = client.PullImage(conf.Image, nil)
		if err != nil {
			log.Errorf("Error pulling %s. %s\n", conf.Image, err)
			return nil, err
		}
		id, err = client.CreateContainer(conf, "")
		if err != nil {
			log.Errorf("Error creating %s. %s\n", conf.Image, err)
			client.RemoveContainer(id, true, true)
			return nil, err
		}
	}

	// fetches the container information
	info, err := client.InspectContainer(id)
	if err != nil {
		log.Errorf("Error inspecting %s. %s\n", conf.Image, err)
		client.RemoveContainer(id, true, true)
		return nil, err
	}

	// starts the container
	err = client.StartContainer(id, &conf.HostConfig)
	if err != nil {
		log.Errorf("Error starting %s. %s\n", conf.Image, err)
	}

	return info, err
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {
		// options to fetch the stdout and stderr logs
		// by tailing the output.
		logOptsTail := &dockerclient.LogOptions{
			Follow: true,
			Stdout: true,
			Stderr: true,
		}

		// It's possible that the docker logs endpoint returns before the
		// container is done; we naively resume up to 5 times if the container
		// is still reported to be running when the log stream unblocks.
		for attempts := 0; attempts < 5; attempts++ {
			if attempts > 0 {
				// When resuming the stream, only grab the last line when starting
				// the tailing.
				logOptsTail.Tail = 1
			}

			// blocks and waits for the container to finish
			// by streaming the logs (to /dev/null). Ideally
			// we could use the `wait` function instead
			rc, err := client.ContainerLogs(info.Id, logOptsTail)
			if err != nil {
				log.Errorf("Error tailing %s. %s\n", conf.Image, err)
				errc <- err
				return
			}
			defer rc.Close()

			_, err = StdCopy(outw, errw, rc)
			if err != nil {
				log.Errorf("Error streaming docker logs for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			// fetches the container information
			info, err := client.InspectContainer(info.Id)
			if err != nil {
				log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			if !info.State.Running {
				// The container is no longer running; there should be no more logs to tail.
				infoc <- info
				return
			}

			log.Debugf("Attempting to resume log tailing after %d attempts.\n", attempts)
		}
		errc <- errors.New("Maximum number of attempts made while tailing logs.")
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
// Inspect a container
func Inspect(docker dockerclient.Client, id string) *dockerclient.ContainerInfo {
	info, _ := docker.InspectContainer(id)
	log.Println(info)
	return info
}
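// ExampleLifecycle is a usage sketch (not from the original sources) tying
// together the Create, Start, Inspect, and Stop helpers above. The daemon
// address and image name are illustrative assumptions; NewDockerClient is
// assumed to come from the samalba/dockerclient package used throughout.
func ExampleLifecycle() {
	docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Create, start, inspect, and finally stop a throwaway container.
	id := Create(docker, "example", "busybox:latest")
	Start(docker, id)
	Inspect(docker, id)
	Stop(docker, id)
}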