func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, error) { apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true}) if err != nil { return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err) } for _, apiContainer := range apiContainers { // Sometimes the Docker API prefixes container names with / // like it does in these commands. But if there's no // set name, it just uses the ID without a /...ugh. switch len(apiContainer.Names) { case 0: if apiContainer.ID == name { return &apiContainer, nil } default: for _, containerName := range apiContainer.Names { if strings.TrimLeft(containerName, "/") == name { return &apiContainer, nil } } } } return nil, nil }
// GetContainerNum returns container number in the system func GetContainerNum(client *docker.Client, all bool) int { containers, err := client.ListContainers(docker.ListContainersOptions{All: all}) if err != nil { panic(fmt.Sprintf("Error list containers: %v", err)) } return len(containers) }
func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, error) { apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true}) if err != nil { return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err) } for _, apiContainer := range apiContainers { // Sometimes the Docker API prefixes container names with / // like it does in these commands. But if there's no // set name, it just uses the ID without a /...ugh. var dockerContainerName string if len(apiContainer.Names) > 0 { dockerContainerName = strings.TrimLeft(apiContainer.Names[0], "/") } else { dockerContainerName = apiContainer.ID } if dockerContainerName == name { return &apiContainer, nil } } return nil, nil }
func cleanDuplicateContainer(client *docker.Client, project Project) error { containers, err := client.ListContainers(docker.ListContainersOptions{All: true}) if err != nil { return err } for _, container := range containers { names := container.Names if len(names) > 0 && project.Name == strings.TrimLeft(names[0], "/") { if strings.Contains(container.Status, "Up") { fmt.Println("stopping", project.Name) if err := client.KillContainer(docker.KillContainerOptions{ID: container.ID}); err != nil { fmt.Println("could not stop container") return err } } if err := client.RemoveContainer(docker.RemoveContainerOptions{ ID: container.ID, }); err != nil { fmt.Println("could not remove container", err.Error()) return err } fmt.Println("removed duplicate container") break } } return nil }
func DoListContainerBenchmark(client *docker.Client, interval, testPeriod time.Duration, all bool, stopchan chan int) []int { startTime := time.Now() latencies := []int{} for { start := time.Now() client.ListContainers(docker.ListContainersOptions{All: all}) end := time.Now() latencies = append(latencies, int(end.Sub(start).Nanoseconds())) if stopchan == nil { if time.Now().Sub(startTime) >= testPeriod { return latencies } } else { select { case <-stopchan: return latencies default: } } if interval != 0 { time.Sleep(interval) } } return latencies }
func getContainers(client *docker.Client) []*docker.Container { allContainers, _ := client.ListContainers(docker.ListContainersOptions{All: false}) var containers []*docker.Container for _, c := range allContainers { container, _ := client.InspectContainer(c.ID) containers = append(containers, container) } return containers }
// GetContainerIDs returns all the container ids in the system func GetContainerIDs(client *docker.Client) (containerIDs []string) { containers, err := client.ListContainers(docker.ListContainersOptions{All: true}) if err != nil { panic(fmt.Sprintf("Error list containers: %v", err)) } for _, container := range containers { containerIDs = append(containerIDs, container.ID) } return containerIDs }
func getContainers(client *docker.Client) ([]*RuntimeContainer, error) { apiContainers, err := client.ListContainers(docker.ListContainersOptions{ All: false, Size: false, }) if err != nil { return nil, err } containers := []*RuntimeContainer{} for _, apiContainer := range apiContainers { container, err := client.InspectContainer(apiContainer.ID) if err != nil { log.Printf("error inspecting container: %s: %s\n", apiContainer.ID, err) continue } registry, repository, tag := splitDockerImage(container.Config.Image) runtimeContainer := &RuntimeContainer{ ID: container.ID, Image: DockerImage{ Registry: registry, Repository: repository, Tag: tag, }, Name: strings.TrimLeft(container.Name, "/"), Gateway: container.NetworkSettings.Gateway, Addresses: []Address{}, Env: make(map[string]string), } for k, v := range container.NetworkSettings.Ports { address := Address{ IP: container.NetworkSettings.IPAddress, Port: k.Port(), } if len(v) > 0 { address.HostPort = v[0].HostPort } runtimeContainer.Addresses = append(runtimeContainer.Addresses, address) } for _, entry := range container.Config.Env { parts := strings.Split(entry, "=") runtimeContainer.Env[parts[0]] = parts[1] } containers = append(containers, runtimeContainer) } return containers, nil }
// GetContainerByName looks up the hosts containers with the specified name and // returns it, or an error. func GetContainerByName(client *dockerclient.Client, name string) (*dockerclient.APIContainers, error) { containers, err := client.ListContainers(dockerclient.ListContainersOptions{All: true, Filters: NAME.Eq(name)}) if err != nil { return nil, err } if len(containers) == 0 { return nil, nil } return &containers[0], nil }
// GetContainersByFilter looks up the hosts containers with the specified filters and // returns a list of container matching it, or an error. func GetContainersByFilter(client *dockerclient.Client, filters ...map[string][]string) ([]dockerclient.APIContainers, error) { var filterResult map[string][]string for _, filter := range filters { if filterResult == nil { filterResult = filter } else { filterResult = And(filterResult, filter) } } return client.ListContainers(dockerclient.ListContainersOptions{All: true, Filters: filterResult}) }
// GetContainerByID looks up the hosts containers with the specified Id and // returns it, or an error. func GetContainerByID(client *dockerclient.Client, id string) (*dockerclient.APIContainers, error) { containers, err := client.ListContainers( dockerclient.ListContainersOptions{All: true, Filters: map[string][]string{"id": {id}}}) if err != nil { return nil, err } if len(containers) == 0 { return nil, nil } return &containers[0], nil }
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) { apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true}) if err != nil { return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err) } for _, apiContainer := range apiContainers { if apiContainer.ID == ID { return &apiContainer, nil } } return nil, nil }
// DoListContainerBenchmark does periodically ListContainers with specific interval, returns latencies of // all the calls in nanoseconds func DoListContainerBenchmark(client *docker.Client, interval, testPeriod time.Duration, listAll bool) []int { startTime := time.Now() latencies := []int{} for { start := time.Now() client.ListContainers(docker.ListContainersOptions{All: listAll}) latencies = append(latencies, int(time.Since(start).Nanoseconds())) if time.Now().Sub(startTime) >= testPeriod { return latencies } if interval != 0 { time.Sleep(interval) } } return latencies }
func getByLabel(client *dockerClient.Client, key, value string) (*dockerClient.APIContainers, error) { containers, err := client.ListContainers(dockerClient.ListContainersOptions{ All: true, Filters: map[string][]string{ config.LABEL: {fmt.Sprintf("%s=%s", key, value)}, }, }) if err != nil { return nil, err } if len(containers) == 0 { return nil, nil } sort.Sort(ByCreated(containers)) return &containers[0], nil }
// getExistingContainers получает от докера все запущенные контейнеры и добавляет // в ssh-сервер те, у кого есть ssh func getExistingContainers(server *SSHServer, client *docker.Client) { options := docker.ListContainersOptions{All: true} containers, err := client.ListContainers(options) if err != nil { log.Fatalf("Failed to get list of containers: %v", err) } for _, c := range containers { if len(c.Ports) == 0 { continue } for _, port := range c.Ports { // Если у контейнера есть 22 порт, то добавляем его в SSH-сервер if port.PrivatePort == 22 { log.Printf("Added %v: %s:%d\n", c.ID[:12], port.IP, port.PublicPort) server.AddHost(c.ID, fmt.Sprintf("%s:%d", port.IP, port.PublicPort)) } } } }
func listContainers(client *docker.Client, all bool) (map[string]*docker.Container, error) { var opts docker.ListContainersOptions if all { opts.All = true } else { opts.Filters = map[string][]string{"status": []string{"running"}} } l, err := client.ListContainers(opts) if err != nil { return nil, err } m := make(map[string]*docker.Container, 0) for _, c := range l { info, err := client.InspectContainer(c.ID) if err != nil { DEBUG("failed to inspect container: %s", err) } m[info.Name] = info } return m, nil }
// cleanVolumesDocker119 runs forever, periodically deleting Docker volume
// directories (directory layout used by Docker API version 1.19) that no
// container has referenced for 100 consecutive passes. Intended to be run as
// a goroutine; it signals the package-level WaitGroup on return.
func cleanVolumesDocker119(client *docker.Client, apiVersion string) {
	defer wg.Done()
	log.Printf("Vol Cleanup: starting volume cleanup(ver %s) ...", apiVersion)
	// Volume paths embed a 64-hex-character volume ID; this extracts it.
	re := regexp.MustCompile(".*/([0-9a-fA-F]{64}).*")
	// volumesMap[id] = weight
	// weight = 0 ~ 99, increace on every iteration if it is not used
	// weight = 100, remove it
	volumesMap := make(map[string]int)
	volumeDir := path.Join(*pDockerRootDir, "volumes")
	for {
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Vol Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Vol Cleanup: cannot get container inspect", err)
					break
				}
				// Reset the weight of every volume still referenced by a container.
				for _, volPath := range containerInspect.Volumes {
					terms := re.FindStringSubmatch(volPath)
					if len(terms) == 2 {
						id := terms[1]
						volumesMap[id] = 0
					}
				}
			}
			if inspect_error {
				// Skip this whole pass: acting on partial data could delete
				// volumes that are actually in use.
				time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
				continue
			}
		}
		// Age every volume directory currently on disk by one tick.
		files, err := ioutil.ReadDir(volumeDir)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				id := f.Name()
				weight := volumesMap[id]
				volumesMap[id] = weight + 1
			}
		}
		// Remove the unused volumes
		counter := 0
		for id, weight := range volumesMap {
			if weight >= 100 {
				volPath := path.Join(volumeDir, id)
				log.Printf("Vol Cleanup: removing volume %s", volPath)
				err := os.RemoveAll(volPath)
				if err != nil {
					log.Printf("Vol Cleanup: %s", err)
				} else {
					// Only forget the volume once it is actually gone;
					// otherwise retry on the next pass.
					delete(volumesMap, id)
					counter += 1
				}
			}
		}
		log.Printf("Vol Cleanup: %d volumes have been removed", counter)
		// Sleep
		log.Printf("Vol Cleanup: next cleanup will be start in %d seconds", *pVolumeCleanInterval)
		time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
	}
}
// cleanVolumesDocker118 runs forever, periodically deleting volume directories
// for the older Docker layout (API version 1.18), where volume data can live
// under both vfs/dir and volumes. Deletes paths unreferenced by any container
// for 100 consecutive passes. Intended to be run as a goroutine; it signals
// the package-level WaitGroup on return.
func cleanVolumesDocker118(client *docker.Client, apiVersion string) {
	defer wg.Done()
	log.Printf("Vol Cleanup: starting volume cleanup(ver %s) ...", apiVersion)
	// volumesMap[volPath] = weight
	// weight = 0 ~ 99, increace on every iteration if it is not used
	// weight = 100, remove it
	volumesMap := make(map[string]int)
	volumeDir1 := path.Join(*pDockerRootDir, "vfs/dir")
	volumeDir2 := path.Join(*pDockerRootDir, "volumes")
	for {
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Vol Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Vol Cleanup: cannot get container inspect", err)
					break
				}
				for _, volPath := range containerInspect.Volumes {
					// An in-use volume gets weight 0; a vfs/dir path is also
					// mirrored to its volumes/ twin, since both views of the
					// same data exist on disk in this layout.
					volumesMap[volPath] = 0
					if strings.Contains(volPath, "docker/vfs/dir") {
						volPath2 := strings.Replace(volPath, "vfs/dir", "volumes", 1)
						volumesMap[volPath2] = 0
					}
				}
			}
			if inspect_error {
				// Skip this pass: partial data could delete live volumes.
				time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
				continue
			}
		}
		// Age every directory currently present in both volume roots.
		files, err := ioutil.ReadDir(volumeDir1)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				volPath := path.Join(volumeDir1, f.Name())
				weight := volumesMap[volPath]
				volumesMap[volPath] = weight + 1
			}
		}
		files, err = ioutil.ReadDir(volumeDir2)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				volPath := path.Join(volumeDir2, f.Name())
				weight := volumesMap[volPath]
				volumesMap[volPath] = weight + 1
			}
		}
		// Remove the unused volumes
		counter := 0
		for volPath, weight := range volumesMap {
			if weight == 100 {
				log.Printf("Vol Cleanup: removing volume %s", volPath)
				err := os.RemoveAll(volPath)
				if err != nil {
					log.Printf("Vol Cleanup: %s", err)
				}
				delete(volumesMap, volPath)
				counter += 1
			}
		}
		log.Printf("Vol Cleanup: %d volumes have been removed", counter)
		// Sleep
		log.Printf("Vol Cleanup: next cleanup will be start in %d seconds", *pVolumeCleanInterval)
		time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
	}
}
// cleanImages runs forever, periodically removing top-level images that are
// not used by any container and not in the *pImageLocked allow-list. It lists
// container image usage twice — before and after a *pImageCleanDelayed grace
// period — so images pulled for a container that starts during the window are
// not deleted. Intended to be run as a goroutine; it signals the package-level
// WaitGroup on return.
func cleanImages(client *docker.Client) {
	defer wg.Done()
	log.Printf("Img Cleanup: the following images will be locked: %s", *pImageLocked)
	log.Println("Img Cleanup: starting image cleanup ...")
	for {
		// imageIdMap[imageID] = isRemovable
		imageIdMap := make(map[string]bool)
		// Get the image ID list before the cleanup
		images, err := client.ListImages(docker.ListImagesOptions{All: false})
		if err != nil {
			log.Println("Img Cleanup: cannot get images list", err)
			time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
			continue
		}
		for _, image := range images {
			imageIdMap[image.ID] = true
		}
		// Get the image IDs used by all the containers
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Img Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Img Cleanup: cannot get container inspect", err)
					break
				}
				// An image referenced by any container is not removable.
				delete(imageIdMap, containerInspect.Image)
			}
			if inspect_error {
				// Skip this pass: acting on partial usage data could delete
				// an image that is actually in use.
				time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
				continue
			}
		}
		// Get all the locked image ID
		if *pImageLocked != "" {
			lockedImages := strings.Split(*pImageLocked, ",")
			for _, lockedImage := range lockedImages {
				imageInspect, err := client.InspectImage(strings.Trim(lockedImage, " "))
				if err == nil {
					delete(imageIdMap, imageInspect.ID)
				}
			}
		}
		// Sleep for the delay time
		log.Printf("Img Cleanup: wait %d seconds for the cleaning", *pImageCleanDelayed)
		time.Sleep(time.Duration(*pImageCleanDelayed) * time.Second)
		// Get the image IDs used by all the containers again after the delay time
		containersDelayed, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Img Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containersDelayed {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Img Cleanup: cannot get container inspect", err)
					break
				}
				delete(imageIdMap, containerInspect.Image)
			}
			if inspect_error {
				time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
				continue
			}
		}
		// Remove the unused images
		counter := 0
		for id, removable := range imageIdMap {
			if removable {
				log.Printf("Img Cleanup: removing image %s", id)
				err := client.RemoveImage(id)
				if err != nil {
					log.Printf("Img Cleanup: %s", err)
				}
				counter += 1
			}
		}
		log.Printf("Img Cleanup: %d images have been removed", counter)
		// Sleep again
		log.Printf("Img Cleanup: next cleanup will be start in %d seconds", *pImageCleanInterval)
		time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
	}
}
// getContainers lists the running containers (no sizes) and inspects each one
// into a RuntimeContainer: image parts, name, hostname, network addresses
// (IPv4 and IPv6), volumes, Swarm node info, environment and labels.
// Containers that fail inspection are logged and skipped.
func getContainers(client *docker.Client) ([]*RuntimeContainer, error) {
	apiContainers, err := client.ListContainers(docker.ListContainersOptions{
		All:  false,
		Size: false,
	})
	if err != nil {
		return nil, err
	}
	containers := []*RuntimeContainer{}
	for _, apiContainer := range apiContainers {
		container, err := client.InspectContainer(apiContainer.ID)
		if err != nil {
			log.Printf("error inspecting container: %s: %s\n", apiContainer.ID, err)
			continue
		}
		registry, repository, tag := splitDockerImage(container.Config.Image)
		runtimeContainer := &RuntimeContainer{
			ID: container.ID,
			Image: DockerImage{
				Registry:   registry,
				Repository: repository,
				Tag:        tag,
			},
			// Docker reports container names with a leading "/".
			Name:         strings.TrimLeft(container.Name, "/"),
			Hostname:     container.Config.Hostname,
			Gateway:      container.NetworkSettings.Gateway,
			Addresses:    []Address{},
			Env:          make(map[string]string),
			Volumes:      make(map[string]Volume),
			Node:         SwarmNode{},
			Labels:       make(map[string]string),
			IP:           container.NetworkSettings.IPAddress,
			IP6LinkLocal: container.NetworkSettings.LinkLocalIPv6Address,
			IP6Global:    container.NetworkSettings.GlobalIPv6Address,
		}
		// One Address per exposed port; the first host binding (if any) wins.
		for k, v := range container.NetworkSettings.Ports {
			address := Address{
				IP:           container.NetworkSettings.IPAddress,
				IP6LinkLocal: container.NetworkSettings.LinkLocalIPv6Address,
				IP6Global:    container.NetworkSettings.GlobalIPv6Address,
				Port:         k.Port(),
				Proto:        k.Proto(),
			}
			if len(v) > 0 {
				address.HostPort = v[0].HostPort
				address.HostIP = v[0].HostIP
			}
			runtimeContainer.Addresses = append(runtimeContainer.Addresses, address)
		}
		// Map container path -> host path with the container's RW flag.
		for k, v := range container.Volumes {
			runtimeContainer.Volumes[k] = Volume{
				Path:      k,
				HostPath:  v,
				ReadWrite: container.VolumesRW[k],
			}
		}
		// Node is only populated when the daemon is part of a Swarm.
		if container.Node != nil {
			runtimeContainer.Node.ID = container.Node.ID
			runtimeContainer.Node.Name = container.Node.Name
			runtimeContainer.Node.Address = Address{
				IP: container.Node.IP,
			}
		}
		runtimeContainer.Env = splitKeyValueSlice(container.Config.Env)
		runtimeContainer.Labels = container.Config.Labels
		containers = append(containers, runtimeContainer)
	}
	return containers, nil
}
// TriggerRefresh refreshes the logstash-forwarder configuration and restarts it.
// It collects log files from all running containers (plus any forwarder config
// embedded in a container), writes the combined config to /tmp, kills the
// previously started forwarder process if one is tracked in the package-level
// cmd/running state, and starts a fresh one. Fatal on Docker or file errors.
func TriggerRefresh(client *docker.Client, logstashEndpoint string, configFile string, quiet bool) {
	defer utils.TimeTrack(time.Now(), "Config generation")
	log.Debug("Generating configuration...")
	forwarderConfig := getConfig(logstashEndpoint, configFile)
	// Only running containers are tailed (All: false).
	containers, err := client.ListContainers(docker.ListContainersOptions{All: false})
	if err != nil {
		log.Fatalf("Unable to retrieve container list from docker: %s", err)
	}
	log.Debug("Found %d containers:", len(containers))
	for i, c := range containers {
		log.Debug("%d. %s", i+1, c.ID)
		container, err := client.InspectContainer(c.ID)
		if err != nil {
			log.Fatalf("Unable to inspect container %s: %s", c.ID, err)
		}
		// Always ship the container's own log file ...
		forwarderConfig.AddContainerLogFile(container)
		// ... plus any logstash-forwarder config found inside the container.
		containerConfig, err := config.NewFromContainer(container)
		if err != nil {
			// A missing config file is expected; anything else is reported.
			if !os.IsNotExist(err) {
				log.Error("Unable to look for logstash-forwarder config in %s: %s", container.ID, err)
			}
		} else {
			for _, file := range containerConfig.Files {
				// Tag each shipped file with the container's hostname.
				file.Fields["host"] = container.Config.Hostname
				forwarderConfig.Files = append(forwarderConfig.Files, file)
			}
		}
	}
	const configPath = "/tmp/logstash-forwarder.conf"
	fo, err := os.Create(configPath)
	if err != nil {
		log.Fatalf("Unable to open %s: %s", configPath, err)
	}
	defer fo.Close()
	j, err := json.MarshalIndent(forwarderConfig, "", " ")
	if err != nil {
		log.Debug("Unable to MarshalIndent logstash-forwarder config: %s", err)
	}
	_, err = fo.Write(j)
	if err != nil {
		log.Fatalf("Unable to write logstash-forwarder config to %s: %s", configPath, err)
	}
	log.Info("Wrote logstash-forwarder config to %s", configPath)
	// Stop the previously launched forwarder, if any, before restarting.
	if running {
		log.Info("Waiting for logstash-forwarder to stop")
		// perhaps use SIGTERM first instead of just Kill()?
		// if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		if err := cmd.Process.Kill(); err != nil {
			log.Error("Unable to stop logstash-forwarder")
		}
		// Wait to reap the process and avoid leaving a zombie.
		if _, err := cmd.Process.Wait(); err != nil {
			log.Error("Unable to wait for logstash-forwarder to stop: %s", err)
		}
		log.Info("Stopped logstash-forwarder")
	}
	cmd = exec.Command("logstash-forwarder", "-config", configPath, fmt.Sprintf("-quiet=%t", quiet))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		log.Fatalf("Unable to start logstash-forwarder: %s", err)
	}
	running = true
	log.Info("Starting logstash-forwarder...")
}
func registerContainers(docker *dockerapi.Client, events chan *dockerapi.APIEvents, dns resolver.Resolver, containerDomain string, hostIP net.IP) error { // TODO add an options struct instead of passing all as parameters // though passing the events channel from an options struct was triggering // data race warnings within AddEventListener, so needs more investigation if events == nil { events = make(chan *dockerapi.APIEvents) } if err := docker.AddEventListener(events); err != nil { return err } if !strings.HasPrefix(containerDomain, ".") { containerDomain = "." + containerDomain } getAddress := func(container *dockerapi.Container) (net.IP, error) { for { if container.NetworkSettings.IPAddress != "" { return net.ParseIP(container.NetworkSettings.IPAddress), nil } if container.HostConfig.NetworkMode == "host" { if hostIP == nil { return nil, errors.New("IP not available with network mode \"host\"") } else { return hostIP, nil } } if strings.HasPrefix(container.HostConfig.NetworkMode, "container:") { otherId := container.HostConfig.NetworkMode[len("container:"):] var err error container, err = docker.InspectContainer(otherId) if err != nil { return nil, err } continue } return nil, fmt.Errorf("unknown network mode", container.HostConfig.NetworkMode) } } addContainer := func(containerId string) error { container, err := docker.InspectContainer(containerId) if err != nil { return err } addr, err := getAddress(container) if err != nil { return err } err = dns.AddHost(containerId, addr, container.Config.Hostname, container.Name[1:]+containerDomain) if err != nil { return err } env := parseContainerEnv(container.Config.Env, "DNS_") if dnsDomains, ok := env["DNS_RESOLVES"]; ok { if dnsDomains == "" { return errors.New("empty DNS_RESOLVES, should contain a comma-separated list with at least one domain") } port := 53 if portString := env["DNS_PORT"]; portString != "" { port, err = strconv.Atoi(portString) if err != nil { return errors.New("invalid DNS_PORT \"" + portString 
+ "\", should contain a number") } } domains := strings.Split(dnsDomains, ",") err = dns.AddUpstream(containerId, addr, port, domains...) if err != nil { return err } } if bridge := container.NetworkSettings.Bridge; bridge != "" { bridgeAddr := net.ParseIP(container.NetworkSettings.Gateway) err = dns.AddHost("bridge:"+bridge, bridgeAddr, bridge) if err != nil { return err } } return nil } containers, err := docker.ListContainers(dockerapi.ListContainersOptions{}) if err != nil { return err } for _, listing := range containers { if err := addContainer(listing.ID); err != nil { log.Printf("error adding container %s: %s\n", listing.ID[:12], err) } } if err = dns.Listen(); err != nil { return err } defer dns.Close() for msg := range events { go func(msg *dockerapi.APIEvents) { switch msg.Status { case "start": if err := addContainer(msg.ID); err != nil { log.Printf("error adding container %s: %s\n", msg.ID[:12], err) } case "die": dns.RemoveHost(msg.ID) dns.RemoveUpstream(msg.ID) } }(msg) } return errors.New("docker event loop closed") }