func runEvents(dockerCli *client.DockerCli, opts *eventsOptions) error {
	eventFilterArgs := filters.NewArgs()

	// Consolidate all filter flags, and sanity check them early.
	// They'll get processed in the daemon/server.
	for _, f := range opts.filter {
		var err error
		eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
		if err != nil {
			return err
		}
	}

	options := types.EventsOptions{
		Since:   opts.since,
		Until:   opts.until,
		Filters: eventFilterArgs,
	}

	responseBody, err := dockerCli.Client().Events(context.Background(), options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return streamEvents(responseBody, dockerCli.Out())
}
func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
	serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
	if err != nil {
		return []dockerData{}, err
	}

	networkListArgs := filters.NewArgs()
	networkListArgs.Add("driver", "overlay")
	networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})
	if err != nil {
		log.Debugf("Failed to list networks on the docker client, error: %s", err)
		return []dockerData{}, err
	}

	networkMap := make(map[string]*dockertypes.NetworkResource)
	for _, network := range networkList {
		networkToAdd := network
		networkMap[network.ID] = &networkToAdd
	}

	var dockerDataList []dockerData
	for _, service := range serviceList {
		dockerData := parseService(service, networkMap)
		dockerDataList = append(dockerDataList, dockerData)
	}
	return dockerDataList, nil
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
	ctx := context.Background()
	client := dockerCli.Client()

	services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()})
	if err != nil {
		return err
	}

	out := dockerCli.Out()
	if opts.quiet {
		printQuiet(out, services)
	} else {
		taskFilter := filters.NewArgs()
		for _, service := range services {
			taskFilter.Add("service", service.ID)
		}

		tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter})
		if err != nil {
			return err
		}

		running := map[string]int{}
		for _, task := range tasks {
			if task.Status.State == "running" {
				running[task.ServiceID]++
			}
		}

		printTable(out, services, running)
	}
	return nil
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
	ctx := context.Background()
	client := dockerCli.Client()

	services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()})
	if err != nil {
		return err
	}

	out := dockerCli.Out()
	if opts.quiet {
		PrintQuiet(out, services)
	} else {
		taskFilter := filters.NewArgs()
		for _, service := range services {
			taskFilter.Add("service", service.ID)
		}

		tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter})
		if err != nil {
			return err
		}

		nodes, err := client.NodeList(ctx, types.NodeListOptions{})
		if err != nil {
			return err
		}

		PrintNotQuiet(out, services, nodes, tasks)
	}
	return nil
}
// Refresh the status of a container running on the engine. If `full` is true,
// the container will be inspected.
func (e *Engine) refreshContainer(ID string, full bool) (*Container, error) {
	filterArgs := filters.NewArgs()
	filterArgs.Add("id", ID)

	opts := types.ContainerListOptions{
		All:    true,
		Size:   false,
		Filter: filterArgs,
	}
	containers, err := e.apiClient.ContainerList(context.Background(), opts)
	e.CheckConnectionErr(err)
	if err != nil {
		return nil, err
	}

	if len(containers) > 1 {
		// We expect one container, if we get more than one, trigger a full refresh.
		err = e.RefreshContainers(full)
		return nil, err
	}

	if len(containers) == 0 {
		// The container doesn't exist on the engine, remove it.
		e.Lock()
		delete(e.containers, ID)
		e.Unlock()

		return nil, nil
	}

	_, err = e.updateContainer(containers[0], e.containers, full)
	e.RLock()
	container := e.containers[containers[0].ID]
	e.RUnlock()
	return container, err
}
// ListContainers lists all containers matching the filter.
func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
	opts := dockertypes.ContainerListOptions{All: true}

	opts.Filter = dockerfilters.NewArgs()
	if filter != nil {
		if filter.Name != nil {
			opts.Filter.Add("name", filter.GetName())
		}
		if filter.Id != nil {
			opts.Filter.Add("id", filter.GetId())
		}
		if filter.State != nil {
			opts.Filter.Add("status", toDockerContainerStatus(filter.GetState()))
		}
		if filter.PodSandboxId != nil {
			// TODO: implement this after sandbox functions are implemented.
		}
		if filter.LabelSelector != nil {
			for k, v := range filter.LabelSelector {
				opts.Filter.Add("label", fmt.Sprintf("%s=%s", k, v))
			}
		}
	}
	containers, err := ds.client.ListContainers(opts)
	if err != nil {
		return nil, err
	}

	// Convert docker to runtime api containers.
	result := []*runtimeApi.Container{}
	for _, c := range containers {
		result = append(result, toRuntimeAPIContainer(&c))
	}
	return result, nil
}
// Select returns a list of scape containers matching the passed Selector
func Select(selector Selector, running bool) []types.Container {
	logger.Debug("Entering Select with selector ", selector)

	args := filters.NewArgs()
	// Filter just our containers
	args.Add("label", "com.dmp42.scape")

	// Any optional argument passed, add them
	if selector.Name != "" {
		args.Add("name", selector.Name)
	}
	/*
		if selector.Tags != "" {
			args.Add("label", fmt.Sprintf("com.dmp42.scape.tags=%s", selector.Tags))
		}
	*/
	if selector.URL != "" {
		args.Add("label", fmt.Sprintf("com.dmp42.scape.url=%s", selector.URL))
	}
	if selector.Path != "" {
		args.Add("label", fmt.Sprintf("com.dmp42.scape.path=%s", selector.Path))
	}

	options := types.ContainerListOptions{All: !running, Filter: args}
	containers, err := cli.ContainerList(context.Background(), options)
	if err != nil {
		log.Fatal("Failed to run docker list command!", "err", err)
	}

	logger.Debug("Exiting Select with containers ", containers)
	return containers
}
func cleanContainers(c *check.C) *docker.Project {
	client, err := dockerclient.NewEnvClient()
	c.Assert(err, check.IsNil)

	filterArgs := filters.NewArgs()
	filterArgs, err = filters.ParseFlag(d.KermitLabelFilter, filterArgs)
	c.Assert(err, check.IsNil)

	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	c.Assert(err, check.IsNil)

	for _, container := range containers {
		c.Logf("cleaning container %s…", container.ID)
		if err := client.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			c.Errorf("Error while removing container %s : %v\n", container.ID, err)
		}
	}
	return docker.NewProject(client)
}
func (c containerConfig) eventFilter() filters.Args {
	filter := filters.NewArgs()
	filter.Add("type", events.ContainerEventType)
	filter.Add("name", c.name())
	filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
	return filter
}
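// A minimal sketch (not from the original source) of how a filter like the one
// built by eventFilter above is typically consumed: it is passed to the
// engine-api Events call through types.EventsOptions, so only events matching
// the type/name/label constraints are streamed back. The helper name and its
// error handling are assumptions for illustration.
func watchContainerEvents(ctx context.Context, cli client.APIClient, c containerConfig) (io.ReadCloser, error) {
	options := types.EventsOptions{
		Filters: c.eventFilter(), // restrict the stream to this task's container
	}
	// The caller is responsible for closing the returned stream.
	return cli.Events(ctx, options)
}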
// RemoveOrphans implements project.RuntimeProject.RemoveOrphans.
// It removes orphan containers that are part of the project but do not belong
// to any of its services.
func (p *Project) RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error {
	client := p.clientFactory.Create(nil)
	filter := filters.NewArgs()
	filter.Add("label", labels.PROJECT.EqString(projectName))
	containers, err := client.ContainerList(ctx, types.ContainerListOptions{
		Filter: filter,
	})
	if err != nil {
		return err
	}
	currentServices := map[string]struct{}{}
	for _, serviceName := range serviceConfigs.Keys() {
		currentServices[serviceName] = struct{}{}
	}
	for _, container := range containers {
		serviceLabel := container.Labels[labels.SERVICE.Str()]
		if _, ok := currentServices[serviceLabel]; !ok {
			if err := client.ContainerKill(ctx, container.ID, "SIGKILL"); err != nil {
				return err
			}
			if err := client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
				Force: true,
			}); err != nil {
				return err
			}
		}
	}
	return nil
}
func cleanContainers(t *testing.T) *docker.Project {
	client, err := dockerclient.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}

	filterArgs := filters.NewArgs()
	if filterArgs, err = filters.ParseFlag(docker.KermitLabelFilter, filterArgs); err != nil {
		t.Fatal(err)
	}

	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	if err != nil {
		t.Fatal(err)
	}

	for _, container := range containers {
		t.Logf("cleaning container %s…", container.ID)
		if err := client.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			t.Errorf("Error while removing container %s : %v\n", container.ID, err)
		}
	}
	return docker.NewProject(client)
}
// GetImageRevision gets the image revision
func (imageCreator LocalImageCreator) GetImageRevision(dockerInfo *DockerInfo) (*types.Image, error) {
	imageFilters := filters.NewArgs()

	// Append filters as required based on the input.
	repoFilter := TAG_REPO + "=" + dockerInfo.RepoName
	imageFilters.Add("label", repoFilter)

	applicationFilter := TAG_APPLICATION + "=" + dockerInfo.ImageName
	imageFilters.Add("label", applicationFilter)

	revisionFilter := TAG_REVISION + "=" + dockerInfo.Revision
	imageFilters.Add("label", revisionFilter)

	opts := types.ImageListOptions{All: false, Filters: imageFilters}

	images, err := imageCreator.client.ImageList(context.Background(), opts)
	if err == nil && len(images) > 0 {
		return &images[0], nil
	}
	return nil, err
}
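// A minimal usage sketch for GetImageRevision (illustrative only; the repo,
// application, and revision values are hypothetical). All three label filters
// must match for an image to be returned, so this resolves exactly one
// revision of one application.
func exampleGetImageRevision(creator LocalImageCreator) (*types.Image, error) {
	info := &DockerInfo{
		RepoName:  "myrepo",
		ImageName: "myapp",
		Revision:  "v1",
	}
	return creator.GetImageRevision(info)
}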
// CmdNetworkLs lists all the networks managed by docker daemon
//
// Usage: docker network ls [OPTIONS]
func (cli *DockerCli) CmdNetworkLs(args ...string) error {
	cmd := Cli.Subcmd("network ls", nil, "Lists networks", true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output")

	flFilter := opts.NewListOpts(nil)
	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")

	cmd.Require(flag.Exact, 0)
	err := cmd.ParseFlags(args, true)
	if err != nil {
		return err
	}

	// Consolidate all filter flags, and sanity check them early.
	// They'll get processed after we get the response from the server.
	netFilterArgs := filters.NewArgs()
	for _, f := range flFilter.GetAll() {
		if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil {
			return err
		}
	}

	options := types.NetworkListOptions{
		Filters: netFilterArgs,
	}

	networkResources, err := cli.client.NetworkList(context.Background(), options)
	if err != nil {
		return err
	}

	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)

	// unless quiet (-q) is specified, print field titles
	if !*quiet {
		fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER")
	}
	sort.Sort(byNetworkName(networkResources))
	for _, networkResource := range networkResources {
		ID := networkResource.ID
		netName := networkResource.Name
		if !*noTrunc {
			ID = stringid.TruncateID(ID)
		}
		if *quiet {
			fmt.Fprintln(wr, ID)
			continue
		}
		driver := networkResource.Driver
		fmt.Fprintf(wr, "%s\t%s\t%s\t", ID, netName, driver)
		fmt.Fprint(wr, "\n")
	}
	wr.Flush()
	return nil
}
// convertFilters converts filters to the filter type in engine-api.
func convertFilters(filters map[string][]string) dockerfilters.Args {
	args := dockerfilters.NewArgs()
	for name, fields := range filters {
		for _, field := range fields {
			args.Add(name, field)
		}
	}
	return args
}
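// A minimal usage sketch for convertFilters (illustrative, not from the
// original source): a plain map[string][]string becomes an engine-api
// filters.Args that can then be placed in a list-options struct. Since each
// (name, field) pair is Add()ed individually, repeated names accumulate
// multiple values under the same filter key.
func exampleConvertFilters() dockerfilters.Args {
	raw := map[string][]string{
		"label":  {"com.example.project=bar", "env=prod"}, // hypothetical labels
		"status": {"running"},
	}
	return convertFilters(raw)
}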
// ListPodSandbox returns a list of Sandbox.
func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
	// By default, list all containers whether they are running or not.
	opts := dockertypes.ContainerListOptions{All: true}
	filterOutReadySandboxes := false

	opts.Filter = dockerfilters.NewArgs()
	f := newDockerFilter(&opts.Filter)
	// Add filter to select only sandbox containers.
	f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox)

	if filter != nil {
		if filter.Id != nil {
			f.Add("id", filter.GetId())
		}
		if filter.State != nil {
			if filter.GetState() == runtimeApi.PodSandBoxState_READY {
				// Only list running containers.
				opts.All = false
			} else {
				// runtimeApi.PodSandBoxState_NOTREADY can mean the
				// container is in any of the non-running states (e.g., created,
				// exited). We can't tell docker to filter out running
				// containers directly, so we'll need to filter them out
				// ourselves after getting the results.
				filterOutReadySandboxes = true
			}
		}
		if filter.LabelSelector != nil {
			for k, v := range filter.LabelSelector {
				f.AddLabel(k, v)
			}
		}
	}
	containers, err := ds.client.ListContainers(opts)
	if err != nil {
		return nil, err
	}

	// Convert docker containers to runtime api sandboxes.
	result := []*runtimeApi.PodSandbox{}
	for i := range containers {
		c := containers[i]
		converted, err := toRuntimeAPISandbox(&c)
		if err != nil {
			glog.V(5).Infof("Unable to convert docker to runtime API sandbox: %v", err)
			continue
		}
		if filterOutReadySandboxes && converted.GetState() == runtimeApi.PodSandBoxState_READY {
			continue
		}
		result = append(result, converted)
	}
	return result, nil
}
func TestVolumeListError(t *testing.T) {
	client := &Client{
		transport: transport.NewMockClient(nil, transport.ErrorMock(http.StatusInternalServerError, "Server error")),
	}

	_, err := client.VolumeList(filters.NewArgs())
	if err == nil || err.Error() != "Error response from daemon: Server error" {
		t.Fatalf("expected a Server Error, got %v", err)
	}
}
func TestNetworkListError(t *testing.T) {
	client := &Client{
		transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")),
	}

	_, err := client.NetworkList(context.Background(), types.NetworkListOptions{
		Filters: filters.NewArgs(),
	})
	if err == nil || err.Error() != "Error response from daemon: Server error" {
		t.Fatalf("expected a Server Error, got %v", err)
	}
}
// List lists the containers managed by kermit
func (p *Project) List() ([]types.Container, error) {
	listFilters := filters.NewArgs()
	for key, value := range p.Labels {
		listFilters.Add("label", fmt.Sprintf("%s=%s", key, value))
	}
	containers, err := p.Client.ContainerList(context.Background(), types.ContainerListOptions{
		Filter: listFilters,
	})
	if err != nil {
		return nil, err
	}
	return containers, nil
}
func runImages(dockerCli *client.DockerCli, opts imagesOptions) error {
	ctx := context.Background()

	// Consolidate all filter flags, and sanity check them early.
	// They'll get processed in the daemon/server.
	imageFilterArgs := filters.NewArgs()
	for _, f := range opts.filter {
		var err error
		imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
		if err != nil {
			return err
		}
	}

	matchName := opts.matchName

	options := types.ImageListOptions{
		MatchName: matchName,
		All:       opts.all,
		Filters:   imageFilterArgs,
	}

	images, err := dockerCli.Client().ImageList(ctx, options)
	if err != nil {
		return err
	}

	f := opts.format
	if len(f) == 0 {
		if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet {
			f = dockerCli.ConfigFile().ImagesFormat
		} else {
			f = "table"
		}
	}

	imagesCtx := formatter.ImageContext{
		Context: formatter.Context{
			Output: dockerCli.Out(),
			Format: f,
			Quiet:  opts.quiet,
			Trunc:  !opts.noTrunc,
		},
		Digest: opts.showDigests,
		Images: images,
	}

	imagesCtx.Write()

	return nil
}
// RefreshVolumes refreshes the list of volumes on the engine.
func (e *Engine) RefreshVolumes() error {
	volumesLsRsp, err := e.apiClient.VolumeList(context.Background(), filters.NewArgs())
	e.CheckConnectionErr(err)
	if err != nil {
		return err
	}
	e.Lock()
	e.volumes = make(map[string]*Volume)
	for _, volume := range volumesLsRsp.Volumes {
		e.volumes[volume.Name] = &Volume{Volume: *volume, Engine: e}
	}
	e.Unlock()
	return nil
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
	client := dockerCli.Client()

	netFilterArgs := filters.NewArgs()
	for _, f := range opts.filter {
		var err error
		netFilterArgs, err = filters.ParseFlag(f, netFilterArgs)
		if err != nil {
			return err
		}
	}

	options := types.NetworkListOptions{
		Filters: netFilterArgs,
	}

	networkResources, err := client.NetworkList(context.Background(), options)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
	if !opts.quiet {
		fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER\tSCOPE")
		fmt.Fprintf(w, "\n")
	}

	sort.Sort(byNetworkName(networkResources))
	for _, networkResource := range networkResources {
		ID := networkResource.ID
		netName := networkResource.Name
		driver := networkResource.Driver
		scope := networkResource.Scope
		if !opts.noTrunc {
			ID = stringid.TruncateID(ID)
		}
		if opts.quiet {
			fmt.Fprintln(w, ID)
			continue
		}
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t", ID, netName, driver, scope)
		fmt.Fprint(w, "\n")
	}
	w.Flush()
	return nil
}
func TestEngineState(t *testing.T) {
	engine := NewEngine("test", 0, engOpts)
	engine.setState(stateUnhealthy)
	assert.False(t, engine.isConnected())

	client := mockclient.NewMockClient()
	apiClient := engineapimock.NewMockClient()
	apiClient.On("Info", mock.Anything).Return(mockInfo, nil)
	apiClient.On("ServerVersion", mock.Anything).Return(mockVersion, nil)
	apiClient.On("NetworkList", mock.Anything,
		mock.AnythingOfType("NetworkListOptions"),
	).Return([]types.NetworkResource{}, nil)
	apiClient.On("VolumeList", mock.Anything,
		mock.AnythingOfType("Args"),
	).Return(types.VolumesListResponse{}, nil)
	apiClient.On("Events", mock.Anything, mock.AnythingOfType("EventsOptions")).Return(&nopCloser{infiniteRead{}}, nil)

	// The client will return one container at first, then a second one will appear.
	apiClient.On("ImageList", mock.Anything, mock.AnythingOfType("ImageListOptions")).Return([]types.Image{}, nil).Once()
	apiClient.On("ContainerList", mock.Anything, types.ContainerListOptions{All: true, Size: false}).Return([]types.Container{{ID: "one"}}, nil).Once()
	apiClient.On("ContainerInspect", mock.Anything, "one").Return(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{HostConfig: &containertypes.HostConfig{Resources: containertypes.Resources{CPUShares: 100}}}, Config: &containertypes.Config{}, NetworkSettings: &types.NetworkSettings{Networks: nil}}, nil).Once()

	filterArgs := filters.NewArgs()
	filterArgs.Add("id", "two")
	apiClient.On("ContainerList", mock.Anything, types.ContainerListOptions{All: true, Size: false, Filter: filterArgs}).Return([]types.Container{{ID: "two"}}, nil).Once()
	apiClient.On("ContainerInspect", mock.Anything, "two").Return(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{HostConfig: &containertypes.HostConfig{Resources: containertypes.Resources{CPUShares: 100}}}, Config: &containertypes.Config{}, NetworkSettings: &types.NetworkSettings{Networks: nil}}, nil).Once()

	assert.NoError(t, engine.ConnectWithClient(client, apiClient))
	assert.True(t, engine.isConnected())

	// The engine should only have a single container at this point.
	containers := engine.Containers()
	assert.Len(t, containers, 1)
	if containers[0].ID != "one" {
		t.Fatalf("Missing container: one")
	}

	// Fake an event which will trigger a refresh. The second container will appear.
	engine.handler(events.Message{ID: "two", Status: "created"})
	containers = engine.Containers()
	assert.Len(t, containers, 2)
	if containers[0].ID != "one" && containers[1].ID != "one" {
		t.Fatalf("Missing container: one")
	}
	if containers[0].ID != "two" && containers[1].ID != "two" {
		t.Fatalf("Missing container: two")
	}

	client.Mock.AssertExpectations(t)
	apiClient.Mock.AssertExpectations(t)
}
func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) {
	var ids []string
	filterArgs := filters.NewArgs()
	filterArgs.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID))
	containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{
		Filter: filterArgs,
	})
	if err != nil {
		return []string{}, err
	}
	for _, c := range containers {
		ids = append(ids, c.ID)
	}
	return ids, nil
}
func findContainersForProject(name string) ([]types.Container, error) {
	client, err := client.NewEnvClient()
	if err != nil {
		return []types.Container{}, err
	}
	filterArgs := filters.NewArgs()
	if filterArgs, err = filters.ParseFlag(docker.KermitLabelFilter, filterArgs); err != nil {
		return []types.Container{}, err
	}
	return client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
}
func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task {
	var tasks []swarm.Task

	filterArgs := filters.NewArgs()
	filterArgs.Add("desired-state", "running")
	filterArgs.Add("service", service)
	encodedFilters, err := filters.ToParam(filterArgs)
	c.Assert(err, checker.IsNil)

	status, out, err := d.SockRequest("GET", "/tasks?filters="+encodedFilters, nil)
	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
	c.Assert(json.Unmarshal(out, &tasks), checker.IsNil)
	return tasks
}
// RefreshNetworks refreshes the list of networks on the engine.
func (e *Engine) RefreshNetworks() error {
	netLsOpts := types.NetworkListOptions{Filters: filters.NewArgs()}
	networks, err := e.apiClient.NetworkList(context.Background(), netLsOpts)
	e.CheckConnectionErr(err)
	if err != nil {
		return err
	}
	e.Lock()
	e.networks = make(map[string]*Network)
	for _, network := range networks {
		e.networks[network.ID] = &Network{NetworkResource: network, Engine: e}
	}
	e.Unlock()
	return nil
}
// GetContainersByFilter looks up the host's containers with the specified filters and
// returns a list of containers matching them, or an error.
func GetContainersByFilter(client client.APIClient, containerFilters ...map[string][]string) ([]types.Container, error) {
	filterArgs := filters.NewArgs()

	// FIXME(vdemeester) I don't like 3 for loops >_<
	for _, filter := range containerFilters {
		for key, filterValue := range filter {
			for _, value := range filterValue {
				filterArgs.Add(key, value)
			}
		}
	}

	return client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
}
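// A minimal usage sketch for GetContainersByFilter (illustrative only; the
// label value is hypothetical). Every map passed in the variadic argument
// contributes its key/value pairs to one combined filter set, so the two maps
// below behave the same as a single map with both entries.
func exampleGetContainersByFilter(apiClient client.APIClient) ([]types.Container, error) {
	return GetContainersByFilter(apiClient,
		map[string][]string{"label": {"com.example.project=demo"}},
		map[string][]string{"status": {"running"}},
	)
}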
func TestImagesFilterWithLabelFilter(t *testing.T) {
	engine := NewEngine("test", 0, engOpts)

	images := Images{
		{types.Image{ID: "a"}, engine},
		{types.Image{
			ID:     "b",
			Labels: map[string]string{"com.example.project": "bar"},
		}, engine},
		{types.Image{ID: "c"}, engine},
	}

	filters := dockerfilters.NewArgs()
	filters.Add("label", "com.example.project=bar")
	result := images.Filter(ImageFilterOptions{types.ImageListOptions{All: true, Filters: filters}})
	assert.Equal(t, len(result), 1)
	assert.Equal(t, result[0].ID, "b")
}
func TestImageSearchWithoutErrors(t *testing.T) {
	expectedURL := "/images/search"
	filterArgs := filters.NewArgs()
	filterArgs.Add("is-automated", "true")
	filterArgs.Add("stars", "3")

	expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}`

	client := &Client{
		transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) {
			if !strings.HasPrefix(req.URL.Path, expectedURL) {
				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
			}
			query := req.URL.Query()
			term := query.Get("term")
			if term != "some-image" {
				return nil, fmt.Errorf("term not set in URL query properly. Expected '%s', got %s", "some-image", term)
			}
			filters := query.Get("filters")
			if filters != expectedFilters {
				return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", expectedFilters, filters)
			}
			content, err := json.Marshal([]registry.SearchResult{
				{
					Name: "anything",
				},
			})
			if err != nil {
				return nil, err
			}
			return &http.Response{
				StatusCode: http.StatusOK,
				Body:       ioutil.NopCloser(bytes.NewReader(content)),
			}, nil
		}),
	}
	results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{
		Filters: filterArgs,
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(results) != 1 {
		t.Fatalf("expected a result, got %v", results)
	}
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
	client := dockerCli.Client()

	netFilterArgs := filters.NewArgs()
	for _, f := range opts.filter {
		var err error
		netFilterArgs, err = filters.ParseFlag(f, netFilterArgs)
		if err != nil {
			return err
		}
	}

	options := types.NetworkListOptions{
		Filters: netFilterArgs,
	}

	networkResources, err := client.NetworkList(context.Background(), options)
	if err != nil {
		return err
	}

	f := opts.format
	if len(f) == 0 {
		if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet {
			f = dockerCli.ConfigFile().NetworksFormat
		} else {
			f = "table"
		}
	}

	sort.Sort(byNetworkName(networkResources))

	networksCtx := formatter.NetworkContext{
		Context: formatter.Context{
			Output: dockerCli.Out(),
			Format: f,
			Quiet:  opts.quiet,
			Trunc:  !opts.noTrunc,
		},
		Networks: networkResources,
	}

	networksCtx.Write()
	return nil
}