func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { pruneFilters := filters.NewArgs() pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) warning := danglingWarning if opts.all { warning = allImageWarning } if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { return } report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) if err != nil { return } if len(report.ImagesDeleted) > 0 { output = "Deleted Images:\n" for _, st := range report.ImagesDeleted { if st.Untagged != "" { output += fmt.Sprintln("untagged:", st.Untagged) } else { output += fmt.Sprintln("deleted:", st.Deleted) } } spaceReclaimed = report.SpaceReclaimed } return }
// Events implements Service.Events. It listen to all real-time events happening // for the service, and put them into the specified chan. func (s *Service) Events(ctx context.Context, evts chan events.ContainerEvent) error { filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT, s.project.Name)) filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE, s.name)) client := s.clientFactory.Create(s) eventq, errq := client.Events(ctx, types.EventsOptions{ Filters: filter, }) go func() { for { select { case event := <-eventq: service := event.Actor.Attributes[labels.SERVICE.Str()] attributes := map[string]string{} for _, attr := range eventAttributes { attributes[attr] = event.Actor.Attributes[attr] } e := events.ContainerEvent{ Service: service, Event: event.Action, Type: event.Type, ID: event.Actor.ID, Time: time.Unix(event.Time, 0), Attributes: attributes, } evts <- e } } }() return <-errq }
func runList(dockerCli *command.DockerCli, opts listOptions) error { ctx := context.Background() client := dockerCli.Client() services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()}) if err != nil { return err } out := dockerCli.Out() if opts.quiet { PrintQuiet(out, services) } else { taskFilter := filters.NewArgs() for _, service := range services { taskFilter.Add("service", service.ID) } tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter}) if err != nil { return err } nodes, err := client.NodeList(ctx, types.NodeListOptions{}) if err != nil { return err } PrintNotQuiet(out, services, nodes, tasks) } return nil }
// RemoveOrphans implements project.RuntimeProject.RemoveOrphans. // It will remove orphan containers that are part of the project but not to any services. func (p *Project) RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error { client := p.clientFactory.Create(nil) filter := filters.NewArgs() filter.Add("label", labels.PROJECT.EqString(projectName)) containers, err := client.ContainerList(ctx, types.ContainerListOptions{ Filter: filter, }) if err != nil { return err } currentServices := map[string]struct{}{} for _, serviceName := range serviceConfigs.Keys() { currentServices[serviceName] = struct{}{} } for _, container := range containers { serviceLabel := container.Labels[labels.SERVICE.Str()] if _, ok := currentServices[serviceLabel]; !ok { if err := client.ContainerKill(ctx, container.ID, "SIGKILL"); err != nil { return err } if err := client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ Force: true, }); err != nil { return err } } } return nil }
func (c containerConfig) eventFilter() filters.Args { filter := filters.NewArgs() filter.Add("type", events.ContainerEventType) filter.Add("name", c.name()) filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) return filter }
func runEvents(dockerCli *client.DockerCli, opts *eventsOptions) error { eventFilterArgs := filters.NewArgs() // Consolidate all filter flags, and sanity check them early. // They'll get process in the daemon/server. for _, f := range opts.filter { var err error eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) if err != nil { return err } } options := types.EventsOptions{ Since: opts.since, Until: opts.until, Filters: eventFilterArgs, } responseBody, err := dockerCli.Client().Events(context.Background(), options) if err != nil { return err } defer responseBody.Close() return streamEvents(responseBody, dockerCli.Out()) }
// CmdNetworkLs lists all the networks managed by docker daemon // // Usage: docker network ls [OPTIONS] func (cli *DockerCli) CmdNetworkLs(args ...string) error { cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.Require(flag.Exact, 0) err := cmd.ParseFlags(args, true) if err != nil { return err } // Consolidate all filter flags, and sanity check them early. // They'll get process after get response from server. netFilterArgs := filters.NewArgs() for _, f := range flFilter.GetAll() { if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil { return err } } options := types.NetworkListOptions{ Filters: netFilterArgs, } networkResources, err := cli.client.NetworkList(options) if err != nil { return err } wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) // unless quiet (-q) is specified, print field titles if !*quiet { fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") } for _, networkResource := range networkResources { ID := networkResource.ID netName := networkResource.Name if !*noTrunc { ID = stringid.TruncateID(ID) } if *quiet { fmt.Fprintln(wr, ID) continue } driver := networkResource.Driver fmt.Fprintf(wr, "%s\t%s\t%s\t", ID, netName, driver) fmt.Fprint(wr, "\n") } wr.Flush() return nil }
func TestVolumeListError(t *testing.T) { client := &Client{ transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.VolumeList(context.Background(), filters.NewArgs()) if err == nil || err.Error() != "Error response from daemon: Server error" { t.Fatalf("expected a Server Error, got %v", err) } }
func getSecretsByName(ctx context.Context, client client.APIClient, names []string) ([]swarm.Secret, error) { args := filters.NewArgs() for _, n := range names { args.Add("names", n) } return client.SecretList(ctx, types.SecretListOptions{ Filters: args, }) }
// parseSecrets retrieves the secrets from the requested names and converts // them to secret references to use with the spec func parseSecrets(client client.APIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) { secretRefs := make(map[string]*swarmtypes.SecretReference) ctx := context.Background() for _, secret := range requestedSecrets { secretRef := &swarmtypes.SecretReference{ SecretName: secret.Source, Target: &swarmtypes.SecretReferenceFileTarget{ Name: secret.Target, UID: secret.UID, GID: secret.GID, Mode: secret.Mode, }, } if _, exists := secretRefs[secret.Target]; exists { return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source) } secretRefs[secret.Target] = secretRef } args := filters.NewArgs() for _, s := range secretRefs { args.Add("names", s.SecretName) } secrets, err := client.SecretList(ctx, types.SecretListOptions{ Filters: args, }) if err != nil { return nil, err } foundSecrets := make(map[string]string) for _, secret := range secrets { foundSecrets[secret.Spec.Annotations.Name] = secret.ID } addedSecrets := []*swarmtypes.SecretReference{} for _, ref := range secretRefs { id, ok := foundSecrets[ref.SecretName] if !ok { return nil, fmt.Errorf("secret not found: %s", ref.SecretName) } // set the id for the ref to properly assign in swarm // since swarm needs the ID instead of the name ref.SecretID = id addedSecrets = append(addedSecrets, ref) } return addedSecrets, nil }
func TestContainersPruneError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), version: "1.25", } filters := filters.NewArgs() _, err := client.ContainersPrune(context.Background(), filters) assert.Error(t, err, "Error response from daemon: Server error") }
func TestNetworkListError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ Filters: filters.NewArgs(), }) if err == nil || err.Error() != "Error response from daemon: Server error" { t.Fatalf("expected a Server Error, got %v", err) } }
func TestNetworksPruneError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), version: "1.25", } filters := filters.NewArgs() _, err := client.NetworksPrune(context.Background(), filters) if err == nil || err.Error() != "Error response from daemon: Server error" { t.Fatalf("expected a Server Error, got %v", err) } }
func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { ctx := context.Background() // Consolidate all filter flags, and sanity check them early. // They'll get process in the daemon/server. imageFilterArgs := filters.NewArgs() for _, f := range opts.filter { var err error imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) if err != nil { return err } } matchName := opts.matchName options := types.ImageListOptions{ MatchName: matchName, All: opts.all, Filters: imageFilterArgs, } images, err := dockerCli.Client().ImageList(ctx, options) if err != nil { return err } f := opts.format if len(f) == 0 { if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { f = dockerCli.ConfigFile().ImagesFormat } else { f = "table" } } imagesCtx := formatter.ImageContext{ Context: formatter.Context{ Output: dockerCli.Out(), Format: f, Quiet: opts.quiet, Trunc: !opts.noTrunc, }, Digest: opts.showDigests, Images: images, } imagesCtx.Write() return nil }
func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { var ids []string filters := filters.NewArgs() filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ Filter: filters, }) if err != nil { return []string{}, err } for _, c := range containers { ids = append(ids, c.ID) } return ids, nil }
func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { var tasks []swarm.Task filterArgs := filters.NewArgs() filterArgs.Add("desired-state", "running") filterArgs.Add("service", service) filters, err := filters.ToParam(filterArgs) c.Assert(err, checker.IsNil) status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) return tasks }
func TestImageListApiBefore125(t *testing.T) { expectedFilter := "image:tag" client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { query := req.URL.Query() actualFilter := query.Get("filter") if actualFilter != expectedFilter { return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) } actualFilters := query.Get("filters") if actualFilters != "" { return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) } content, err := json.Marshal([]types.ImageSummary{ { ID: "image_id2", }, { ID: "image_id2", }, }) if err != nil { return nil, err } return &http.Response{ StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewReader(content)), }, nil }), version: "1.24", } filters := filters.NewArgs() filters.Add("reference", "image:tag") options := types.ImageListOptions{ Filters: filters, } images, err := client.ImageList(context.Background(), options) if err != nil { t.Fatal(err) } if len(images) != 2 { t.Fatalf("expected 2 images, got %v", images) } }
func TestImageSearchWithoutErrors(t *testing.T) { expectedURL := "/images/search" filterArgs := filters.NewArgs() filterArgs.Add("is-automated", "true") filterArgs.Add("stars", "3") expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` client := &Client{ transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) } query := req.URL.Query() term := query.Get("term") if term != "some-image" { return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "some-image", term) } filters := query.Get("filters") if filters != expectedFilters { return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", expectedFilters, filters) } content, err := json.Marshal([]registry.SearchResult{ { Name: "anything", }, }) if err != nil { return nil, err } return &http.Response{ StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewReader(content)), }, nil }), } results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ Filters: filterArgs, }) if err != nil { t.Fatal(err) } if len(results) != 1 { t.Fatalf("expected a result, got %v", results) } }
func runList(dockerCli *client.DockerCli, opts listOptions) error { client := dockerCli.Client() netFilterArgs := filters.NewArgs() for _, f := range opts.filter { var err error netFilterArgs, err = filters.ParseFlag(f, netFilterArgs) if err != nil { return err } } options := types.NetworkListOptions{ Filters: netFilterArgs, } networkResources, err := client.NetworkList(context.Background(), options) if err != nil { return err } f := opts.format if len(f) == 0 { if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { f = dockerCli.ConfigFile().NetworksFormat } else { f = "table" } } sort.Sort(byNetworkName(networkResources)) networksCtx := formatter.NetworkContext{ Context: formatter.Context{ Output: dockerCli.Out(), Format: f, Quiet: opts.quiet, Trunc: !opts.noTrunc, }, Networks: networkResources, } networksCtx.Write() return nil }
func TestImagesFilterWithLabelFilter(t *testing.T) { engine := NewEngine("test", 0, engOpts) images := Images{ {dockerclient.Image{Id: "a"}, engine}, {dockerclient.Image{ Id: "b", Labels: map[string]string{"com.example.project": "bar"}, }, engine}, {dockerclient.Image{Id: "c"}, engine}, } filters := dockerfilters.NewArgs() filters.Add("label", "com.example.project=bar") result := images.Filter(ImageFilterOptions{All: true, Filters: filters}) assert.Equal(t, len(result), 1) assert.Equal(t, result[0].Id, "b") }
// ListByFilter looks up the hosts containers with the specified filters and // returns a list of container matching it, or an error. func ListByFilter(ctx context.Context, clientInstance client.ContainerAPIClient, containerFilters ...map[string][]string) ([]types.Container, error) { filterArgs := filters.NewArgs() // FIXME(vdemeester) I don't like 3 for loops >_< for _, filter := range containerFilters { for key, filterValue := range filter { for _, value := range filterValue { filterArgs.Add(key, value) } } } return clientInstance.ContainerList(ctx, types.ContainerListOptions{ All: true, Filter: filterArgs, }) }
// CmdVolumeLs outputs a list of Docker volumes. // // Usage: docker volume ls [OPTIONS] func (cli *DockerCli) CmdVolumeLs(args ...string) error { cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) volFilterArgs := filters.NewArgs() for _, f := range flFilter.GetAll() { var err error volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) if err != nil { return err } } volumes, err := cli.client.VolumeList(volFilterArgs) if err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { for _, warn := range volumes.Warnings { fmt.Fprintln(cli.err, warn) } fmt.Fprintf(w, "DRIVER \tVOLUME NAME") fmt.Fprintf(w, "\n") } for _, vol := range volumes.Volumes { if *quiet { fmt.Fprintln(w, vol.Name) continue } fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) } w.Flush() return nil }
// monitor long-polls the Docker event stream for containers labeled with
// this cluster's ID and dispatches "die"/"restart" events to processEvent.
// It exits when the monitor context is cancelled or processEvent returns
// false.
func (l *LocalCluster) monitor(ctx context.Context) {
	if log.V(1) {
		log.Infof(ctx, "events monitor starts")
		defer log.Infof(ctx, "events monitor exits")
	}
	// longPoll consumes one event stream until it errors out; it returns
	// true when the outer loop should reconnect and poll again.
	longPoll := func() bool {
		// If our context was cancelled, it's time to go home.
		if l.monitorCtx.Err() != nil {
			return false
		}
		// Only watch containers belonging to this cluster.
		args, err := filters.ParseFlag(
			fmt.Sprintf("label=Acceptance-cluster-id=%s", l.clusterID), filters.NewArgs())
		maybePanic(err)
		eventq, errq := l.client.Events(l.monitorCtx, types.EventsOptions{
			Filters: args,
		})
		for {
			select {
			case err := <-errq:
				log.Infof(ctx, "event stream done, resetting...: %s", err)
				// Sometimes we get a random string-wrapped EOF error back.
				// Hard to assert on, so we just let this goroutine spin.
				return true
			case event := <-eventq:
				// Currently, the only events generated (and asserted against) are "die"
				// and "restart", to maximize compatibility across different versions of
				// Docker.
				switch event.Status {
				case eventDie, eventRestart:
					if !l.processEvent(ctx, event) {
						return false
					}
				}
			}
		}
	}
	// Reconnect until longPoll signals a permanent stop.
	for longPoll() {
	}
}
// GetServices returns all Docker services mathcing the labels giben func (c *HTTPClient) GetServices(filterList map[string]string) []swarm.Service { defer func() { if r := recover(); r != nil { log.Print("Failed to lookup services: ", r) } }() filter := filters.NewArgs() for k, v := range filterList { filter.Add(k, v) } services, err := c.cli.ServiceList(context.Background(), types.ServiceListOptions{Filter: filter}) if err != nil { log.Print("Failed to lookup services: ", err) return []swarm.Service{} } return services }
func runList(dockerCli *client.DockerCli, opts listOptions) error { client := dockerCli.Client() volFilterArgs := filters.NewArgs() for _, f := range opts.filter { var err error volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) if err != nil { return err } } volumes, err := client.VolumeList(context.Background(), volFilterArgs) if err != nil { return err } f := opts.format if len(f) == 0 { if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { f = dockerCli.ConfigFile().VolumesFormat } else { f = "table" } } sort.Sort(byVolumeName(volumes.Volumes)) volumeCtx := formatter.VolumeContext{ Context: formatter.Context{ Output: dockerCli.Out(), Format: f, Quiet: opts.quiet, }, Volumes: volumes.Volumes, } volumeCtx.Write() return nil }
func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { var tasks []swarm.Task filterArgs := filters.NewArgs() filterArgs.Add("desired-state", "running") filters, err := filters.ToParam(filterArgs) c.Assert(err, checker.IsNil) status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) result := make(map[string]int) for _, task := range tasks { if task.Status.State == swarm.TaskStateRunning { result[task.Spec.ContainerSpec.Image]++ } } return result, nil }
func getNetworkIDByName(c *check.C, name string) string { var ( v = url.Values{} filterArgs = filters.NewArgs() ) filterArgs.Add("name", name) filterJSON, err := filters.ToParam(filterArgs) c.Assert(err, checker.IsNil) v.Set("filters", filterJSON) resp, body, err := request.Get(daemonHost(), "/networks?"+v.Encode()) c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) nJSON := []types.NetworkResource{} err = json.NewDecoder(body).Decode(&nJSON) c.Assert(err, checker.IsNil) c.Assert(len(nJSON), checker.Equals, 1) return nJSON[0].ID }
func getNetworkIDByName(c *check.C, name string) string { var ( v = url.Values{} filterArgs = filters.NewArgs() ) filterArgs.Add("name", name) filterJSON, err := filters.ToParam(filterArgs) c.Assert(err, checker.IsNil) v.Set("filters", filterJSON) status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) nJSON := []types.NetworkResource{} err = json.Unmarshal(body, &nJSON) c.Assert(err, checker.IsNil) c.Assert(len(nJSON), checker.Equals, 1) return nJSON[0].ID }
func runServices(dockerCli *command.DockerCli, opts servicesOptions) error { ctx := context.Background() client := dockerCli.Client() filter := opts.filter.Value() filter.Add("label", labelNamespace+"="+opts.namespace) services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) if err != nil { return err } out := dockerCli.Out() // if no services in this stack, print message and exit 0 if len(services) == 0 { fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) return nil } if opts.quiet { service.PrintQuiet(out, services) } else { taskFilter := filters.NewArgs() for _, service := range services { taskFilter.Add("service", service.ID) } tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) if err != nil { return err } nodes, err := client.NodeList(ctx, types.NodeListOptions{}) if err != nil { return err } service.PrintNotQuiet(out, services, nodes, tasks) } return nil }
func getStacks( ctx context.Context, apiclient client.APIClient, ) ([]*stack, error) { filter := filters.NewArgs() filter.Add("label", labelNamespace) services, err := apiclient.ServiceList( ctx, types.ServiceListOptions{Filter: filter}) if err != nil { return nil, err } m := make(map[string]*stack, 0) for _, service := range services { labels := service.Spec.Labels name, ok := labels[labelNamespace] if !ok { return nil, fmt.Errorf("cannot get label %s for service %s", labelNamespace, service.ID) } ztack, ok := m[name] if !ok { m[name] = &stack{ Name: name, Services: 1, } } else { ztack.Services++ } } var stacks []*stack for _, stack := range m { stacks = append(stacks, stack) } return stacks, nil }