func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	certPath := os.Getenv("DOCKER_CERT_PATH")
	hostConfig := &container.HostConfig{
		NetworkMode: "serverlessdockervotingapp_default",
		Binds:       []string{fmt.Sprintf("%s:%s", certPath, certPath)},
	}
	inheritEnv := []string{"DOCKER_HOST", "DOCKER_MACHINE_NAME", "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH"}
	http.Handle("/vote/", &dcgi.Handler{
		Image:      "bfirsh/serverless-vote",
		Client:     cli,
		HostConfig: hostConfig,
		InheritEnv: inheritEnv,
		Root:       "/vote", // strip /vote from all URLs
	})
	http.Handle("/result/", &dcgi.Handler{
		Image:      "bfirsh/serverless-result",
		Client:     cli,
		HostConfig: hostConfig,
		InheritEnv: inheritEnv,
		Root:       "/result",
	})
	http.ListenAndServe(":80", nil)
}
func cleanContainers(c *check.C) *docker.Project {
	client, err := dockerclient.NewEnvClient()
	c.Assert(err, check.IsNil)
	filterArgs := filters.NewArgs()
	filterArgs, err = filters.ParseFlag(d.KermitLabelFilter, filterArgs)
	c.Assert(err, check.IsNil)
	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	c.Assert(err, check.IsNil)
	for _, container := range containers {
		c.Logf("cleaning container %s…", container.ID)
		if err := client.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			c.Errorf("Error while removing container %s: %v\n", container.ID, err)
		}
	}
	return docker.NewProject(client)
}
// CreateProject creates a compose project with the given name based on the
// specified compose files
func CreateProject(name string, composeFiles ...string) (*Project, error) {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		return nil, err
	}
	composeProject, err := docker.NewProject(&docker.Context{
		Context: project.Context{
			ComposeFiles: composeFiles,
			ProjectName:  name,
		},
	})
	if err != nil {
		return nil, err
	}
	p := &Project{
		composeProject: composeProject,
		listenChan:     make(chan project.Event),
		started:        make(chan struct{}),
		stopped:        make(chan struct{}),
		deleted:        make(chan struct{}),
		client:         apiClient,
	}
	// Listen to compose events
	go p.startListening()
	p.composeProject.AddListener(p.listenChan)
	return p, nil
}
func runSave(cmd *cobra.Command, args []string) (reterr error) {
	if len(args) == 0 {
		return errors.New("image reference missing")
	}
	output, err := cmd.Flags().GetString("output")
	if err != nil {
		return err
	}
	if output == "-" && terminal.IsTerminal(int(os.Stdout.Fd())) {
		return errors.New("refusing to output to terminal, specify output file")
	}
	client, err := engineapi.NewEnvClient()
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	callOnSignal(ctx, cancel, syscall.SIGINT)
	defer cancel()
	graphdir, err := cmd.Flags().GetString("graph")
	if err != nil {
		return err
	}
	c, err := buildcache.New(client).Get(ctx, graphdir, args[0])
	if err != nil {
		return err
	}
	if output == "-" {
		_, err := io.Copy(os.Stdout, c)
		return err
	}
	f, err := ioutil.TempFile(filepath.Dir(output), ".buildcache-")
	if err != nil {
		return err
	}
	defer func() {
		if reterr != nil {
			os.RemoveAll(f.Name())
		}
	}()
	if n, err := io.Copy(f, c); err != nil {
		return err
	} else {
		logrus.Debugf("saving: %v", humanize.Bytes(uint64(n)))
	}
	if err := f.Sync(); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(f.Name(), output)
}
// CreateLocal creates a new local cockroach cluster. The stopper is used to
// gracefully shut down the cluster (e.g. when a signal arrives). The cluster
// must be started before being used.
func CreateLocal(cfg TestConfig, logDir string, privileged bool, stopper chan struct{}) *LocalCluster {
	select {
	case <-stopper:
		// The stopper was already closed, exit early.
		os.Exit(1)
	default:
	}
	if *cockroachImage == builderImageFull && !exists(*cockroachBinary) {
		log.Fatalf("\"%s\": does not exist", *cockroachBinary)
	}
	cli, err := client.NewEnvClient()
	maybePanic(err)
	retryingClient := retryingDockerClient{
		resilientDockerClient: resilientDockerClient{APIClient: cli},
		attempts:              10,
		timeout:               10 * time.Second,
	}
	return &LocalCluster{
		client:  retryingClient,
		stopper: stopper,
		config:  cfg,
		// TODO(tschottdorf): deadlocks will occur if these channels fill up.
		events:         make(chan Event, 1000),
		expectedEvents: make(chan Event, 1000),
		logDir:         logDir,
		privileged:     privileged,
	}
}
// NewProjectFromEnv creates a project with a client that is built from environment variables.
func NewProjectFromEnv(t *testing.T) *Project {
	client, err := client.NewEnvClient()
	if err != nil {
		t.Fatalf("Error while getting a docker client from env: %s", err.Error())
	}
	return NewProject(client)
}
// NewProjectFromEnv creates a project with a client that is built from environment variables.
func NewProjectFromEnv() (*Project, error) {
	client, err := client.NewEnvClient()
	if err != nil {
		return nil, err
	}
	return NewProject(client), nil
}
// CreateLocal creates a new local cockroach cluster. The stopper is used to
// gracefully shut down the cluster (e.g. when a signal arrives). The cluster
// must be started before being used.
func CreateLocal(numLocal, numStores int, logDir string, stopper chan struct{}) *LocalCluster {
	select {
	case <-stopper:
		// The stopper was already closed, exit early.
		os.Exit(1)
	default:
	}
	if *cockroachImage == builderImage && !exists(*cockroachBinary) {
		log.Fatalf("\"%s\": does not exist", *cockroachBinary)
	}
	cli, err := dockerclient.NewEnvClient()
	maybePanic(err)
	return &LocalCluster{
		client:    cli,
		stopper:   stopper,
		numLocal:  numLocal,
		numStores: numStores,
		// TODO(tschottdorf): deadlocks will occur if these channels fill up.
		events:         make(chan Event, 1000),
		expectedEvents: make(chan Event, 1000),
		logDir:         logDir,
	}
}
func cleanContainers(t *testing.T) *docker.Project {
	client, err := dockerclient.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	filterArgs := filters.NewArgs()
	if filterArgs, err = filters.ParseFlag(docker.KermitLabelFilter, filterArgs); err != nil {
		t.Fatal(err)
	}
	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	if err != nil {
		t.Fatal(err)
	}
	for _, container := range containers {
		t.Logf("cleaning container %s…", container.ID)
		if err := client.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			t.Errorf("Error while removing container %s: %v\n", container.ID, err)
		}
	}
	return docker.NewProject(client)
}
func getDockerID() (dockerID string, err error) {
	context := ctx.Background()
	// Default to Docker API Version corresponding to Docker v1.10
	if os.Getenv("DOCKER_API_VERSION") == "" {
		if err = os.Setenv("DOCKER_API_VERSION", "1.22"); err != nil {
			// log.Panicf never returns, so no explicit exit is needed here.
			log.Panicf("Cannot set default Docker API Version: '%s'", err)
		}
	}
	cli, err := apiclient.NewEnvClient()
	if err != nil {
		return
	}
	inf, err := cli.Info(context)
	if err != nil {
		return
	}
	dockerID = inf.ID
	return
}
// Get a *dockerapi.Client, either using the endpoint passed in, or using
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH per their spec
func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
	if len(dockerEndpoint) > 0 {
		glog.Infof("Connecting to docker on %s", dockerEndpoint)
		return dockerapi.NewClient(dockerEndpoint, "", nil, nil)
	}
	return dockerapi.NewEnvClient()
}
func main() {
	var dockerAddr string
	var transferAddr string
	var debug bool
	flag.BoolVar(&debug, "DEBUG", false, "enable debug")
	flag.StringVar(&dockerAddr, "d", "tcp://192.168.99.100:2376", "docker daemon addr")
	flag.StringVar(&transferAddr, "t", "10.200.8.37:8433", "transfer addr")
	flag.Parse()
	if flag.NArg() < 1 {
		fmt.Println("need at least one container id")
		return
	}
	if debug {
		log.SetLevel(log.DebugLevel)
	}
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	metric.SetGlobalSetting(cli, 2, 3, "vnbe", "eth0")
	// Renamed from "client" to avoid shadowing the docker client package.
	statsdClient := statsd.CreateStatsDClient(transferAddr)
	ctx := context.Background()
	for i := 0; i < flag.NArg(); i++ {
		c, err := cli.ContainerInspect(ctx, flag.Arg(i))
		if err != nil {
			fmt.Println(flag.Arg(i), err)
			continue
		}
		go start_watcher(statsdClient, c.ID, c.State.Pid)
	}
	// Block forever without busy-spinning the CPU.
	select {}
}
func newDockerClient() {
	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Println(err)
		return
	}
	dockerClient = &DockerClient{cli}
}
func (dc *DaemonConfig) getMounted() (map[string]*storage.Mount, map[string]int, error) {
	mounts := map[string]*storage.Mount{}
	counts := map[string]int{}
	now := time.Now()
	// XXX this loop will run indefinitely if the docker service is down.
	// This is intentional to ensure we don't take any action when docker is down.
	for {
		dockerClient, err := client.NewEnvClient()
		if err != nil {
			return nil, nil, errored.Errorf("Could not initiate docker client").Combine(err)
		}
		containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
		if err != nil {
			// time.Since(now) measures how long we have been retrying.
			if time.Since(now) > dc.Global.Timeout {
				panic("Cannot contact docker")
			}
			logrus.Error(errored.Errorf("Could not query docker; retrying").Combine(err))
			time.Sleep(time.Second)
			continue
		}
		for _, container := range containers {
			if container.State == "running" {
				for _, mount := range container.Mounts {
					if mount.Driver == dc.PluginName {
						mounts[mount.Name] = nil
						counts[mount.Name]++
					}
				}
			}
		}
		break
	}
	for driverName := range backend.MountDrivers {
		cd, err := backend.NewMountDriver(driverName, dc.Global.MountPath)
		if err != nil {
			return nil, nil, err
		}
		mounted, err := cd.Mounted(dc.Global.Timeout)
		if err != nil {
			return nil, nil, err
		}
		for _, mount := range mounted {
			logrus.Debugf("Refreshing existing mount for %q: %v", mount.Volume.Name, *mount)
			mounts[mount.Volume.Name] = mount
		}
	}
	return mounts, counts, nil
}
// getDockerClient obtains a new Docker client from the environment or
// from a Docker machine, starting it if necessary and permitted
func getDockerClient(out io.Writer, dockerMachine string, canStartDockerMachine bool) (*docker.Client, *dockerclient.Client, error) {
	if len(dockerMachine) > 0 {
		glog.V(2).Infof("Getting client for Docker machine %q", dockerMachine)
		dockerClient, engineAPIClient, err := getDockerMachineClient(dockerMachine, out, canStartDockerMachine)
		if err != nil {
			return nil, nil, errors.ErrNoDockerMachineClient(dockerMachine, err)
		}
		return dockerClient, engineAPIClient, nil
	}
	dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY")
	dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
	if len(dockerTLSVerify) > 0 && len(dockerCertPath) == 0 {
		dockerCertPath = cliconfig.ConfigDir()
		os.Setenv("DOCKER_CERT_PATH", dockerCertPath)
	}
	if glog.V(4) {
		dockerHost := os.Getenv("DOCKER_HOST")
		if len(dockerHost) == 0 && len(dockerTLSVerify) == 0 && len(dockerCertPath) == 0 {
			glog.Infof("No Docker environment variables found. Will attempt default socket.")
		}
		if len(dockerHost) > 0 {
			glog.Infof("Will try Docker connection with host (DOCKER_HOST) %q", dockerHost)
		} else {
			glog.Infof("No Docker host (DOCKER_HOST) configured. Will attempt default socket.")
		}
		if len(dockerTLSVerify) > 0 {
			glog.Infof("DOCKER_TLS_VERIFY=%s", dockerTLSVerify)
		}
		if len(dockerCertPath) > 0 {
			glog.Infof("DOCKER_CERT_PATH=%s", dockerCertPath)
		}
	}
	dockerClient, _, err := dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, nil, errors.ErrNoDockerClient(err)
	}
	// FIXME: Workaround for docker engine API client on OS X - sets the default to
	// the wrong DOCKER_HOST string
	if runtime.GOOS == "darwin" {
		dockerHost := os.Getenv("DOCKER_HOST")
		if len(dockerHost) == 0 {
			os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
		}
	}
	engineAPIClient, err := dockerclient.NewEnvClient()
	if err != nil {
		return nil, nil, errors.ErrNoDockerClient(err)
	}
	if err = dockerClient.Ping(); err != nil {
		return nil, nil, errors.ErrCannotPingDocker(err)
	}
	glog.V(4).Infof("Docker ping succeeded")
	return dockerClient, engineAPIClient, nil
}
// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
		var c *client.Client
		var err error
		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
		if d.Endpoint == "ENV" {
			c, err = client.NewEnvClient()
			if err != nil {
				return err
			}
		} else if d.Endpoint == "" {
			c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
			if err != nil {
				return err
			}
		} else {
			c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
			if err != nil {
				return err
			}
		}
		d.client = c
	}
	// Get daemon info
	err := d.gatherInfo(acc)
	if err != nil {
		fmt.Println(err.Error())
	}
	// List containers
	opts := types.ContainerListOptions{}
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	containers, err := d.client.ContainerList(ctx, opts)
	if err != nil {
		return err
	}
	// Get container data
	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, container := range containers {
		go func(c types.Container) {
			defer wg.Done()
			err := d.gatherContainer(c, acc)
			if err != nil {
				log.Printf("Error gathering container %s stats: %s\n", c.Names, err.Error())
			}
		}(container)
	}
	wg.Wait()
	return nil
}
// StartDaemon starts a daemon using the provided binary returning
// a client to the binary, a close function, and error.
func StartDaemon(ctx context.Context, binary string, lc LogCapturer) (DockerClient, func() error, error) {
	// Get Docker version of process
	previousVersion, err := versionutil.BinaryVersion(binary)
	if err != nil {
		return DockerClient{}, nil, fmt.Errorf("could not get binary version: %s", err)
	}
	logrus.Debugf("Starting daemon with %s", binary)
	binaryArgs := []string{}
	if previousVersion.LessThan(versionutil.StaticVersion(1, 8, 0)) {
		binaryArgs = append(binaryArgs, "--daemon")
	} else {
		binaryArgs = append(binaryArgs, "daemon")
	}
	binaryArgs = append(binaryArgs, "--log-level=debug")
	binaryArgs = append(binaryArgs, "--storage-driver="+getGraphDriver())
	cmd := exec.Command(binary, binaryArgs...)
	cmd.Stdout = lc.Stdout()
	cmd.Stderr = lc.Stderr()
	if err := cmd.Start(); err != nil {
		return DockerClient{}, nil, fmt.Errorf("could not start daemon: %s", err)
	}
	logrus.Debugf("Waiting for daemon to start")
	time.Sleep(2 * time.Second)
	cli, err := client.NewEnvClient()
	if err != nil {
		return DockerClient{}, nil, fmt.Errorf("could not initialize client: %s", err)
	}
	// Wait for it to start
	for i := 0; ; i++ {
		v, err := cli.ServerVersion(ctx)
		if err == nil {
			logrus.Debugf("Established connection to daemon with version %s", v.Version)
			break
		}
		if i >= 10 {
			logrus.Fatalf("Failed to establish connection to daemon, check logs, quitting")
		}
		time.Sleep(time.Second)
	}
	kill := func() error {
		if err := cmd.Process.Kill(); err != nil {
			return err
		}
		time.Sleep(500 * time.Millisecond)
		return os.RemoveAll("/var/run/docker.pid")
	}
	return DockerClient{Client: cli, options: &clientutil.ClientOptions{}}, kill, nil
}
// NewTarget initializes a new Target struct
func NewTarget() (a Target, err error) {
	a.Client, err = client.NewEnvClient()
	if err != nil {
		log.Fatalf("unable to create Docker client: %v\n", err)
	}
	a.Info, err = a.Client.Info()
	if err != nil {
		log.Fatalf("unable to fetch Docker daemon info: %v\n", err)
	}
	err = a.createContainerList()
	return
}
func findContainersForProject(name string) ([]types.Container, error) {
	client, err := client.NewEnvClient()
	if err != nil {
		return []types.Container{}, err
	}
	filterArgs := filters.NewArgs()
	if filterArgs, err = filters.ParseFlag(docker.KermitLabelFilter, filterArgs); err != nil {
		return []types.Container{}, err
	}
	return client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
}
func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	opts := types.EventsOptions{Filters: filters.NewArgs()}
	// opts.Filters.Add("action", "start")
	opts.Filters.Add("container", "container")
	ev, err := cli.Events(context.TODO(), opts)
	if err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, ev)
}
// getDockerClient safely returns the singleton instance of the Docker client.
func getDockerClient() DockerClient {
	safeClient.Lock()
	defer safeClient.Unlock()
	if safeClient.client != nil {
		return safeClient.client
	}
	if dc, err := client.NewEnvClient(); err != nil {
		log.Printf("Could not get a docker client: %v", err)
	} else {
		safeClient.client = dc
		return dc
	}
	// The return statement is just to make golint happy about this and for
	// compliance with the API.
	exitWithCode(1)
	return nil
}
// CreateClient creates a docker client based on the specified options.
func CreateClient(c ClientOpts) (client.APIClient, error) {
	if c.Host == "" {
		if os.Getenv("DOCKER_API_VERSION") == "" {
			os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion)
		}
		client, err := client.NewEnvClient()
		if err != nil {
			return nil, err
		}
		return client, nil
	}
	apiVersion := c.APIVersion
	if apiVersion == "" {
		apiVersion = DefaultAPIVersion
	}
	if c.TLSOptions.CAFile == "" {
		c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile)
	}
	if c.TLSOptions.CertFile == "" {
		c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile)
	}
	if c.TLSOptions.KeyFile == "" {
		c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile)
	}
	if c.TrustKey == "" {
		c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	}
	if c.TLSVerify {
		c.TLS = true
	}
	if c.TLS {
		c.TLSOptions.InsecureSkipVerify = !c.TLSVerify
	}
	var httpClient *http.Client
	if c.TLS {
		config, err := tlsconfig.Client(c.TLSOptions)
		if err != nil {
			return nil, err
		}
		tr := &http.Transport{
			TLSClientConfig: config,
		}
		proto, addr, _, err := client.ParseHost(c.Host)
		if err != nil {
			return nil, err
		}
		sockets.ConfigureTransport(tr, proto, addr)
		httpClient = &http.Client{
			Transport: tr,
		}
	}
	customHeaders := map[string]string{}
	customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS)
	client, err := client.NewClient(c.Host, apiVersion, httpClient, customHeaders)
	if err != nil {
		return nil, err
	}
	return client, nil
}
// Build will build all images in the Parity setup
func (c *DockerCompose) Build() error {
	log.Stage("Building containers")
	base := "Dockerfile"
	cwd, _ := os.Getwd()
	baseVersion := c.generateContainerVersion(cwd, base)
	imageName := fmt.Sprintf("%s:%s", c.ImageName, baseVersion)
	client, _ := dockerclient2.NewEnvClient()
	log.Step("Checking if image %s exists locally", imageName)
	if images, err := client.ImageList(context.Background(), types.ImageListOptions{MatchName: imageName}); err == nil {
		for _, i := range images {
			log.Info("Found image: %s", i.ID)
			return nil
		}
	}
	log.Step("Image %s not found locally, pulling", imageName)
	client.ImagePull(context.Background(), types.ImagePullOptions{ImageID: imageName}, nil)
	log.Step("Image %s not found anywhere, building", imageName)
	ctx, err := c.CreateTar(".", "Dockerfile")
	if err != nil {
		return err
	}
	defer ctx.Close()
	var progBuff io.Writer = os.Stdout
	var buildBuff io.Writer = os.Stdout
	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)
	var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon")
	logrus.Infof("Building %s...", imageName)
	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)
	// Publish latest and specific version
	response, err := client.ImageBuild(context.Background(), types.ImageBuildOptions{
		Context:    body,
		Tags:       []string{imageName, fmt.Sprintf("%s:latest", c.ImageName)},
		NoCache:    false,
		Remove:     true,
		Dockerfile: "Dockerfile",
	})
	if err != nil {
		log.Error(err.Error())
		return err
	}
	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			fmt.Fprintf(os.Stderr, "%s%s", progBuff, buildBuff)
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
func mkonion() (err error) {
	var (
		oMappings   *flagList = new(flagList)
		oPrivateKey string
	)
	flag.Var(oMappings, "p", "specify a list of port mappings of the form '[onion:]container'")
	flag.StringVar(&oPrivateKey, "k", "", "specify a private_key to use for the hidden service")
	flag.Parse()
	oTargetContainer := flag.Arg(0)
	if flag.NArg() != 1 || oTargetContainer == "" {
		flag.Usage()
		return fmt.Errorf("must specify a container to create an onion service for")
	}
	// Load the private key.
	var privatekey []byte
	if oPrivateKey != "" {
		pk, err := ioutil.ReadFile(oPrivateKey)
		if err != nil {
			return fmt.Errorf("reading private key: %s", err)
		}
		// -k is technically unsafe if you don't know what it does
		log.WithFields(log.Fields{
			"keypath": oPrivateKey,
		}).Warn("using the -k option results in your private key being embedded in the resulting image: do not share the image or any images derived from it with anybody")
		privatekey = pk
	}
	// Check the validity of arguments here.
	for _, arg := range *oMappings {
		ports := strings.SplitN(arg, ":", 2)
		if len(ports) == 0 || len(ports) > 2 {
			return fmt.Errorf("port mappings must be of the form '[onion:]container'")
		}
		for _, port := range ports {
			if !IsInteger(port) {
				return fmt.Errorf("port mappings must be integers")
			}
		}
	}
	cli, err := client.NewEnvClient()
	if err != nil {
		return fmt.Errorf("connecting to client: %s", err)
	}
	ident := generateIdentifier()
	networkID, err := CreateOnionNetwork(cli, ident)
	if err != nil {
		return fmt.Errorf("creating onion network: %s", err)
	}
	log.WithFields(log.Fields{
		"network": ident,
	}).Info("created onion network")
	defer func() {
		if err != nil {
			if err := PurgeOnionNetwork(cli, networkID); err != nil {
				log.Warnf("purge onion network: %s", err)
			}
		}
	}()
	if err := ConnectOnionNetwork(cli, oTargetContainer, networkID); err != nil {
		return fmt.Errorf("connecting target to onion network: %s", err)
	}
	log.WithFields(log.Fields{
		"network":   ident,
		"container": oTargetContainer,
	}).Info("attached container to onion network")
	ip, err := FindOnionIPAddress(cli, oTargetContainer, networkID)
	if err != nil {
		return fmt.Errorf("finding target onion ip: %s", err)
	}
	log.WithFields(log.Fields{
		"network":   ident,
		"container": oTargetContainer,
		"ip":        ip,
	}).Info("found target address")
	ports, err := FindTargetPorts(cli, oTargetContainer)
	if err != nil {
		return fmt.Errorf("finding target ports: %s", err)
	}
	// Add all exposed ports naively to mappings before parsing arguments.
	portMappings := map[string]string{}
	for _, port := range ports {
		log.Infof("forwarding port: %s", port)
		if port.Proto() != "tcp" {
			// Warnf, not Warn: the message contains a format verb.
			log.Warnf("encountered non-TCP exposed port in container: %s", port)
		}
		portMappings[port.Port()] = port.Port()
	}
	// Now deal with arguments.
	for _, arg := range *oMappings {
		var onion, container string
		ports := strings.SplitN(arg, ":", 2)
		onion = ports[0]
		// The format is [onion:]container.
		switch len(ports) {
		case 2:
			container = ports[1]
		case 1:
			container = ports[0]
		default:
			return fmt.Errorf("port mappings must be of the form '[onion:]container'")
		}
		// Can't redefine external mappings.
		if _, ok := portMappings[onion]; ok {
			return fmt.Errorf("cannot have multiple definitions of onion port mappings")
		}
		portMappings[onion] = container
	}
	torrc, err := GenerateConfig(cli, GenerateTargetMappings(ip, portMappings))
	if err != nil {
		return fmt.Errorf("generating torrc: %s", err)
	}
	log.Info("generated torrc config")
	buildOptions := &FakeBuildOptions{
		ident:      ident,
		networkID:  networkID,
		torrc:      torrc,
		privatekey: privatekey,
	}
	containerID, err := FakeBuildRun(cli, buildOptions)
	if err != nil {
		return fmt.Errorf("starting tor daemon: %s", err)
	}
	log.WithFields(log.Fields{
		"container": containerID,
	}).Infof("tor daemon started")
	// XXX: This has issues because we need to wait for Tor to make a hostname.
	onionAddr, err := GetOnionHostname(cli, containerID)
	if err != nil {
		return fmt.Errorf("get onion hostname: %s", err)
	}
	log.WithFields(log.Fields{
		"onion": onionAddr,
	}).Infof("retrieved Tor onion address")
	return nil
}
// newEnvClient initializes a new API client based on environment variables. Taken from
// github.com/docker/engine-api/client.go; fixes a bug when no TLS is present and adds
// validation of the connection URL to give users a more useful error message.
// Use DOCKER_HOST to set the url to the docker server.
// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
// Use DOCKER_CERT_PATH to load the tls certificates from.
// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func newEnvClient() (*client.Client, error) {
	return client.NewEnvClient()
}
func testDocker(t *testing.T, tag string, cmd []string) (exitCode int, logs string) {
	SkipUnlessLocal(t)
	l := StartCluster(t).(*cluster.LocalCluster)
	defer l.AssertAndStop(t)
	cli, err := dockerclient.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}
	addr := l.Nodes[0].PGAddr()
	containerConfig := container.Config{
		Image: "cockroachdb/postgres-test:" + tag,
		Env: []string{
			fmt.Sprintf("PGHOST=%s", addr.IP),
			fmt.Sprintf("PGPORT=%d", addr.Port),
			"PGSSLCERT=/certs/node.client.crt",
			"PGSSLKEY=/certs/node.client.key",
		},
		Cmd: strslice.New(cmd...),
	}
	hostConfig := container.HostConfig{
		Binds:       []string{fmt.Sprintf("%s:%s", l.CertsDir, "/certs")},
		NetworkMode: "host",
	}
	rc, err := cli.ImagePull(context.Background(), types.ImagePullOptions{
		ImageID: containerConfig.Image,
		Tag:     tag,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer rc.Close()
	dec := json.NewDecoder(rc)
	for {
		var message jsonmessage.JSONMessage
		if err := dec.Decode(&message); err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		log.Infof("ImagePull response: %s", message)
	}
	resp, err := cli.ContainerCreate(&containerConfig, &hostConfig, nil, "")
	if err != nil {
		t.Fatal(err)
	}
	for _, warning := range resp.Warnings {
		log.Warning(warning)
	}
	defer func() {
		if err := cli.ContainerRemove(types.ContainerRemoveOptions{
			ContainerID:   resp.ID,
			RemoveVolumes: true,
		}); err != nil {
			t.Error(err)
		}
	}()
	if err := cli.ContainerStart(resp.ID); err != nil {
		t.Fatal(err)
	}
	exitCode, err = cli.ContainerWait(context.Background(), resp.ID)
	if err != nil {
		t.Fatal(err)
	}
	if exitCode != 0 {
		rc, err := cli.ContainerLogs(context.Background(), types.ContainerLogsOptions{
			ContainerID: resp.ID,
			ShowStdout:  true,
			ShowStderr:  true,
		})
		if err != nil {
			t.Fatal(err)
		}
		defer rc.Close()
		b, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Fatal(err)
		}
		logs = string(b)
	}
	return
}
func (r *Reloader) Call2(msg HubMessage) error {
	log.Println("received message to reload ...")
	log.Printf("certPath %q, tls %v, host %v, api-version %v",
		os.Getenv("DOCKER_CERT_PATH"), os.Getenv("DOCKER_TLS_VERIFY"),
		os.Getenv("DOCKER_HOST"), os.Getenv("DOCKER_API_VERSION"))
	cli, err := client.NewEnvClient()
	//defaultHeaders := map[string]string{"User-Agent": "webhook-reloader"}
	//cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders)
	if err != nil {
		log.Print(err)
		return err
	}
	image := msg.Repository.RepoName
	tag := "latest"
	log.Printf("pull image %q with tag %q ...", image, tag)
	//ctx, cancel := context.WithTimeout(context.Background(), time.Duration(500) * time.Millisecond)
	//defer cancel()
	rc, err := cli.ImagePull(
		context.Background(),
		types.ImagePullOptions{
			ImageID: msg.Repository.RepoName,
			Tag:     tag,
		}, nil)
	if err != nil {
		log.Print(err)
		return err
	}
	defer rc.Close()
	dec := json.NewDecoder(rc)
	for {
		var message jsonmessage.JSONMessage
		if err := dec.Decode(&message); err != nil {
			if err == io.EOF {
				break
			}
			log.Print(err)
			return err
		}
		log.Printf("%s", message)
	}
	containerName := "test"
	previousContainer := types.Container{}
	psOptions := types.ContainerListOptions{All: true}
	containers, err := cli.ContainerList(psOptions)
	if err != nil {
		log.Print(err)
		return err
	}
	for _, c := range containers {
		for _, name := range c.Names {
			log.Printf("%q/%q", c.ID, name)
			if name == fmt.Sprintf("/%s", containerName) {
				previousContainer = c
			}
		}
	}
	log.Printf("prev container %v", previousContainer)
	err = cli.ContainerStop(containerName, 10)
	if err != nil {
		log.Printf("stop %q: %v", containerName, err)
		//return err
	}
	rmOptions := types.ContainerRemoveOptions{ContainerID: containerName}
	err = cli.ContainerRemove(rmOptions)
	if err != nil {
		log.Printf("rm %q: %v", containerName, err)
		//return err
	}
	// Expose port 8080/tcp. nat.NewPort expects (proto, port), and the
	// error is checked before the port is used.
	port, err := nat.NewPort("tcp", "8080")
	if err != nil {
		log.Print(err)
		return err
	}
	exposedPorts := make(nat.PortSet)
	exposedPorts[port] = struct{}{}
	cli.ContainerCreate(
		&container.Config{
			Image:        image,
			ExposedPorts: exposedPorts,
		},
		&container.HostConfig{},
		&network.NetworkingConfig{},
		containerName)
	log.Printf("done.")
	return nil
}
func main() {
	exclude := flag.String("exclude", "", "images to exclude, image:tag[,image:tag]")
	dryRun := flag.Bool("dry-run", false, "just list containers to remove")
	flag.Parse()
	if os.Getenv("DOCKER_HOST") == "" {
		err := os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
		if err != nil {
			log.Fatalf("error setting default DOCKER_HOST: %s", err)
		}
	}
	excluded := map[string]struct{}{}
	if len(*exclude) > 0 {
		for _, i := range strings.Split(*exclude, ",") {
			excluded[i] = struct{}{}
		}
	}
	docker, err := client.NewEnvClient()
	if err != nil {
		log.Fatalf("error creating docker client: %s", err)
	}
	topImages, err := docker.ImageList(types.ImageListOptions{})
	if err != nil {
		log.Fatalf("error getting docker images: %s", err)
	}
	allImages, err := docker.ImageList(types.ImageListOptions{All: true})
	if err != nil {
		log.Fatalf("error getting all docker images: %s", err)
	}
	imageTree := make(map[string]types.Image, len(allImages))
	for _, image := range allImages {
		imageTree[image.ID] = image
	}
	containers, err := docker.ContainerList(types.ContainerListOptions{All: true})
	if err != nil {
		log.Fatalf("error getting docker containers: %s", err)
	}
	used := map[string]string{}
	for _, container := range containers {
		inspected, err := docker.ContainerInspect(container.ID)
		if err != nil {
			log.Printf("error getting container info for %s: %s", container.ID, err)
			continue
		}
		used[inspected.Image] = container.ID
		parent := imageTree[inspected.Image].ParentID
		for {
			if parent == "" {
				break
			}
			used[parent] = container.ID
			parent = imageTree[parent].ParentID
		}
	}
	for _, image := range topImages {
		if _, ok := used[image.ID]; !ok {
			skip := false
			for _, tag := range image.RepoTags {
				if _, ok := excluded[tag]; ok {
					skip = true
				}
				if skip {
					break
				}
			}
			if skip {
				log.Printf("Skipping %s: %s", image.ID, strings.Join(image.RepoTags, ","))
				continue
			}
			log.Printf("Going to remove %s: %s", image.ID, strings.Join(image.RepoTags, ","))
			if !*dryRun {
				if len(image.RepoTags) < 2 {
					// <none>:<none> case, just remove by id
					_, err := docker.ImageRemove(types.ImageRemoveOptions{ImageID: image.ID, PruneChildren: true})
					if err != nil {
						log.Printf("error while removing %s (%s): %s", image.ID, strings.Join(image.RepoTags, ","), err)
					}
				} else {
					// several tags case, remove each by name
					for _, r := range image.RepoTags {
						_, err := docker.ImageRemove(types.ImageRemoveOptions{ImageID: r, PruneChildren: true})
						if err != nil {
							log.Printf("error while removing %s (%s): %s", r, strings.Join(image.RepoTags, ","), err)
							continue
						}
					}
				}
			}
		}
	}
}
func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	options := types.ContainerListOptions{All: true}
	containers, err := cli.ContainerList(options)
	if err != nil {
		panic(err)
	}
	var container types.Container
	for _, c := range containers {
		for _, name := range c.Names {
			if strings.Contains(name, "slave-mysql") {
				container = c
				break
			}
		}
	}
	fmt.Printf("Found MySQL slave server at %v\n", container.Ports[0].IP)
	var mysqlLog = mysql.Logger(log.New(ioutil.Discard, "", 0))
	dsn := fmt.Sprintf("root:mysql@tcp(%s:3306)/docker?charset=utf8", container.Ports[0].IP)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	mysql.SetLogger(mysqlLog)
	// Lock the tables
	if _, err := db.Exec(`FLUSH TABLES WITH READ LOCK;`); err != nil {
		log.Fatal(err)
	}
	repoName := "mysql-snapshot"
	tagName := time.Now().Format("20060102-150405")
	// Commit the container to a new image == turn the filesystem diff into a new layer
	commitOptions := types.ContainerCommitOptions{
		ContainerID:    container.ID,
		RepositoryName: repoName,
		Tag:            tagName,
		Comment:        "Snapshooter",
		Author:         "Snapshooter",
		Pause:          true,
	}
	response, err := cli.ContainerCommit(commitOptions)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Created new image with ID %v\n", response.ID)
	// Unlock the tables
	// TODO: [MySQL] 2016/02/04 12:15:20 packets.go:32: unexpected EOF
	if _, err := db.Exec(`UNLOCK TABLES;`); err != nil {
		log.Print(err)
	}
	fmt.Println("\nStart container by:")
	fmt.Printf("\tdocker run -d -P -e 'affinity:container==slave-mysql' %s:%s\n", repoName, tagName)
}