// dockerClients creates two *docker.Client, one for long running operations and
// the other for shorter operations. In test / dev mode we can use ENV vars to
// connect to the docker daemon. In production mode we will read docker.endpoint
// from the config file.
func (d *DockerDriver) dockerClients() (*docker.Client, *docker.Client, error) {
	if client != nil && waitClient != nil {
		return client, waitClient, nil
	}

	var err error
	var merr multierror.Error
	createClients.Do(func() {
		if err = shelpers.Init(); err != nil {
			d.logger.Printf("[FATAL] driver.docker: unable to initialize stats: %v", err)
			return
		}

		// Default to using whatever is configured in docker.endpoint. If this is
		// not specified we'll fall back on NewClientFromEnv which reads config from
		// the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and
		// DOCKER_CERT_PATH. This allows us to lock down the config in production
		// but also accept the standard ENV configs for dev and test.
		dockerEndpoint := d.config.Read("docker.endpoint")
		if dockerEndpoint != "" {
			cert := d.config.Read("docker.tls.cert")
			key := d.config.Read("docker.tls.key")
			ca := d.config.Read("docker.tls.ca")

			if cert+key+ca != "" {
				d.logger.Printf("[DEBUG] driver.docker: using TLS client connection to %s", dockerEndpoint)
				client, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
				if err == nil {
					waitClient, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
				}
			} else {
				d.logger.Printf("[DEBUG] driver.docker: using standard client connection to %s", dockerEndpoint)
				client, err = docker.NewClient(dockerEndpoint)
				if err == nil {
					waitClient, err = docker.NewClient(dockerEndpoint)
				}
			}
			if err != nil {
				merr.Errors = append(merr.Errors, err)
				return
			}
			// Only the short-ops client gets an HTTP timeout; the wait client
			// must be able to block for the lifetime of a container.
			client.HTTPClient.Timeout = dockerTimeout
			return
		}

		d.logger.Println("[DEBUG] driver.docker: using client connection initialized from environment")
		client, err = docker.NewClientFromEnv()
		if err != nil {
			merr.Errors = append(merr.Errors, err)
			return
		}
		client.HTTPClient.Timeout = dockerTimeout

		waitClient, err = docker.NewClientFromEnv()
		if err != nil {
			merr.Errors = append(merr.Errors, err)
		}
	})
	return client, waitClient, merr.ErrorOrNil()
}
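// The split between client and waitClient above exists because the short-ops
// client carries an HTTP timeout while blocking calls such as WaitContainer
// must not. A minimal sketch of that idea, assuming the fsouza/go-dockerclient
// package (imported as docker) and a hypothetical 5-minute timeout; it is
// illustrative, not the driver's actual wiring.
func waitClientExample() error {
	// Short-ops client: the HTTP timeout bounds quick calls (Ping, Version, ...).
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}
	client.HTTPClient.Timeout = 5 * time.Minute
	if err := client.Ping(); err != nil {
		return err
	}

	// Wait client: no timeout, since WaitContainer blocks for the container's
	// entire lifetime and must not be cut off mid-wait.
	waitClient, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}

	// "some-container-id" is a placeholder for a real container ID.
	exitCode, err := waitClient.WaitContainer("some-container-id")
	if err != nil {
		return err
	}
	fmt.Println("container exited with code", exitCode)
	return nil
}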
// dockerClient creates *docker.Client. In test / dev mode we can use ENV vars
// to connect to the docker daemon. In production mode we will read
// docker.endpoint from the config file.
func (d *DockerDriver) dockerClient() (*docker.Client, error) {
	if client != nil {
		return client, nil
	}

	var err error
	createClient.Do(func() {
		// Default to using whatever is configured in docker.endpoint. If this is
		// not specified we'll fall back on NewClientFromEnv which reads config from
		// the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and
		// DOCKER_CERT_PATH. This allows us to lock down the config in production
		// but also accept the standard ENV configs for dev and test.
		dockerEndpoint := d.config.Read("docker.endpoint")
		if dockerEndpoint != "" {
			cert := d.config.Read("docker.tls.cert")
			key := d.config.Read("docker.tls.key")
			ca := d.config.Read("docker.tls.ca")

			if cert+key+ca != "" {
				d.logger.Printf("[DEBUG] driver.docker: using TLS client connection to %s", dockerEndpoint)
				client, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca)
			} else {
				d.logger.Printf("[DEBUG] driver.docker: using standard client connection to %s", dockerEndpoint)
				client, err = docker.NewClient(dockerEndpoint)
			}
			return
		}

		d.logger.Println("[DEBUG] driver.docker: using client connection initialized from environment")
		client, err = docker.NewClientFromEnv()
	})
	return client, err
}
func init() {
	var err error
	Client, err = docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
}
func (o *DockerbuildOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string) error {
	paths, envArgs, ok := cmdutil.SplitEnvironmentFromResources(args)
	if !ok {
		return kcmdutil.UsageError(cmd, "context directory must be specified before environment changes: %s", strings.Join(args, " "))
	}
	if len(paths) != 2 {
		return kcmdutil.UsageError(cmd, "the directory to build and tag must be specified")
	}
	o.Arguments, _, _ = cmdutil.ParseEnvironmentArguments(envArgs)
	o.Directory = paths[0]
	o.Tag = paths[1]
	if len(o.DockerfilePath) == 0 {
		o.DockerfilePath = filepath.Join(o.Directory, "Dockerfile")
	}

	var mounts []dockerbuilder.Mount
	for _, s := range o.MountSpecs {
		segments := strings.Split(s, ":")
		if len(segments) != 2 {
			return kcmdutil.UsageError(cmd, "--mount must be of the form SOURCE:DEST")
		}
		mounts = append(mounts, dockerbuilder.Mount{SourcePath: segments[0], DestinationPath: segments[1]})
	}
	o.Mounts = mounts

	client, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}
	o.Client = client

	o.Keyring = credentialprovider.NewDockerKeyring()
	return nil
}
// dockerSetup does all of the basic setup you need to get a running docker
// process up and running for testing. Use like:
//
//	task := taskTemplate()
//	// do custom task configuration
//	client, handle, cleanup := dockerSetup(t, task)
//	defer cleanup()
//	// do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
	}

	driverCtx, execCtx := testDriverContexts(task)
	driver := NewDockerDriver(driverCtx)

	handle, err := driver.Start(execCtx, task)
	if err != nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack())
	}
	if handle == nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
	}

	cleanup := func() {
		handle.Kill()
		execCtx.AllocDir.Destroy()
	}

	return client, handle, cleanup
}
// getDockerClient returns a *docker.Client, either using the endpoint passed
// in, or using DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH per their
// spec.
func getDockerClient(dockerEndpoint string) (*docker.Client, error) {
	if len(dockerEndpoint) > 0 {
		glog.Infof("Connecting to docker on %s", dockerEndpoint)
		return docker.NewClient(dockerEndpoint)
	}
	return docker.NewClientFromEnv()
}
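// Hypothetical call sites for getDockerClient above: a non-empty endpoint is
// used verbatim, while an empty string falls back to the DOCKER_* environment
// variables. exampleClients is illustrative only.
func exampleClients() error {
	// Pin the endpoint explicitly, e.g. the default local socket.
	local, err := getDockerClient("unix:///var/run/docker.sock")
	if err != nil {
		return err
	}
	if err := local.Ping(); err != nil {
		return err
	}
	// Or defer to DOCKER_HOST / DOCKER_TLS_VERIFY / DOCKER_CERT_PATH.
	fromEnv, err := getDockerClient("")
	if err != nil {
		return err
	}
	return fromEnv.Ping()
}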
func checkDockerVersion() error {
	startCheck("Docker up to date")
	dockerVersionTest, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}
	minDockerVersion, err := docker.NewAPIVersion("1.9")
	if err != nil {
		return err
	}
	env, err := dockerVersionTest.Version()
	if err != nil {
		return err
	}
	currentVersionParts := strings.Split(env.Get("Version"), ".")
	currentVersion, err := docker.NewAPIVersion(fmt.Sprintf("%s.%s", currentVersionParts[0], currentVersionParts[1]))
	if err != nil {
		return err
	}
	if !currentVersion.GreaterThanOrEqualTo(minDockerVersion) {
		diagnose(Diagnosis{
			Title:       "Docker up to date",
			Description: "<fail>Docker engine is out of date (min: 1.9)</fail>",
			DocsLink:    "https://docs.docker.com/engine/installation/",
			Kind:        "fail",
		})
	} else {
		diagnose(Diagnosis{
			Title: "Docker up to date",
			Kind:  "success",
		})
	}
	return nil
}
// TestConformance* compares the result of running the direct build against a
// sequential docker build. A dockerfile and git repo are loaded, then each step
// in the file is run sequentially, committing after each step. The generated
// image.Config and the resulting filesystems are compared. The next step reuses
// the previously generated layer and performs an incremental diff. This ensures
// that each step is functionally equivalent.
//
// Deviations:
//   - Builds run at different times.
//   - Modification timestamps are ignored on files.
//   - Some processes (gem install) result in files created in the image that
//     have different content because of that (timestamps in files). We treat
//     a file that is identical except for size within 10 bytes, where neither
//     old nor new is zero bytes, to be identical.
//   - Docker container commit with ENV FOO=BAR and a Docker build with line
//     ENV FOO=BAR will generate an image with FOO=BAR in different positions
//     (commit places the variable first, build: last). We try to align the
//     generated environment variables to ensure they are equal.
//   - The parent image ID is ignored.
//
// TODO: .dockerignore
// TODO: check context dir
// TODO: ONBUILD
// TODO: ensure that the final built image has the right UIDs
func TestConformanceInternal(t *testing.T) {
	testCases := []conformanceTest{
		{
			ContextDir: "fixtures/dir",
		},
		// TODO: Fix this test
		// {
		// 	ContextDir: "fixtures/ignore",
		// },
		{
			Dockerfile: "fixtures/Dockerfile.env",
		},
		{
			Dockerfile: "fixtures/Dockerfile.edgecases",
		},
		{
			Dockerfile: "fixtures/Dockerfile.exposedefault",
		},
		{
			Dockerfile: "fixtures/Dockerfile.add",
		},
	}

	c, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatal(err)
	}

	for i, test := range testCases {
		conformanceTester(t, c, test, i, *compareLayers)
	}
}
// Ping implements the DockerInterface Ping method.
func (c *KubeDocker) Ping() error {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}
	return client.Ping()
}
func worker(requests int, image string, completeCh chan time.Duration) {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	for i := 0; i < requests; i++ {
		start := time.Now()
		container, err := client.CreateContainer(docker.CreateContainerOptions{
			Config: &docker.Config{
				Image: image,
			},
		})
		if err != nil {
			panic(err)
		}
		if err := client.StartContainer(container.ID, nil); err != nil {
			panic(err)
		}
		completeCh <- time.Since(start)
	}
}
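// A hypothetical harness for the worker above: fan out N workers and average
// the per-container startup latencies arriving on completeCh. The flag names
// and entry point are assumptions; the original benchmark's main isn't shown
// here. Assumes imports "flag", "fmt", and "time".
func runWorkers() {
	var (
		workers  = flag.Int("workers", 4, "concurrent workers")
		requests = flag.Int("requests", 10, "containers per worker")
		image    = flag.String("image", "busybox", "image to run")
	)
	flag.Parse()

	completeCh := make(chan time.Duration)
	for i := 0; i < *workers; i++ {
		go worker(*requests, *image, completeCh)
	}

	total := *workers * *requests
	var sum time.Duration
	for i := 0; i < total; i++ {
		sum += <-completeCh
	}
	fmt.Printf("started %d containers, mean startup latency %v\n", total, sum/time.Duration(total))
}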
// DockerClient creates a docker client from the environment.
func DockerClient() *dockerclient.Client {
	client, err := dockerclient.NewClientFromEnv()
	if err != nil {
		log.Fatalf("Unable to create a Docker client (%v): is Docker Machine installed and running?", err)
	}
	client.SkipServerVersionCheck = true
	return client
}
// ClientOrDie creates a new Docker client. If one couldn't be created, logs an
// error and exits with status code 1.
func ClientOrDie() *docker.Client {
	cl, err := docker.NewClientFromEnv()
	if err != nil {
		log.Err("creating new docker client (%s)", err)
		os.Exit(1)
	}
	return cl
}
// NewControllerFromEnv creates a new Docker client using environment clues.
func NewControllerFromEnv(out io.Writer) (*Controller, error) {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return nil, fmt.Errorf("could not create Docker client: %v", err)
	}
	return NewController(client, out)
}
func NewDockerClient() (*docker.Client, error) {
	host := os.Getenv("DOCKER_HOST")
	hostIsLocal := host == "" || strings.HasPrefix(host, "unix://")
	if !hostIsLocal {
		log.Warnf("Detected DOCKER_HOST %s. This should not be remote.", host)
	}
	return docker.NewClientFromEnv()
}
// GetClient returns a valid Docker client, the address of the client, or an
// error if the client couldn't be created.
func (_ *Helper) GetClient() (client *docker.Client, endpoint string, err error) {
	client, err = docker.NewClientFromEnv()
	if len(os.Getenv("DOCKER_HOST")) > 0 {
		endpoint = os.Getenv("DOCKER_HOST")
	} else {
		endpoint = "unix:///var/run/docker.sock"
	}
	return
}
// dockerPreCNICleanup detects whether we are upgrading from a pre-CNI
// openshift and cleans up interfaces and iptables rules that are no longer
// required.
func (node *OsdnNode) dockerPreCNICleanup() error {
	exec := kexec.New()
	itx := ipcmd.NewTransaction(exec, "lbr0")
	itx.SetLink("down")
	if err := itx.EndTransaction(); err != nil {
		// no cleanup required
		return nil
	}

	node.clearLbr0IptablesRule = true

	// Restart docker to kill old pods and make it use docker0 again.
	// "systemctl restart" will bail out (unnecessarily) in the
	// OpenShift-in-a-container case, so we work around that by sending
	// the messages by hand.
	if _, err := osexec.Command("dbus-send", "--system", "--print-reply", "--reply-timeout=2000", "--type=method_call", "--dest=org.freedesktop.systemd1", "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager.Reload").CombinedOutput(); err != nil {
		log.Error(err)
	}
	if _, err := osexec.Command("dbus-send", "--system", "--print-reply", "--reply-timeout=2000", "--type=method_call", "--dest=org.freedesktop.systemd1", "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager.RestartUnit", "string:'docker.service' string:'replace'").CombinedOutput(); err != nil {
		log.Error(err)
	}

	// Delete pre-CNI interfaces
	for _, intf := range []string{"lbr0", "vovsbr", "vlinuxbr"} {
		itx := ipcmd.NewTransaction(exec, intf)
		itx.DeleteLink()
		itx.IgnoreError()
		itx.EndTransaction()
	}

	// Wait until docker has restarted, since kubelet will exit if docker isn't running
	dockerClient, err := docker.NewClientFromEnv()
	if err != nil {
		return fmt.Errorf("failed to get docker client: %v", err)
	}
	err = kwait.ExponentialBackoff(
		kwait.Backoff{
			Duration: 100 * time.Millisecond,
			Factor:   1.2,
			Steps:    6,
		},
		func() (bool, error) {
			if err := dockerClient.Ping(); err != nil {
				// wait longer
				return false, nil
			}
			return true, nil
		})
	if err != nil {
		return fmt.Errorf("failed to connect to docker after SDN cleanup restart: %v", err)
	}

	log.Infof("Cleaned up left-over openshift-sdn docker bridge and interfaces")
	return nil
}
func GetDockerClient() *docker.Client {
	if dCli == nil {
		if d, err := docker.NewClientFromEnv(); err != nil {
			// NOTE: the connection error is swallowed here; dCli stays nil
			// and the caller receives a nil client.
			//log.Fatal(err)
		} else {
			dCli = d
		}
	}
	return dCli
}
func newDockerClient() (*_dockerclient, error) {
	client, err := godocker.NewClientFromEnv()
	if err != nil {
		return nil, err
	}
	// Ping verifies that the daemon is actually reachable; the wrapped client
	// is returned alongside any ping error.
	err = client.Ping()
	return &_dockerclient{
		docker: client,
	}, err
}
// Init initializes the Docker client.
func (c *CheckDocker) Init() error {
	// create the docker client
	var err error
	c.dockerClient, err = docker.NewClientFromEnv()
	if err != nil {
		c.Logger.Printf("[DEBUG] Error creating the Docker client: %s", err.Error())
		return err
	}
	return nil
}
// Init sets up the controller's Docker connection.
func (c *Controller) Init(namePrefix string) error {
	var err error
	if c.docker, err = dockerclient.NewClientFromEnv(); err != nil {
		return err
	}
	c.NamePrefix = namePrefix
	c.NextJobId = 1
	c.templates = make(map[string]*Template)
	return nil
}
// StartDaemon starts a daemon using the provided binary, returning
// a client to the binary, a close function, and an error.
func StartDaemon(binary string, lc LogCapturer) (*dockerclient.Client, func() error, error) {
	// Get Docker version of process
	previousVersion, err := versionutil.BinaryVersion(binary)
	if err != nil {
		return nil, nil, fmt.Errorf("could not get binary version: %s", err)
	}

	logrus.Debugf("Starting daemon with %s", binary)
	binaryArgs := []string{}
	if previousVersion.LessThan(versionutil.StaticVersion(1, 8, 0)) {
		binaryArgs = append(binaryArgs, "--daemon")
	} else {
		binaryArgs = append(binaryArgs, "daemon")
	}
	binaryArgs = append(binaryArgs, "--log-level=debug")
	binaryArgs = append(binaryArgs, "--storage-driver="+getGraphDriver())
	cmd := exec.Command(binary, binaryArgs...)
	cmd.Stdout = lc.Stdout()
	cmd.Stderr = lc.Stderr()
	if err := cmd.Start(); err != nil {
		return nil, nil, fmt.Errorf("could not start daemon: %s", err)
	}

	logrus.Debugf("Waiting for daemon to start")
	time.Sleep(2 * time.Second)

	client, err := dockerclient.NewClientFromEnv()
	if err != nil {
		return nil, nil, fmt.Errorf("could not initialize client: %s", err)
	}

	// Wait for it to start
	for i := 0; ; i++ {
		v, err := client.Version()
		if err == nil {
			logrus.Debugf("Established connection to daemon with version %s", v.Get("Version"))
			break
		}
		if i >= 10 {
			logrus.Fatalf("Failed to establish connection to daemon, check logs, quitting")
		}
		time.Sleep(time.Second)
	}

	kill := func() error {
		if err := cmd.Process.Kill(); err != nil {
			return err
		}
		time.Sleep(500 * time.Millisecond)
		return os.RemoveAll("/var/run/docker.pid")
	}

	return client, kill, nil
}
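// Hypothetical usage of StartDaemon: LogCapturer's concrete type isn't shown
// in this snippet, so stdCapturer below is a stand-in that satisfies the
// assumed Stdout()/Stderr() io.Writer methods. The binary path is a placeholder.
type stdCapturer struct{}

func (stdCapturer) Stdout() io.Writer { return os.Stdout }
func (stdCapturer) Stderr() io.Writer { return os.Stderr }

func exampleStartDaemon() error {
	client, kill, err := StartDaemon("/usr/local/bin/docker", stdCapturer{})
	if err != nil {
		return err
	}
	defer kill()

	// The returned client has already been verified against the daemon.
	return client.Ping()
}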
// NewImageManager creates an instance of ImageManager
func NewImageManager() (*ImageManager, error) {
	client, err := dockerclient.NewClientFromEnv()
	if err != nil {
		return nil, err
	}
	manager := &ImageManager{client: client}
	return manager, nil
}
func setupDockerClient() (*docker.Client, error) {
	dc, err := docker.NewClientFromEnv()
	if err != nil {
		return nil, err
	}
	env, err := dc.Version()
	if err != nil {
		return nil, err
	}
	log.Printf("Using Docker %v", env)
	return dc, nil
}
func buildContainer(proj Project, dockersock string, debug bool) (*docker.Container, error) {
	l := NewLog("\t[dockerfile builder]", debug)
	containerImageName := fmt.Sprintf("%s-%s", proj.Branch, proj.Name)

	l.Trace("connecting to docker daemon running @", dockersock)
	var client *docker.Client
	var err error
	if dockersock == "unix:///var/run/docker.sock" {
		l.Trace("using", dockersock)
		client, err = docker.NewClient(dockersock)
	} else {
		l.Trace("creating docker client from env")
		client, err = docker.NewClientFromEnv()
	}
	if err != nil {
		l.Error(err)
		return nil, err
	}

	l.Trace("Cleaning duplicate containers")
	proj.Status.Write([]byte("Checking for old containers...\n"))
	if err := cleanDuplicateContainer(client, proj); err != nil {
		proj.Status.Write([]byte("Container check failed -> \n"))
		proj.Status.Write([]byte(err.Error()))
		l.Error("err cleaning containers", err)
		return nil, err
	}

	l.Trace("Building image", containerImageName)
	proj.Status.Write([]byte("Building image...\n"))
	if err := buildImage(client, containerImageName, proj.Archive); err != nil {
		proj.Status.Write([]byte("Build failed\n"))
		proj.Status.Write([]byte(err.Error()))
		return nil, err
	}

	l.Trace("Launching container ", proj.Name)
	proj.Status.Write([]byte("Launching container...\n"))
	container, err := launchContainer(client, containerImageName, proj.Name)
	if err != nil {
		proj.Status.Write([]byte("Launch failed\n"))
		proj.Status.Write([]byte(err.Error()))
		return nil, err
	}
	l.Trace(container.Name, " with id ", container.ID, "launched")

	return container, nil
}
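// buildContainer relies on cleanDuplicateContainer, buildImage, and
// launchContainer, none of which appear above. A minimal sketch of
// launchContainer, assuming the fsouza/go-dockerclient API; the real helper
// may set ports, volumes, or host config that this guess omits.
func launchContainer(client *docker.Client, image, name string) (*docker.Container, error) {
	// Create a container from the freshly built image, named after the project.
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name:   name,
		Config: &docker.Config{Image: image},
	})
	if err != nil {
		return nil, err
	}
	// Start it with no extra host configuration.
	if err := client.StartContainer(container.ID, nil); err != nil {
		return nil, err
	}
	return container, nil
}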
func TestDockerScriptCheck(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		return
	}
	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("error creating docker client: %v", err)
	}

	if err := client.PullImage(docker.PullImageOptions{Repository: "busybox", Tag: "latest"},
		docker.AuthConfiguration{}); err != nil {
		t.Fatalf("error pulling busybox: %v", err)
	}

	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Config: &docker.Config{
			Image: "busybox",
			Cmd:   []string{"/bin/sleep", "1000"},
		},
	})
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer removeContainer(client, container.ID)

	if err := client.StartContainer(container.ID, container.HostConfig); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	check := &DockerScriptCheck{
		id:          "1",
		interval:    5 * time.Second,
		containerID: container.ID,
		logger:      log.New(os.Stdout, "", log.LstdFlags),
		cmd:         "/bin/echo",
		args:        []string{"hello", "world"},
	}

	res := check.Run()
	expectedOutput := "hello world"
	expectedExitCode := 0
	if res.Err != nil {
		t.Fatalf("err: %v", res.Err)
	}
	if strings.TrimSpace(res.Output) != expectedOutput {
		t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
	}
	if res.ExitCode != expectedExitCode {
		t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
	}
}
func dockerIsRemote(t *testing.T) bool {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return false
	}

	// Technically this could be a local tcp socket but for testing purposes
	// we'll just assume that tcp is only used for remote connections.
	return strings.HasPrefix(client.Endpoint(), "tcp")
}
// dockerClient creates *docker.Client. In test / dev mode we can use ENV vars
// to connect to the docker daemon. In production mode we will read
// docker.endpoint from the config file.
func (d *DockerDriver) dockerClient() (*docker.Client, error) {
	// Default to using whatever is configured in docker.endpoint. If this is
	// not specified we'll fall back on NewClientFromEnv which reads config from
	// the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and
	// DOCKER_CERT_PATH. This allows us to lock down the config in production
	// but also accept the standard ENV configs for dev and test.
	dockerEndpoint := d.config.Read("docker.endpoint")
	if dockerEndpoint != "" {
		return docker.NewClient(dockerEndpoint)
	}

	return docker.NewClientFromEnv()
}
// main function
func main() {
	fmt.Println("Feed me a compose file now:")

	// Read in our compose file from stdin
	yamlbytes, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}

	// unmarshal it so we can enumerate our services
	if err := yaml.Unmarshal(yamlbytes, &services); err != nil {
		log.Fatal(err)
	}

	// create a new compose project
	p, err = docker.NewProject(&docker.Context{
		Context: project.Context{
			ComposeBytes: yamlbytes,
			ProjectName:  "my-compose", // TODO make an environment variable
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// create our docker client link
	client, err := dockerclient.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	// make and attach our listener channel
	events := make(chan *dockerclient.APIEvents)
	client.AddEventListener(events)

	// start watching for events
	go watchEvents(events)

	// main loop
	for {
		// look up how many nodes we have in the cluster
		// this is mainly for when a node is added
		nodes = getNodes(client)

		// Print the number of nodes we found
		log.Printf("Nodes: %d\n", nodes)

		// Do the heavy lifting once
		scale()

		// sleep for a bit, then check again
		time.Sleep(time.Minute) // TODO make an environment variable
	}
}
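// A sketch of the watchEvents goroutine started above; the original isn't
// shown, so this logging-only body is an assumption. With the
// fsouza/go-dockerclient APIEvents type, Status carries the event name
// ("start", "die", ...) and ID the container ID.
func watchEvents(events chan *dockerclient.APIEvents) {
	for event := range events {
		log.Printf("docker event: %s %s", event.Status, event.ID)
	}
}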
// dockerData gathers data from the docker daemon directly (using DOCKER_HOST)
// and from /proc/DOCKER_PID/status, and publishes those to conn.
func dockerData(tags string, conn net.Conn) {
	if os.Getenv("DOCKER_HOST") == "" {
		log.Fatal(`please provide eg. DOCKER_HOST="tcp://127.0.0.1:8080"`)
	}

	// pid
	pidS := os.Getenv("DOCKER_PID")
	if pidS == "" {
		log.Fatal("cannot find docker daemon - please provide DOCKER_PID - try DOCKER_PID=`sudo lsof -t -sTCP:LISTEN -i :8080`")
	}
	pid, err := strconv.Atoi(pidS)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("docker pid=%d", pid)

	// docker client
	dockerClient, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	for {
		p, err := process.NewProcess(int32(pid))
		if err != nil {
			log.Fatal(err)
		}

		// threads
		threads, err := p.NumThreads()
		if err != nil {
			log.Fatal(err)
		}

		mi, err := p.MemoryInfo()
		if err != nil {
			log.Fatal(err)
		}
		rss := mi.RSS
		vms := mi.VMS

		// docker info
		info, err := dockerClient.Info()
		if err != nil {
			log.Fatal(err)
		}
		// log.Printf("info=%#v", info)

		output := fmt.Sprintf("docker,driver=%s%s containers=%di,goroutines=%di,images=%di,threads=%di,vmsize=%di,rss=%di",
			info.Get("Driver"), tags,
			info.GetInt("Containers"), info.GetInt("NGoroutines"), info.GetInt("Images"),
			threads, vms, rss)
		n, err := conn.Write([]byte(output))
		if err != nil {
			log.Fatal(err)
		}
		log.Println(n, output)
		time.Sleep(1 * time.Second)
	}
}
// dockerIsConnected checks to see if a docker daemon is available (local or remote)
func dockerIsConnected(t *testing.T) bool {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return false
	}

	// Creating a client doesn't actually connect, so make sure we do something
	// like call Version() on it.
	env, err := client.Version()
	if err != nil {
		t.Logf("Failed to connect to docker daemon: %s", err)
		return false
	}

	t.Logf("Successfully connected to docker daemon running version %s", env.Get("Version"))
	return true
}
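// Hypothetical use of the helper above in a test: skip early when no daemon
// is reachable instead of failing the suite. The test name is illustrative.
func TestNeedsDocker(t *testing.T) {
	if !dockerIsConnected(t) {
		t.Skip("docker daemon not available")
	}
	// ... test body that talks to the daemon ...
}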