// newDockerClient constructs a new docker client using the best available // method. If DOCKER_HOST is set, initialize the client using DOCKER_TLS_VERIFY // and DOCKER_CERT_PATH. If DOCKER_HOST is not set, look for the unix domain // socket in /run/docker.sock and /var/run/docker.sock. func newDockerClient() dockerclient.Client { if host := os.Getenv("DOCKER_HOST"); host != "" { if os.Getenv("DOCKER_TLS_VERIFY") == "" { c, err := dockerclient.NewDockerClient(host, nil) if err != nil { log.Fatal(err) } return c } c, err := dockerclient.NewDockerClient(host, getTLSConfig()) if err != nil { log.Fatal(err) } return c } for _, l := range []string{"/run/docker.sock", "/var/run/docker.sock"} { if _, err := os.Stat(l); err != nil { continue } c, err := dockerclient.NewDockerClient("unix://"+l, nil) if err != nil { return nil } return c } log.Fatal("docker not configured") return nil }
// BestEfforDockerClient creates a docker client from one of: // // 1. Environment variables as defined in // https://docs.docker.com/reference/commandline/cli/. Specifically // DOCKER_HOST, DOCKER_TLS_VERIFY & DOCKER_CERT_PATH. // // 2. bootdocker, if darwin. // // 3. /run/docker.sock, if it exists. // // 4. /var/run/docker.sock, if it exists. func BestEffortDockerClient() (*dockerclient.DockerClient, error) { host := os.Getenv("DOCKER_HOST") if host == "" { if runtime.GOOS == "darwin" { return Boot2DockerClient() } socketLocations := []string{"/run/docker.sock", "/var/run/docker.sock"} for _, l := range socketLocations { if _, err := os.Stat(l); err == nil { c, err := dockerclient.NewDockerClient(fmt.Sprintf("unix://%s", l), nil) if err != nil { return nil, stackerr.Wrap(err) } return c, nil } } return nil, stackerr.New("docker not configured") } if os.Getenv("DOCKER_TLS_VERIFY") != "" { return DockerWithTLS(host, os.Getenv("DOCKER_CERT_PATH")) } c, err := dockerclient.NewDockerClient(host, nil) if err != nil { return nil, stackerr.Wrap(err) } return c, nil }
func NewDriver() (*Driver, error) { docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { return nil, fmt.Errorf("could not connect to docker: %s", err) } // initiate the ovsdb manager port binding var ovsdb *libovsdb.OvsdbClient retries := 3 for i := 0; i < retries; i++ { ovsdb, err = libovsdb.Connect(localhost, ovsdbPort) if err == nil { break } log.Errorf("could not connect to openvswitch on port [ %d ]: %s. Retrying in 5 seconds", ovsdbPort, err) time.Sleep(5 * time.Second) } if ovsdb == nil { return nil, fmt.Errorf("could not connect to open vswitch") } d := &Driver{ dockerer: dockerer{ client: docker, }, ovsdber: ovsdber{ ovsdb: ovsdb, }, networks: make(map[string]*NetworkState), } // Initialize ovsdb cache at rpc connection setup d.ovsdber.initDBCache() return d, nil }
func main() { // Load the environment variables we need err := godotenv.Load() if err != nil { log.Fatal("Error loading .env file") } // Read the port port := os.Getenv("PORT") tlsConfig, err := getTLSConfig(os.Getenv("SWARM_CREDS_DIR")) if err != nil { log.Fatal("Could not create TLS certificate.") } docker, _ := dockerclient.NewDockerClient(os.Getenv("DOCKER_HOST"), tlsConfig) mux := mux.NewRouter() // mux.HandleFunc("/events", get_events(dbmap)).Methods("GET") // mux.HandleFunc("/events/{year}", get_events_by_year(dbmap)).Methods("GET") mux.HandleFunc("/spawn", spawn(docker)).Methods("GET") mux.HandleFunc("/list-containers", list_containers(docker)).Methods("GET") n := negroni.Classic() n.UseHandler(mux) log.Printf("Listening on port %s\n", port) n.Run(":" + port) }
// HandleEvents handles events func (ag *Agent) HandleEvents() error { opts := ag.pluginConfig.Instance recvErr := make(chan error, 1) go handleNetworkEvents(ag.netPlugin, opts, recvErr) go handleBgpEvents(ag.netPlugin, opts, recvErr) go handleEpgEvents(ag.netPlugin, opts, recvErr) go handleServiceLBEvents(ag.netPlugin, opts, recvErr) go handleSvcProviderUpdEvents(ag.netPlugin, opts, recvErr) go handleGlobalCfgEvents(ag.netPlugin, opts, recvErr) if ag.pluginConfig.Instance.PluginMode == "docker" { // watch for docker events docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) go docker.StartMonitorEvents(handleDockerEvents, recvErr, ag.netPlugin, recvErr) } else if ag.pluginConfig.Instance.PluginMode == "kubernetes" { // start watching kubernetes events k8splugin.InitKubServiceWatch(ag.netPlugin) } err := <-recvErr if err != nil { log.Errorf("Failure occured. Error: %s", err) return err } return nil }
func NewNode(controllerUrl string, dockerUrl string, tlsConfig *tls.Config, cpus float64, memory float64, heartbeatInterval int, ip string, showOnlyGridContainers bool, enableDebug bool) (*Node, error) { if enableDebug { log.SetLevel(log.DebugLevel) } u := uuid.NewV4() id := uuid.Formatter(u, uuid.CleanHyphen) client, err := dockerclient.NewDockerClient(dockerUrl, tlsConfig) if err != nil { return nil, err } node := &Node{ Id: id, client: client, controllerUrl: controllerUrl, heartbeatInterval: heartbeatInterval, showOnlyGridContainers: showOnlyGridContainers, ip: ip, Cpus: cpus, Memory: memory, } return node, nil }
func handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) m.SetReply(r) records := make([]dns.RR, 0) q := r.Question[0] if q.Qtype == dns.TypeA && strings.HasSuffix(q.Name, ".docker.") { docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", &tls.Config{}) name := strings.SplitN(q.Name, ".", 2)[0] containers, err := docker.ListContainers(false, false, fmt.Sprintf("{\"name\":[\"%s\"]}", name)) if err != nil { log.Fatal(err) } for _, c := range containers { info, _ := docker.InspectContainer(c.Id) log.Printf("Container %s[%6s] has ip %s\n", name, info.Id, info.NetworkSettings.IPAddress) records = append(records, &dns.A{ Hdr: dns.RR_Header{ Name: q.Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, A: net.ParseIP(info.NetworkSettings.IPAddress), }) } } m.Answer = append(m.Answer, records...) defer w.WriteMsg(m) }
func main() { random := rand.New(rand.NewSource(int64(int(time.Now().UnixNano()) * os.Getpid()))) config := parseFlags() ttl := time.Duration(float64(config.UpdateInterval) * 1.5) cache, err := NewCache(config.CacheURL, config.Id, ttl) if err != nil { log.Fatal(err) } docker, err := dockerclient.NewDockerClient(config.DockerURL) if err != nil { log.Fatal(err) } rtInfo := &RuntimeInfo{config.Id, cache, docker, ttl} log.Printf("Started monitoring Docker events (%s)\n", config.Id) docker.StartMonitorEvents(dockerEventCallback, rtInfo) go func() { // Garbage collect expired hosts at random interval for { cache.ClearExpiredHosts() offset := random.Intn(int(config.UpdateInterval.Seconds())) time.Sleep(config.UpdateInterval + (time.Duration(offset) * time.Second)) } }() func() { for { update(rtInfo) time.Sleep(config.UpdateInterval) } }() }
func New(config *Config) (m *Monitor, err error) { var authConfig *dockerclient.AuthConfig if config.AuthConfig != "" { authJson, err := base64.StdEncoding.DecodeString(config.AuthConfig) if err != nil { return nil, err } var auth dockerclient.AuthConfig if err = json.Unmarshal(authJson, &auth); err != nil { return nil, err } authConfig = &auth } client, err := dockerclient.NewDockerClient(config.URL, nil) if err != nil { return } m = &Monitor{ config: config, client: client, tasks: make(map[string]task.Task, 10), authConfig: authConfig, staticSource: task.NewStaticSource(), } return }
func init() { log.Info("registering heartbeat: docker") Add("docker", func(conf map[string]string) (Plugin, error) { host, ok := conf["host"] if !ok { host = os.Getenv("DOCKER_HOST") if host == "" { log.Info("no host information found, fallback to default: %s") host = DEFAULT_DOCKER_HOST } } log.Info("connecting to docker (%s)", host) // TODO support for tls docker, err := dockerclient.NewDockerClient(host, nil) if err != nil { return nil, err } return &DockerMonitor{ docker: docker, logger: logger.New("sentinel.plugins.heartbeats.docker"), }, nil }) }
// deleteDockNet deletes a network in docker daemon func deleteDockNet(tenantName, networkName, serviceName string) error { // do nothing in test mode if testMode { return nil } // Trim default tenant name docknetName := getDocknetName(tenantName, networkName, serviceName) // connect to docker docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { log.Errorf("Unable to connect to docker. Error %v", err) return errors.New("Unable to connect to docker") } log.Infof("Deleting docker network: %+v", docknetName) // Delete network err = docker.RemoveNetwork(docknetName) if err != nil { log.Errorf("Error deleting network %s. Err: %v", docknetName, err) // FIXME: Ignore errors till we fully move to docker 1.9 return nil } return nil }
// NewDriver creates a new MACVLAN Driver func NewDriver(version string, ctx *cli.Context) (*Driver, error) { docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { return nil, fmt.Errorf("could not connect to docker: %s", err) } // lower bound of v4 MTU is 68-bytes per rfc791 if ctx.Int("mtu") <= 0 { cliMTU = defaultMTU } else if ctx.Int("mtu") >= minMTU { cliMTU = ctx.Int("mtu") } else { log.Fatalf("The MTU value passed [ %d ] must be greater than [ %d ] bytes per rfc791", ctx.Int("mtu"), minMTU) } // Set the default mode to bridge if ctx.String("mode") == "" { macvlanMode = bridgeMode } switch ctx.String("mode") { case bridgeMode: macvlanMode = bridgeMode // todo: in other modes if relevant } d := &Driver{ networks: networkTable{}, dockerer: dockerer{ client: docker, }, } return d, nil }
func NewApiMiddleware(dockerAddr string) *ApiMiddleware { docker, err := dockerclient.NewDockerClient(dockerAddr, nil) if err != nil { return nil } return &ApiMiddleware{dockerClient: docker} }
func (u DockerResource) getContainers(request *restful.Request, response *restful.Response) { // Init the client docker, err := dockerclient.NewDockerClient(u.url, nil) if err != nil { log.Fatal("Couldn't connect to docker client") } if request.QueryParameter("detailed") == "false" { containers, err := docker.ListContainers(true, false, "") if err != nil { log.Println(err) log.Fatal("Unable to fetch running containers") } response.WriteEntity(containers) } else { containers, err := containers.ListContainersDetailed(docker) if err != nil { log.Println(err) log.Fatal("Unable to fetch running containers") } response.WriteEntity(containers) } }
func stopInjector(t, e string) { if t == "" { log.Error("Unspecified CID.") return } if e == "" { log.Error("Unspecified Docker endpoint.") return } client, err := docker.NewDockerClient(e, nil) if err != nil { log.Error(err) return } err = client.StopContainer(t, 30) if err != nil { if err == docker.ErrNotFound { log.Info("Unable to recover deleted injector: %s", t) } else { log.Error(err) } } }
// DockerWithTLS returns a DockerClient with the certs in the specified // directory. The names of the certs are the standard names of "cert.pem", // "key.pem" and "ca.pem". func DockerWithTLS(url, certPath string) (*dockerclient.DockerClient, error) { var tlsConfig *tls.Config clientCert, err := tls.LoadX509KeyPair( filepath.Join(certPath, "cert.pem"), filepath.Join(certPath, "key.pem"), ) if err != nil { return nil, stackerr.Wrap(err) } rootCAs := x509.NewCertPool() caCert, err := ioutil.ReadFile(filepath.Join(certPath, "ca.pem")) if err != nil { return nil, stackerr.Wrap(err) } rootCAs.AppendCertsFromPEM(caCert) tlsConfig = &tls.Config{ Certificates: []tls.Certificate{clientCert}, RootCAs: rootCAs, } client, err := dockerclient.NewDockerClient(url, tlsConfig) if err != nil { return nil, stackerr.Wrap(err) } return client, nil }
func RestartContainer(request common.RequestData) (code int, result string) { strDockerServer := fmt.Sprintf("%s:%d", request.ServerIP, request.Port) fmt.Println("strDockerServer=", strDockerServer) client, _ := dockerclient.NewDockerClient(strDockerServer, nil) strID, ok := GetContainerID(request.Params) if !ok { logger.Println("cannot Restart Container ") code = 1 result = "faild" return code, result } //fmt.Println("strID=", strID) nTime := 30 err := client.RestartContainer(strID, nTime) if err != nil { logger.Println("cannot get containers: %s", err) code = 1 result = "faild" return code, result } return 0, "ok" }
func main() { flag.Parse() docker, _ = dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) kong = kongo.NewKong(kongUrl) endpoints, err := kong.GetEndpoints() if err != nil { panic(err) } for _, endpoint := range endpoints { log.Printf("%s => %s\n", endpoint.Path, endpoint.TargetUrl) } // Get running containers containers, err := docker.ListContainers(false, false, "") if err != nil { log.Fatal(err) } for _, c := range containers { checkAndSync(c.Id) } // Listen to events docker.StartMonitorEvents(eventCallback, nil) // Hold the execution to look at the events coming for true { time.Sleep(3600 * time.Second) log.Println("PONG") } }
func main() { fmt.Println("Devices runner") // Init the client docker, err := dc.NewDockerClient("unix:///var/run/docker.sock", nil) if err != nil { panic(err) } containers, err := docker.ListContainers(true) if err != nil { panic(err) } for _, c := range containers { fmt.Println("container: ", c.Names, " status: ", c.Status) } // let's create an API for starting devices http.HandleFunc("/device", func(w http.ResponseWriter, r *http.Request) { devicesHandler(w, r, docker) }) http.HandleFunc("/device/", func(w http.ResponseWriter, r *http.Request) { deviceHandler(w, r, docker) }) http.ListenAndServe(":8080", nil) }
func main() { flag.Parse() for { client, err := dockerclient.NewDockerClient(*addr, nil) if err != nil { log.Fatal(err) } expiredContainers, expiredImages, err := getExpired(client) if err != nil { log.Fatal(err) } if err := removeContainers(client, expiredContainers); err != nil { log.Fatal(err) } if err := removeImages(client, expiredImages); err != nil { log.Fatal(err) } if *interval == 0 { break } log.Printf("Sleeping for %s", *interval) time.Sleep(*interval) } }
// NewDockerProbe creates a new DockerProbe for testing docker func NewDockerProbe() (DockerProbe, error) { client, err := docker.NewDockerClient(dockerSocket, nil) if err != nil { return nil, err } return DockerProbe(dockerClient{Client: client}), nil }
//StopAndRemoveContainers stops and removes the containers in the given map func StopAndRemoveContainers(containers map[string]string) { if len(containers) == 0 { return } if os.Getenv("XT_CLEANUP_CONTAINERS") == "false" { log.Info("Skipping container stop and remove - XT_CLEANUP_CONTAINERS is false") return } // Init the client docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) log.Info("stop test containers") //Note that since xavi is linked to mountebank we stop/remove xavi first log.Info("stop xavi test container") docker.StopContainer(containers[XaviTestContainer], 5) log.Info("stop mountebank test container") docker.StopContainer(containers[MountebankTestContainer], 5) log.Info("remove the test containers") docker.RemoveContainer(containers[XaviTestContainer], false, false) docker.RemoveContainer(containers[MountebankTestContainer], false, false) }
// GetDockerNetworkName gets network name from network UUID.
//
// It lists all networks known to the local docker daemon and, for the one
// whose ID matches nwID, parses its dotted name into its components:
//
//	network                        -> (defaultTenant, network, "")
//	network.tenant                 -> (tenant, network, "")
//	service.network  (default ten) -> (defaultTenant, network, service)
//	service.network.tenant         -> (tenant, network, service)
//
// Returns (tenantName, netName, serviceName, nil) on success, or zero strings
// plus an error when docker is unreachable, the name format is invalid, or no
// network with the given UUID exists.
func GetDockerNetworkName(nwID string) (string, string, string, error) {
	docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		log.Errorf("Unable to connect to docker. Error %v", err)
		return "", "", "", errors.New("Unable to connect to docker")
	}

	// Fetch all networks; no filter string means everything.
	nwList, err := docker.ListNetworks("")
	if err != nil {
		log.Infof("Error: %v", err)
		return "", "", "", err
	}

	log.Infof("Got networks:")

	// find the network by uuid
	for _, nw := range nwList {
		log.Infof("%+v", nw)
		if nw.ID == nwID {
			log.Infof("Returning network name %s for ID %s", nw.Name, nwID)

			// parse the network name
			var tenantName, netName, serviceName string
			names := strings.Split(nw.Name, ".")
			if len(names) == 2 {
				// Two labels are ambiguous: they may be service.network on the
				// default tenant, or network.tenant.
				// determine if this is service.network on default tenant or network.tenant
				_, err = netdGetNetwork(fmt.Sprintf("%s.%s", names[1], defaultTenantName))
				if err == nil {
					// This is service.network on default tenant
					tenantName = defaultTenantName
					netName = names[1]
					serviceName = names[0]
				} else {
					// this is in network.tenant format
					tenantName = names[1]
					netName = names[0]
				}
			} else if len(names) == 3 {
				// has service.network.tenant format
				tenantName = names[2]
				netName = names[1]
				serviceName = names[0]
			} else if len(names) == 1 {
				// If only network is specified, use default tenant
				tenantName = defaultTenantName
				netName = names[0]
			} else {
				log.Errorf("Invalid network name format for network %s", nw.Name)
				return "", "", "", errors.New("Invalid format")
			}

			return tenantName, netName, serviceName, nil
		}
	}

	// UUID was not Found
	return "", "", "", errors.New("Network UUID not found")
}
func (n *Node) Initial() *dockerclient.DockerClient { status := n.GetStatus(DockerPidFile) if status == true { docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) return docker } return nil }
func newDocker() dockerclient.Client { var ( err error docker dockerclient.Client ) if DOCKER_CERT_PATH == "" { docker, err = dockerclient.NewDockerClient(DOCKER_HOST, nil) } else { config := newTlsConfig(DOCKER_CERT_PATH) docker, err = dockerclient.NewDockerClient(DOCKER_HOST, config) } logFatalIf(err) return docker }
func NewDockerManager(c *Config, list ServiceListProvider, tlsConfig *tls.Config) (*DockerManager, error) { docker, err := dockerclient.NewDockerClient(c.dockerHost, tlsConfig) if err != nil { return nil, err } return &DockerManager{config: c, list: list, docker: docker}, nil }
func newClusterStateManager(daemonUrl string, tlsConfig *tls.Config) (*ClusterStateManager, error) { store := make(map[string]*ClusterState) client, err := dockerclient.NewDockerClient(daemonUrl, tlsConfig) if err != nil { return nil, err } return &ClusterStateManager{DockerClient: client, Store: store}, nil }
func GetDockerCient(c *cli.Context) (*dockerclient.DockerClient, string, error) { host := c.GlobalString("host") if host == "" { logs.Error.Println("Incorrect usage, please set the docker host") return nil, "", errors.New("Unable to connect to docker host") } tlsConfig := &tls.Config{} certPath := c.GlobalString("cert") if certPath != "" { caFile := filepath.Join(certPath, "ca.pem") if _, err := os.Stat(caFile); os.IsNotExist(err) { logs.Error.Println("Cannot open file : " + caFile) logs.Error.Println("Incorrect usage, please set correct cert files") return nil, host, errors.New("Unable to connect to docker host") } certFile := filepath.Join(certPath, "cert.pem") if _, err := os.Stat(certFile); os.IsNotExist(err) { logs.Error.Println("Cannot open file : " + certFile) logs.Error.Println("Incorrect usage, please set correct cert files") return nil, host, errors.New("Unable to connect to docker host") } keyFile := filepath.Join(certPath, "key.pem") if _, err := os.Stat(keyFile); os.IsNotExist(err) { logs.Error.Println("Cannot open file : " + keyFile) logs.Error.Println("Incorrect usage, please set correct cert files") return nil, host, errors.New("Unable to connect to docker host") } cert, _ := tls.LoadX509KeyPair(certFile, keyFile) pemCerts, _ := ioutil.ReadFile(caFile) tlsConfig.RootCAs = x509.NewCertPool() tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert tlsConfig.Certificates = []tls.Certificate{cert} tlsConfig.RootCAs.AppendCertsFromPEM(pemCerts) } docker, err := dockerclient.NewDockerClient(host, tlsConfig) if err != nil { logs.Error.Println("Unable to connect to docker host") return nil, host, err } version, err := docker.Version() if err != nil { logs.Error.Println("Unable to ping docker host") logs.Error.Println(err) return nil, host, err } logs.Trace.Println("Connected to Docker Host " + host) logs.Debug.Println("Docker Version: " + version.Version) logs.Debug.Println("Git Commit:" + version.GitCommit) logs.Debug.Println("Go Version:" + 
version.GoVersion) return docker, host, err }
//SpawnTestContainers checks the status of the required test containers that must be running
//in order for the acceptance tests to execute successfully. If the containers are not running then
//they are started. The map that is returned contains container name to container id mapping.
//
//Boot order matters: mountebank is started first because the xavi container is
//linked to it by name. When any container was freshly booted, the function
//sleeps briefly so the services inside are ready to accept traffic.
func SpawnTestContainers() map[string]string {
	var bootedOneOrMoreContainers bool
	containerMapping := make(map[string]string)

	// Init the client
	// NOTE(review): the client-creation error is discarded; an unreachable
	// daemon would surface as a nil-pointer panic below — worth confirming.
	log.Println("Create docker client")
	docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)

	// Is the mountebank container already running?
	log.Println("Check to see if mountebank test container is already started")
	info := testcontainer.GetAcceptanceTestContainerInfo(docker, "atest-mb")
	if info != nil {
		log.Println("Mountebank container found - state is: ", info.State.StateString())
		containerMapping[MountebankTestContainer] = info.Id
	} else {
		log.Println("Mountebank container not running - create container context")
		bootedOneOrMoreContainers = true
		containerCtx := createMountebankTestContainerContext()

		//Create and start the container.
		log.Println("Create and start the container")
		mountebankContainerId := testcontainer.CreateAndStartContainer(docker, containerCtx)
		containerMapping[MountebankTestContainer] = mountebankContainerId

		//Need to get test container info after start as name needed to link second container to it.
		info = testcontainer.GetAcceptanceTestContainerInfo(docker, "atest-mb")
	}

	// NOTE(review): if the post-start lookup above returned nil, this line
	// panics; assumes the container is always discoverable once started.
	var mbContainerName = info.Name

	// Is the xavi test container already running?
	log.Println("Check to see if xavi test container is already started")
	info = testcontainer.GetAcceptanceTestContainerInfo(docker, "atest-xavi")
	if info != nil {
		log.Println("Xavi test container found - state is: ", info.State.StateString())
		log.Println("Xavi container links - ", info.HostConfig.Links)
		containerMapping[XaviTestContainer] = info.Id
	} else {
		log.Println("Xavi test container not running - create container context")
		bootedOneOrMoreContainers = true
		xaviContainerCtx := createXaviTestContainerContext(mbContainerName)

		//Create and start the container.
		log.Println("Create and start the container")
		xaviTestContainerId := testcontainer.CreateAndStartContainer(docker, xaviContainerCtx)
		containerMapping[XaviTestContainer] = xaviTestContainerId
	}

	// Pause to let the containers boot
	if bootedOneOrMoreContainers {
		//If the test hits the xavi container right away the test set up fails, as the container state
		//can be running before xavi is ready to accept traffic.
		time.Sleep(1 * time.Second)
	}

	return containerMapping
}
func before(c *cli.Context) error { d, err := dockerclient.NewDockerClient(c.GlobalString("docker"), nil) if err != nil { fmt.Printf("unable to connect to docker at %s: %s\n", c.GlobalString("docker"), err) os.Exit(1) } docker = d return nil }