func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.SetJson("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.SetJson("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.ImageID)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.SetJson("Name", container.Name)
		out.SetInt("RestartCount", container.RestartCount)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)
		out.SetJson("AppArmorProfile", container.AppArmorProfile)
		out.SetList("ExecIDs", container.GetExecIDs())

		// Temporarily materialize the container's links on the host config so
		// they are serialized, then reset them so they are not persisted.
		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}
		out.SetJson("HostConfig", container.hostConfig)
		container.hostConfig.Links = nil

		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
func ContainerConfigFromJob(job *engine.Job) *Config {
	config := &Config{
		Hostname:        job.Getenv("Hostname"),
		Domainname:      job.Getenv("Domainname"),
		User:            job.Getenv("User"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		Cpuset:          job.Getenv("Cpuset"),
		AttachStdin:     job.GetenvBool("AttachStdin"),
		AttachStdout:    job.GetenvBool("AttachStdout"),
		AttachStderr:    job.GetenvBool("AttachStderr"),
		Tty:             job.GetenvBool("Tty"),
		OpenStdin:       job.GetenvBool("OpenStdin"),
		StdinOnce:       job.GetenvBool("StdinOnce"),
		Image:           job.Getenv("Image"),
		WorkingDir:      job.Getenv("WorkingDir"),
		NetworkDisabled: job.GetenvBool("NetworkDisabled"),
	}
	job.GetenvJson("ExposedPorts", &config.ExposedPorts)
	job.GetenvJson("Volumes", &config.Volumes)

	if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
		config.PortSpecs = PortSpecs
	}
	if Env := job.GetenvList("Env"); Env != nil {
		config.Env = Env
	}
	if Cmd := job.GetenvList("Cmd"); Cmd != nil {
		config.Cmd = Cmd
	}
	if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil {
		config.Entrypoint = Entrypoint
	}
	return config
}
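// Illustrative sketch (not part of the original source): roughly how a caller
// might populate a job's environment before decoding it with
// ContainerConfigFromJob. exampleCreateConfig is a hypothetical helper; the
// Setenv* calls are assumed to be the standard engine.Job environment setters
// mirroring the Getenv* accessors used above.
func exampleCreateConfig(eng *engine.Engine) *Config {
	job := eng.Job("create")
	job.Setenv("Hostname", "web-1")                  // plain string key
	job.Setenv("Image", "ubuntu:14.04")
	job.SetenvInt64("Memory", 512*1024*1024)         // 512 MiB limit
	job.SetenvBool("Tty", true)                      // boolean key
	job.SetenvList("Cmd", []string{"echo", "hello"}) // list-valued key
	return ContainerConfigFromJob(job)
}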
// FIXME: Allow to interrupt current push when new push of same image is done.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	img, err := s.graph.Get(repoInfo.LocalName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[repoInfo.LocalName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[repoInfo.LocalName]; exists {
			if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", repoInfo.CanonicalName))
	if _, err := s.pushImage(r, job.Stdout, img.ID, endpoint.String(), token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// CmdLookup returns an image encoded in JSON.
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
	}
	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)

	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}
	return hostConfig
}
// serveFd creates an http.Server and serves requests on the socket-activated
// file descriptors passed in for the given address.
func serveFd(addr string, job *engine.Job) error {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))

	ls, e := systemd.ListenFD(addr)
	if e != nil {
		return e
	}

	chErrors := make(chan error, len(ls))

	// We don't want to start serving on these sockets until the
	// daemon is initialized and installed. Otherwise required handlers
	// won't be ready.
	<-activationLock

	// Since ListenFD will return one or more sockets we have
	// to create a go func to spawn off multiple serves
	for i := range ls {
		listener := ls[i]
		go func() {
			httpSrv := http.Server{Handler: r}
			chErrors <- httpSrv.Serve(listener)
		}()
	}

	for i := 0; i < len(ls); i++ {
		if err := <-chErrors; err != nil {
			return err
		}
	}

	return nil
}
// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := srv.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer srv.poolRemove("push", localName)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	img, err := srv.daemon.Graph().Get(localName)
	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(srv.daemon.Repositories().Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}
	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}

	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}

	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}

	job.Stdout.Write(sf.FormatStatus("", img.ID))

	logID := img.ID
	if tag != "" {
		logID += ":" + tag
	}
	if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
		log.Errorf("Error logging event 'import' for %s: %s", logID, err)
	}
	return engine.StatusOK
}
func (s *TagStore) CmdTag(job *engine.Job) error {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return fmt.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	return s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force"))
}
func tlsConfigFromJob(job *engine.Job) *tlsConfig {
	verify := job.GetenvBool("TlsVerify")
	if !job.GetenvBool("Tls") && !verify {
		return nil
	}
	return &tlsConfig{
		Verify:      verify,
		Certificate: job.Getenv("TlsCert"),
		Key:         job.Getenv("TlsKey"),
		CA:          job.Getenv("TlsCa"),
	}
}
func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
	w.Header().Set("Content-Type", "application/json")
	if job.GetenvBool("lineDelim") {
		w.Header().Set("Content-Type", "application/x-json-stream")
	}
	if flush {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	} else {
		job.Stdout.Add(w)
	}
}
func (srv *Server) ImageTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
		attach    = job.GetenvBool("attach")
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	// This is kept for backward compatibility - hostconfig should be passed when
	// creating a container, not during start.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}

	if err := container.Start(); err != nil {
		container.LogEvent("die")
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	if container.Config.MonitorDriver != MonitorBuiltin {
		if attach {
			// In attach mode, redirect the client to the monitor server's socket.
			loc := fmt.Sprintf("unix://%s/%s.sock", MonitorSockDir, container.ID)
			out := &engine.Env{}
			out.Set("redirect", loc)
			if _, err := out.WriteTo(job.Stdout); err != nil {
				return job.Error(err)
			}
		} else {
			// Call the monitor server's start API directly.
			if _, err := container.daemon.callMonitorAPI(container, "POST", "start"); err != nil {
				return job.Error(err)
			}
		}
	}

	return engine.StatusOK
}
func (srv *Server) ImageImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}

	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}

	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
// writeEvent writes an event to the job's stdout. When sending an event,
// JSON serialization errors are ignored, but any other error leads to the
// eviction of the listener.
func writeEvent(job *engine.Job, event *utils.JSONMessage) error {
	if b, err := json.Marshal(event); err == nil {
		if job.GetenvBool("lineDelim") {
			b = append(b, []byte("\r\n")...)
		}
		if _, err = job.Stdout.Write(b); err != nil {
			return err
		}
	}
	return nil
}
func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	imgs := engine.NewTable("", 0)
	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
		return job.Error(err)
	}
	if len(imgs.Data) == 0 {
		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
	}
	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (s *TagStore) CmdDiffAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 3 {
		return job.Errorf("Usage: %s CONTAINERID SRCIMAGEID TAGIMAGEID", job.Name)
	}

	var (
		containerID   = job.Args[0]
		localName     = job.Args[1]
		parentImageID = job.Args[2]
		sf            = utils.NewStreamFormatter(job.GetenvBool("json"))
		rate          = 0 // rate (per second) at which image layer data is written to the container
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	img, err := s.LookupImage(localName)
	if err != nil {
		return job.Error(err)
	}

	dest := s.graph.Driver().MountPath(containerID)
	fi, err := os.Stat(dest)
	if err != nil {
		// os.Stat never reports "already exists", so any error here is fatal
		// (the original guard err != nil && !os.IsExist(err) could fall
		// through to a nil fi).
		return job.Error(err)
	}
	if !fi.IsDir() {
		return job.Errorf("Dest %s is not a directory", dest)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Diff two mirrors(%s - %s)", img.ID, parentImageID), nil))
	fs, err := s.graph.Driver().Diff(img.ID, parentImageID, nil)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()
	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
	if err := archive.ApplyLayer(dest, fs, int64(rate)); err != nil {
		return job.Error(err)
	}
	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

	return engine.StatusOK
}
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	// Newer clients pass the entire HostConfig as a single JSON value.
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)
		return &hostConfig
	}

	// Legacy path: reconstruct the HostConfig from individual keys.
	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
	}
	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")

	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}
	return hostConfig
}
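// Illustrative sketch (not part of the original source): newer callers can set
// the entire host configuration as one JSON value under the "HostConfig" key,
// which the function above prefers over the legacy per-field keys.
// exampleHostConfigFromJob is a hypothetical helper; SetenvJson is assumed to
// be the JSON counterpart of the GetenvJson accessor used above.
func exampleHostConfigFromJob(eng *engine.Engine) *HostConfig {
	job := eng.Job("start", "my-container")
	job.SetenvJson("HostConfig", &HostConfig{
		Privileged:  true,
		NetworkMode: "bridge",
		Binds:       []string{"/host/data:/data"},
	})
	// Because "HostConfig" exists in the environment, the per-field
	// fallback branch above is never consulted.
	return ContainerHostConfigFromJob(job)
}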
func (daemon *Daemon) ImageDelete(job *engine.Job) error {
	if n := len(job.Args); n != 1 {
		return fmt.Errorf("Usage: %s IMAGE", job.Name)
	}
	list := []types.ImageDelete{}
	if err := daemon.DeleteImage(job.Eng, job.Args[0], &list, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
		return err
	}
	if len(list) == 0 {
		return fmt.Errorf("Conflict, %s wasn't deleted", job.Args[0])
	}
	if err := json.NewEncoder(job.Stdout).Encode(list); err != nil {
		return err
	}
	return nil
}
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
		useIpv6      = job.GetenvBool("UseIpv6")
	)

	// Each entry in ports is expected to be in "port/proto" form.
	split := func(p string) (string, string) {
		parts := strings.Split(p, "/")
		return parts[0], parts[1]
	}

	for _, p := range ports {
		port, proto := split(p)
		if output, err := iptables.Raw(useIpv6, action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", parentIP,
			"--dport", port,
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}

		if output, err := iptables.Raw(useIpv6, action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", childIP,
			"--sport", port,
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}
// NewServer sets up the required Server and does protocol specific checking.
func NewServer(proto, addr string, job *engine.Job) (Server, error) {
	var (
		err error
		l   net.Listener
		r   = createRouter(
			job.Eng,
			job.GetenvBool("Logging"),
			job.GetenvBool("EnableCors"),
			job.Getenv("CorsHeaders"),
			job.Getenv("Version"),
		)
	)
	switch proto {
	case "tcp":
		if !job.GetenvBool("TlsVerify") {
			logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
		}
		if l, err = NewTcpSocket(addr, tlsConfigFromJob(job)); err != nil {
			return nil, err
		}
		if err := allocateDaemonPort(addr); err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("Invalid protocol format. Windows only supports tcp.")
	}
	// Return the configured HTTP server. The success return was missing from
	// the original snippet; this mirrors the setup in setupUnixHttp.
	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
	if err != nil {
		return nil, err
	}

	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
		return nil, err
	}

	if err := os.Chmod(addr, 0660); err != nil {
		return nil, err
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
func LinkContainers(job *engine.Job) error {
	var (
		action       = job.Args[0]
		nfAction     iptables.Action
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)

	switch action {
	case "-A":
		nfAction = iptables.Append
	case "-I":
		nfAction = iptables.Insert
	case "-D":
		nfAction = iptables.Delete
	default:
		return fmt.Errorf("Invalid action '%s' specified", action)
	}

	ip1 := net.ParseIP(parentIP)
	if ip1 == nil {
		return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
	}
	ip2 := net.ParseIP(childIP)
	if ip2 == nil {
		return fmt.Errorf("Child IP '%s' is invalid", childIP)
	}

	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
	for _, p := range ports {
		port := nat.Port(p)
		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
			return err
		}
	}
	return nil
}
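// Illustrative sketch (not part of the original source): roughly how a caller
// might drive LinkContainers above to append ACCEPT rules in the DOCKER chain
// for two linked containers. exampleLinkContainers is a hypothetical helper;
// the Setenv/SetenvList calls mirror the accessors used above, and the IPs
// and ports are placeholder values.
func exampleLinkContainers(eng *engine.Engine) error {
	job := eng.Job("link", "-A") // "-A" appends; "-I" inserts; "-D" deletes
	job.Setenv("ParentIP", "172.17.0.2")
	job.Setenv("ChildIP", "172.17.0.3")
	job.SetenvList("Ports", []string{"80/tcp", "443/tcp"})
	return LinkContainers(job)
}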
func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]
	removeVolume := job.GetenvBool("removeVolume")
	removeLink := job.GetenvBool("removeLink")
	forceRemove := job.GetenvBool("forceRemove")

	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if removeLink {
		name, err := GetFullContainerName(name)
		if err != nil {
			return job.Error(err)
		}
		parent, n := path.Split(name)
		if parent == "/" {
			return job.Errorf("Conflict, cannot remove the default name of the container")
		}
		pe := daemon.ContainerGraph().Get(parent)
		if pe == nil {
			return job.Errorf("Cannot get parent %s for name %s", parent, name)
		}
		parentContainer := daemon.Get(pe.ID())
		if parentContainer != nil {
			parentContainer.DisableLink(n)
		}
		if err := daemon.ContainerGraph().Delete(name); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}

	if container.IsRunning() {
		if forceRemove {
			if err := container.Kill(); err != nil {
				return job.Errorf("Could not kill running container, cannot remove - %v", err)
			}
		} else {
			return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
		}
	}
	if err := daemon.Destroy(container); err != nil {
		return job.Errorf("Cannot destroy container %s: %s", name, err)
	}
	container.LogEvent("destroy")
	if removeVolume {
		daemon.DeleteVolumes(container.VolumePaths())
	}
	return engine.StatusOK
}
func ExecConfigFromJob(job *engine.Job) *ExecConfig {
	execConfig := &ExecConfig{
		User:         job.Getenv("User"),
		Privileged:   job.GetenvBool("Privileged"),
		Tty:          job.GetenvBool("Tty"),
		Container:    job.Getenv("Container"),
		AttachStdin:  job.GetenvBool("AttachStdin"),
		AttachStderr: job.GetenvBool("AttachStderr"),
		AttachStdout: job.GetenvBool("AttachStdout"),
	}
	if cmd := job.GetenvList("Cmd"); cmd != nil {
		execConfig.Cmd = cmd
	}
	return execConfig
}
func (daemon *Daemon) ContainerRestore(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER CHECKPOINT_ID", job.Name)
	}

	name := job.Args[0]
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	checkpointID := job.Args[1]
	checkpoint := container.Checkpoints[checkpointID]
	if checkpoint == nil {
		return job.Errorf("No such checkpoint %s for container %s", checkpointID, container.ID)
	}

	containerClone, err := daemon.cloneContainer(container, checkpoint.ImageID)
	if err != nil {
		return job.Error(err)
	}
	log.Infof("cloned container ID=%s", containerClone.ID)

	checkpoint, err = checkpoint.clone(containerClone)
	// defer checkpoint.cleanFiles()
	if err != nil {
		return job.Error(err)
	}

	if err := containerClone.Restore(checkpoint, job.GetenvBool("clone")); err != nil {
		return job.Errorf("Cannot restore container %s: %s", name, err)
	}
	containerClone.LogEvent("restore")
	job.Printf("%s\n", containerClone.ID)
	return engine.StatusOK
}
// ConfigFromJob creates and returns a new Config object
// by parsing the contents of a job's environment.
func ConfigFromJob(job *engine.Job) *Config {
	config := &Config{
		Pidfile:                     job.Getenv("Pidfile"),
		Root:                        job.Getenv("Root"),
		AutoRestart:                 job.GetenvBool("AutoRestart"),
		EnableIptables:              job.GetenvBool("EnableIptables"),
		EnableIpForward:             job.GetenvBool("EnableIpForward"),
		BridgeIP:                    job.Getenv("BridgeIP"),
		BridgeIface:                 job.Getenv("BridgeIface"),
		DefaultIp:                   net.ParseIP(job.Getenv("DefaultIp")),
		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
		GraphDriver:                 job.Getenv("GraphDriver"),
		ExecDriver:                  job.Getenv("ExecDriver"),
		EnableSelinuxSupport:        job.GetenvBool("EnableSelinuxSupport"),
	}
	if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil {
		config.GraphOptions = graphOpts
	}
	if dns := job.GetenvList("Dns"); dns != nil {
		config.Dns = dns
	}
	if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil {
		config.DnsSearch = dnsSearch
	}
	if mtu := job.GetenvInt("Mtu"); mtu != 0 {
		config.Mtu = mtu
	} else {
		config.Mtu = GetDefaultNetworkMtu()
	}
	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
	if sockets := job.GetenvList("Sockets"); sockets != nil {
		config.Sockets = sockets
	}
	return config
}
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)
	for _, value := range ports {
		port := nat.Port(value)
		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", port.Proto(),
			"-s", parentIP,
			"--dport", strconv.Itoa(port.Int()),
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}

		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", port.Proto(),
			"-s", childIP,
			"--sport", strconv.Itoa(port.Int()),
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}