// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          *net.IP
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: *ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}

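// Illustrative, standalone sketch (not daemon code): how the "IPPrefixLen" and
// "Mask" values that Allocate reports are derived from a bridge network. The
// CIDR below is an arbitrary example, not Docker's actual default bridge range.
package main

import (
	"fmt"
	"net"
)

func main() {
	bridgeIP, bridgeNet, err := net.ParseCIDR("172.17.42.1/16")
	if err != nil {
		panic(err)
	}
	ones, _ := bridgeNet.Mask.Size() // number of leading 1-bits in the netmask
	fmt.Println("Gateway:", bridgeIP)             // 172.17.42.1 (the bridge's own address)
	fmt.Println("Mask:", bridgeNet.Mask.String()) // ffff0000 (IPMask prints as hex)
	fmt.Println("IPPrefixLen:", ones)             // 16
}
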
func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	data, err := container.Export()
	if err != nil {
		return job.Errorf("%s: %s", name, err)
	}
	defer data.Close()

	// Stream the entire contents of the container (basically a volatile snapshot)
	if _, err := io.Copy(job.Stdout, data); err != nil {
		return job.Errorf("%s: %s", name, err)
	}
	// FIXME: factor job-specific LogEvent to engine.Job.Run()
	container.LogEvent("export")
	return engine.StatusOK
}

func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
	}

	var (
		name     = job.Args[0]
		resource = job.Args[1]
	)

	if container := srv.daemon.Get(name); container != nil {
		data, err := container.Copy(resource)
		if err != nil {
			return job.Error(err)
		}
		defer data.Close()

		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}

// ServeApi loops through all of the protocols sent in to docker and spawns
// off a go routine to setup a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func() {
			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
		}()
	}

	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}

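// Standalone sketch of the PROTO://ADDR parsing that ServeApi performs on each
// argument before spawning a listener. The sample addresses are arbitrary.
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, protoAddr := range []string{"unix:///var/run/docker.sock", "tcp://0.0.0.0:2375", "not-a-listen-address"} {
		parts := strings.SplitN(protoAddr, "://", 2)
		if len(parts) != 2 {
			fmt.Printf("rejected %q: want PROTO://ADDR\n", protoAddr)
			continue
		}
		fmt.Printf("proto=%q addr=%q\n", parts[0], parts[1])
	}
}
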
// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}

func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	// This is kept for backward compatibility - hostconfig should be passed when
	// creating a container, not during start.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		container.LogEvent("die")
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	return engine.StatusOK
}

func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
	}
	oldName := job.Args[0]
	newName := job.Args[1]

	container, err := daemon.Get(oldName)
	if err != nil {
		return job.Error(err)
	}

	oldName = container.Name

	container.Lock()
	defer container.Unlock()
	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
		return job.Errorf("Error when allocating new name: %s", err)
	}

	container.Name = newName

	if err := daemon.containerGraph.Delete(oldName); err != nil {
		return job.Errorf("Failed to delete container %q: %v", oldName, err)
	}

	return engine.StatusOK
}

func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	return engine.StatusOK
}

func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
	images, _ := daemon.Graph().Map()
	var imgcount int
	if images == nil {
		imgcount = 0
	} else {
		imgcount = len(images)
	}

	kernelVersion := "<unknown>"
	if kv, err := kernel.GetKernelVersion(); err == nil {
		kernelVersion = kv.String()
	}

	operatingSystem := "<unknown>"
	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
		operatingSystem = s
	}
	if inContainer, err := operatingsystem.IsContainerized(); err != nil {
		utils.Errorf("Could not determine if daemon is containerized: %v", err)
		operatingSystem += " (error determining if containerized)"
	} else if inContainer {
		operatingSystem += " (containerized)"
	}

	// If we still have the original dockerinit binary from before we copied it
	// locally, return the path to that, since that's more intuitive (the copied
	// path is trivial to derive by hand given VERSION).
	initPath := utils.DockerInitPath("")
	if initPath == "" {
		// If that fails, we'll just return the path from the daemon.
		initPath = daemon.SystemInitPath()
	}

	cjob := job.Eng.Job("subscribers_count")
	env, _ := cjob.Stdout.AddEnv()
	if err := cjob.Run(); err != nil {
		return job.Error(err)
	}

	v := &engine.Env{}
	v.SetInt("Containers", len(daemon.List()))
	v.SetInt("Images", imgcount)
	v.Set("Driver", daemon.GraphDriver().String())
	v.SetJson("DriverStatus", daemon.GraphDriver().Status())
	v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
	v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
	v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
	v.SetBool("Debug", os.Getenv("DEBUG") != "")
	v.SetInt("NFd", utils.GetTotalUsedFds())
	v.SetInt("NGoroutines", runtime.NumGoroutine())
	v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
	v.SetInt("NEventsListener", env.GetInt("count"))
	v.Set("KernelVersion", kernelVersion)
	v.Set("OperatingSystem", operatingSystem)
	v.Set("IndexServerAddress", registry.IndexServerAddress())
	v.Set("InitSha1", dockerversion.INITSHA1)
	v.Set("InitPath", initPath)
	v.SetList("Sockets", daemon.Sockets)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		daemon    = srv.daemon
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.State.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := srv.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		return job.Errorf("Cannot start container %s: %s", name, err)
	}
	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))

	return engine.StatusOK
}

func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.SetJson("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.SetJson("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.ImageID)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.SetJson("Name", container.Name)
		out.SetInt("RestartCount", container.RestartCount)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)
		out.SetJson("AppArmorProfile", container.AppArmorProfile)
		out.SetList("ExecIDs", container.GetExecIDs())

		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}

		out.SetJson("HostConfig", container.hostConfig)

		container.hostConfig.Links = nil
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}

func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]
	removeVolume := job.GetenvBool("removeVolume")
	removeLink := job.GetenvBool("removeLink")
	forceRemove := job.GetenvBool("forceRemove")
	container := daemon.Get(name)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if removeLink {
		name, err := GetFullContainerName(name)
		if err != nil {
			return job.Error(err)
		}
		parent, n := path.Split(name)
		if parent == "/" {
			return job.Errorf("Conflict, cannot remove the default name of the container")
		}
		pe := daemon.ContainerGraph().Get(parent)
		if pe == nil {
			return job.Errorf("Cannot get parent %s for name %s", parent, name)
		}
		parentContainer := daemon.Get(pe.ID())

		if parentContainer != nil {
			parentContainer.DisableLink(n)
		}

		if err := daemon.ContainerGraph().Delete(name); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}

	if container != nil {
		if container.IsRunning() {
			if forceRemove {
				if err := container.Kill(); err != nil {
					return job.Errorf("Could not kill running container, cannot remove - %v", err)
				}
			} else {
				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
			}
		}
		if err := daemon.Destroy(container); err != nil {
			return job.Errorf("Cannot destroy container %s: %s", name, err)
		}
		container.LogEvent("destroy")
		if removeVolume {
			daemon.DeleteVolumes(container.VolumePaths())
		}
	}
	return engine.StatusOK
}

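// Standalone sketch of the name handling in ContainerRm's removeLink branch: a
// fully qualified link name such as "/web/db" splits into the parent container
// name and the link alias via path.Split, while a top-level name like "/db"
// has parent "/" and is rejected. The names are examples only.
package main

import (
	"fmt"
	"path"
)

func main() {
	parent, alias := path.Split("/web/db")
	fmt.Printf("parent=%q alias=%q\n", parent, alias) // parent="/web/" alias="db"

	parent, _ = path.Split("/db")
	fmt.Println("default name, cannot remove as link:", parent == "/") // true
}
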
func (srv *Server) Events(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s", job.Name)
	}

	var (
		since   = job.GetenvInt64("since")
		until   = job.GetenvInt64("until")
		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	)

	// If no until, disable timeout
	if until == 0 {
		timeout.Stop()
	}

	listener := make(chan utils.JSONMessage)
	srv.eventPublisher.Subscribe(listener)
	defer srv.eventPublisher.Unsubscribe(listener)

	// When sending an event, JSON serialization errors are ignored, but all
	// other errors lead to the eviction of the listener.
	sendEvent := func(event *utils.JSONMessage) error {
		if b, err := json.Marshal(event); err == nil {
			if _, err = job.Stdout.Write(b); err != nil {
				return err
			}
		}
		return nil
	}

	job.Stdout.Write(nil)

	// Resend every event in the [since, until] time interval.
	if since != 0 {
		for _, event := range srv.GetEvents() {
			if event.Time >= since && (event.Time <= until || until == 0) {
				if err := sendEvent(&event); err != nil {
					return job.Error(err)
				}
			}
		}
	}

	for {
		select {
		case event, ok := <-listener:
			if !ok {
				return engine.StatusOK
			}
			if err := sendEvent(&event); err != nil {
				return job.Error(err)
			}
		case <-timeout.C:
			return engine.StatusOK
		}
	}
}

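// Standalone sketch of the timeout pattern Events uses: a timer bounds the
// stream when "until" is set and is stopped (so it never fires) when it is
// zero. The event channel and durations below are stand-ins, not daemon code.
package main

import (
	"fmt"
	"time"
)

func main() {
	listener := make(chan string)
	go func() {
		listener <- "create"
		listener <- "start"
		close(listener)
	}()

	until := time.Now().Add(500 * time.Millisecond) // pretend the caller passed "until"
	timeout := time.NewTimer(until.Sub(time.Now()))
	// With no "until", the timer would be disabled instead: timeout.Stop()

	for {
		select {
		case event, ok := <-listener:
			if !ok {
				fmt.Println("listener closed")
				return
			}
			fmt.Println("event:", event)
		case <-timeout.C:
			fmt.Println("reached until, ending stream")
			return
		}
	}
}
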
func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	if config.Memory != 0 && config.Memory < 4194304 {
		return job.Errorf("Minimum memory limit allowed is 4MB")
	}
	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		config.Memory = 0
	}
	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}
	if config.Memory > 0 && config.MemorySwap > 0 && config.MemorySwap < config.Memory {
		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
	}
	if config.Memory == 0 && config.MemorySwap > 0 {
		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
	}

	var hostConfig *runconfig.HostConfig
	if job.EnvExists("HostConfig") {
		hostConfig = runconfig.ContainerHostConfigFromJob(job)
	} else {
		// Older versions of the API don't provide a HostConfig.
		hostConfig = nil
	}

	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
	if err != nil {
		if daemon.Graph().IsNotExist(err) {
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	container.LogEvent("create")
	job.Printf("%s\n", container.ID)
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}

	return engine.StatusOK
}

func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}

func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	hostConfig := runconfig.ContainerHostConfigFromJob(job)

	if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
		return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
	}
	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
		return job.Errorf("Minimum memory limit allowed is 4MB")
	}
	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		hostConfig.Memory = 0
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		hostConfig.MemorySwap = -1
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
	}
	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
	}

	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
	if err != nil {
		if daemon.Graph().IsNotExist(err) {
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	container.LogEvent("create")
	job.Printf("%s\n", container.ID)
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}

	return engine.StatusOK
}

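// Hedged sketch that pulls the memory/swap validation rules used by
// ContainerCreate into a standalone helper. The 4MB constant mirrors the check
// above; the function name and package are illustrative, not daemon API.
package main

import (
	"errors"
	"fmt"
)

const minimumMemory = 4194304 // 4MB, the smallest limit the handler accepts

func validateMemory(memory, memorySwap int64) error {
	if memory != 0 && memory < minimumMemory {
		return errors.New("minimum memory limit allowed is 4MB")
	}
	if memory > 0 && memorySwap > 0 && memorySwap < memory {
		return errors.New("memoryswap limit should be larger than memory limit")
	}
	if memory == 0 && memorySwap > 0 {
		return errors.New("memory limit must be set when a memoryswap limit is used")
	}
	return nil
}

func main() {
	fmt.Println(validateMemory(8388608, 4194304))  // swap smaller than memory: error
	fmt.Println(validateMemory(8388608, 16777216)) // valid combination: <nil>
}
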
// GetRegistryConfig returns current registry configuration.
func (s *Service) GetRegistryConfig(job *engine.Job) engine.Status {
	out := engine.Env{}
	err := out.SetJson("config", s.Config)
	if err != nil {
		return job.Error(err)
	}
	out.WriteTo(job.Stdout)

	return engine.StatusOK
}

func InitPidfile(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Error(fmt.Errorf("no pidfile provided to initialize"))
	}
	job.Logf("Creating pidfile")
	if err := utils.CreatePidFile(job.Args[0]); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

// ContainerMonitorOp is only called in attach mode, by the docker client, to stop the monitor server.
func (daemon *Daemon) ContainerMonitorOp(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name = job.Args[0]
		op   = job.Getenv("op")
		err  error
	)
	log.Infof("Container %s, monitor operation %s", name, op)
	if container := daemon.Get(name); container != nil {
		if container.State.IsRunning() {
			return job.Errorf("Container %s is running, stop the container before stopping its monitor", name)
		}
		if op == "stop" {
			r := container.monitorState.IsRunning()
			if !r {
				// The monitor may already have been stopped by the 'docker stop' API.
				log.Infof("Container %s's monitor is not running", name)
				return engine.StatusOK
			}

			// Stop polling container state before killing the monitor server.
			container.exMonitor.StopStatePoller()

			// If the docker daemon has restarted, we need to clean up the container here.
			if !container.exMonitor.hasCmd {
				container.exMonitor.Stop()
			}

			log.Debugf("Kill monitor server with pid %v", container.monitorState.Pid)
			// Kill the monitor server.
			if err := syscall.Kill(container.monitorState.Pid, syscall.SIGTERM); err != nil {
				log.Errorf("kill monitor server with pid %v error: %v", container.monitorState.Pid, err)
				return job.Error(err)
			}

			// Persist the monitor state.
			container.monitorState.SetStopped(0)
			if err = container.WriteMonitorState(); err != nil {
				log.Errorf("write monitor state error: %v", err)
				return job.Error(err)
			}
		} else {
			return job.Errorf("Monitor op: %s is not supported", op)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}

func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	if config.Memory != 0 && config.Memory < 4194304 {
		return job.Errorf("Minimum memory limit allowed is 4MB")
	}
	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		config.Memory = 0
	}
	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}

	var hostConfig *runconfig.HostConfig
	if job.EnvExists("HostConfig") {
		hostConfig = runconfig.ContainerHostConfigFromJob(job)
	} else {
		// Older versions of the API don't provide a HostConfig.
		hostConfig = nil
	}

	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
	if err != nil {
		if daemon.Graph().IsNotExist(err) {
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	// FIXME: this is necessary because daemon.Create might return a nil container
	// with a non-nil error. This should not happen! Once it's fixed we
	// can remove this workaround.
	if container != nil {
		container.LogEvent("create")
		job.Printf("%s\n", container.ID)
	}
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}

	return engine.StatusOK
}

func (daemon *Daemon) ContainerSet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		config []struct {
			Key   string
			Value string
		}
		err error
	)
	job.GetenvJson("config", &config)
	log.Infof("Setting container: %s. config: %v", name, config)

	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("Can not find container %s", name)
	}
	if !container.State.IsRunning() {
		return job.Errorf("Container %s is not running", name)
	}

	var object []interface{}
	for _, pair := range config {
		var response struct {
			Key    string
			Err    string
			Status int
		}
		response.Key = pair.Key
		if err = setConfig(container, pair.Key, pair.Value); err != nil {
			response.Err = err.Error()
			response.Status = 255
		} else {
			response.Status = 0
		}
		object = append(object, response)
	}

	// save config to disk
	if err := container.ToDisk(); err != nil {
		return job.Error(err)
	}

	b, err := json.Marshal(object)
	if err != nil {
		return job.Error(err)
	}
	job.Stdout.Write(b)
	return engine.StatusOK
}

// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

func (srv *Server) ImageTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	outs := engine.NewTable("", 0)
	changes, err := container.Changes()
	if err != nil {
		return job.Error(err)
	}

	for _, change := range changes {
		out := &engine.Env{}
		if err := out.Import(change); err != nil {
			return job.Error(err)
		}
		outs.Add(out)
	}

	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

// InitServer runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	cfg := daemonconfig.ConfigFromJob(job)
	srv, err := NewServer(job.Eng, cfg)
	if err != nil {
		return job.Error(err)
	}
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)

	for name, handler := range map[string]engine.Handler{
		"build": srv.Build,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

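// Hedged sketch of how a handler reaches the engine: handlers share the
// func(*engine.Job) engine.Status signature used throughout this file and are
// installed with Register, as InitServer does for "build". The handler name
// and body are invented for illustration, and the snippet assumes the same
// engine package the daemon uses, so it only compiles inside that tree.
func installHelloHandler(eng *engine.Engine) error {
	hello := func(job *engine.Job) engine.Status {
		job.Printf("hello from job %q\n", job.Name)
		return engine.StatusOK
	}
	// Register returns an error (for example, when a name is already taken),
	// which is why InitServer checks it for every subsystem it installs.
	return eng.Register("hello", hello)
}
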
func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	var (
		config    = container.Config
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	if err := runconfig.Merge(&newConfig, config); err != nil {
		return job.Error(err)
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}

func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}
	if err := container.Pause(); err != nil {
		return job.Errorf("Cannot pause container %s: %s", name, err)
	}
	container.LogEvent("pause")
	return engine.StatusOK
}

// Built-in jobs independent of any subsystem.
func dockerVersion(job *engine.Job) engine.Status {
	v := &engine.Env{}
	v.SetJson("Version", dockerversion.VERSION)
	v.SetJson("ApiVersion", api.APIVERSION)
	v.Set("GitCommit", dockerversion.GITCOMMIT)
	v.Set("GoVersion", runtime.Version())
	v.Set("Os", runtime.GOOS)
	v.Set("Arch", runtime.GOARCH)
	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
		v.Set("KernelVersion", kernelVersion.String())
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
// it is changed and the image previously referenced by the tag loses that reference.
// This may cause the old image to be garbage-collected if its reference count reaches zero.
//
// Syntax: image_tag NEWNAME OLDNAME
// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
	}
	var (
		newName = job.Args[0]
		oldName = job.Args[1]
	)

	newRepo, newTag := parsers.ParseRepositoryTag(newName)
	// FIXME: Set should either parse both old and new name, or neither.
	// The current prototype is inconsistent.
	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}

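// Standalone sketch of the repository:tag split that CmdTag relies on
// (parsers.ParseRepositoryTag): the tag is whatever follows the last colon,
// unless that colon belongs to a registry host:port prefix. This local helper
// only illustrates the common cases and is not the daemon's implementation.
package main

import (
	"fmt"
	"strings"
)

func splitRepoTag(name string) (repo, tag string) {
	i := strings.LastIndex(name, ":")
	if i < 0 || strings.Contains(name[i+1:], "/") {
		return name, "" // no tag, or the colon is part of a host:port
	}
	return name[:i], name[i+1:]
}

func main() {
	fmt.Println(splitRepoTag("shykes/myapp:1.42.0"))             // shykes/myapp 1.42.0
	fmt.Println(splitRepoTag("shykes/myapp"))                    // shykes/myapp (no tag)
	fmt.Println(splitRepoTag("registry.example.com:5000/myapp")) // whole name, no tag
}
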
func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	imgs := engine.NewTable("", 0)
	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
		return job.Error(err)
	}
	if len(imgs.Data) == 0 {
		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
	}
	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}