func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := srv.daemon.Get(name); container != nil {
		outs := engine.NewTable("", 0)
		changes, err := container.Changes()
		if err != nil {
			return job.Error(err)
		}
		for _, change := range changes {
			out := &engine.Env{}
			if err := out.Import(change); err != nil {
				return job.Error(err)
			}
			outs.Add(out)
		}
		if _, err := outs.WriteListTo(job.Stdout); err != nil {
			return job.Error(err)
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
	images, _ := s.graph.Map()
	if images == nil {
		return engine.StatusOK
	}
	job.Stdout.Write([]byte("digraph docker {\n"))

	var (
		parentImage *image.Image
		err         error
	)
	for _, image := range images {
		parentImage, err = image.GetParent()
		if err != nil {
			return job.Errorf("Error while getting parent image: %v", err)
		}
		if parentImage != nil {
			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
		} else {
			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
		}
	}

	for id, repos := range s.GetRepoRefs() {
		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
	}
	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
	return engine.StatusOK
}
// CmdLookup returns an image encoded in JSON.
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
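// Example (sketch): invoking the lookup handler above through the engine.
// Assumes an initialized *engine.Engine with this handler registered under
// a name like "image_inspect" -- the registration name is an assumption
// made for illustration, not confirmed by this file.
func exampleLookupImage(eng *engine.Engine) error {
	job := eng.Job("image_inspect", "busybox:latest")
	job.SetenvBool("raw", false) // ask for the structured Env output
	job.Stdout.Add(os.Stdout)    // stream the JSON result to stdout
	return job.Run()
}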
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
//
// Argument syntax: search TERM
//
// Option environment:
//	'authConfig': json-encoded credentials to authenticate against the registry.
//		The search extends to images only accessible via the credentials.
//
//	'metaHeaders': extra HTTP headers to include in the request to the registry.
//		The headers should be passed as a json-encoded dictionary.
//
// Output:
//	Results are sent as a collection of structured messages (using engine.Table).
//	Each result is sent as a separate message.
//	Results are ordered by number of stars on the public registry.
func (s *Service) Search(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s TERM", job.Name)
	}
	var (
		term        = job.Args[0]
		metaHeaders = map[string][]string{}
		authConfig  = &AuthConfig{}
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", metaHeaders)

	r, err := NewRegistry(authConfig, HTTPRequestFactory(metaHeaders), IndexServerAddress(), true)
	if err != nil {
		return job.Error(err)
	}
	results, err := r.SearchRepositories(term)
	if err != nil {
		return job.Error(err)
	}
	outs := engine.NewTable("star_count", 0)
	for _, result := range results.Results {
		out := &engine.Env{}
		out.Import(result)
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
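// Example (sketch): running the search job described in the doc comment
// above. Assumes an initialized *engine.Engine with the handler registered
// as "search"; the wiring and sample values here are illustrative.
func exampleSearch(eng *engine.Engine, auth *AuthConfig) error {
	job := eng.Job("search", "ubuntu")
	job.SetenvJson("authConfig", auth) // optional: extends the search to private images
	job.SetenvJson("metaHeaders", map[string][]string{
		"X-Custom-Header": {"value"}, // extra headers forwarded to the registry
	})
	job.Stdout.Add(os.Stdout) // results stream out as an engine.Table
	return job.Run()
}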
func Commission(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	fqdn := job.Getenv("Fqdn")
	configuration := job.Eng.Hack_GetGlobalVar("configuration").(types.KraneConfiguration)

	parameters := url.Values{}
	parameters.Set("name", name)
	parameters.Set("fqdn", fqdn)
	parameters.Set("plan", job.Getenv("Plan"))
	parameters.Set("ssh_profile", configuration.Production.SshProfile)

	id, err := configuration.Driver.Create(parameters)
	if err != nil {
		// Return on failure; previously the handler kept going with an empty id.
		return job.Errorf("unable to commission ship %s", fqdn)
	}
	job.Stdout.Write([]byte(id))

	ship := configuration.Driver.FindShip(name)
	newjob := job.Eng.Job("ssh_tunnel", ship.Fqdn, "true")
	go newjob.Run()
	return engine.StatusOK
}
func (s *TagStore) CmdManifest(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	tag := job.Getenv("tag")
	if tag == "" {
		tag = "latest"
	}

	// Resolve the Repository name from fqn to endpoint + name
	repoInfo, err := registry.ParseRepositoryInfo(name)
	if err != nil {
		return job.Error(err)
	}

	manifestBytes, err := s.newManifest(name, repoInfo.RemoteName, tag)
	if err != nil {
		return job.Error(err)
	}

	if _, err = job.Stdout.Write(manifestBytes); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// Release releases the network interface and associated resources (port
// mappings, allocated IPs) for the selected container.
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)
	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}
	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}
	if err := ipallocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
		log.Infof("Unable to release IPv4 %s", err)
	}
	if globalIPv6Network != nil {
		if err := ipallocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
			log.Infof("Unable to release IPv6 %s", err)
		}
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
	}
	var (
		name     = job.Args[0]
		resource = job.Args[1]
	)

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	data, err := container.Copy(resource)
	if err != nil {
		return job.Error(err)
	}
	defer data.Close()

	if _, err := io.Copy(job.Stdout, data); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	var (
		config    = container.Config
		newConfig runconfig.Config
	)
	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}
	if err := runconfig.Merge(&newConfig, config); err != nil {
		return job.Error(err)
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}
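// Example (sketch): invoking the commit handler above. Assumes an
// initialized *engine.Engine with the handler registered as "commit";
// the registration name and all sample values are illustrative.
func exampleCommit(eng *engine.Engine, containerID string) error {
	job := eng.Job("commit", containerID)
	job.Setenv("repo", "myrepo/myimage")
	job.Setenv("tag", "v1")
	job.Setenv("author", "Jane Doe <jane@example.com>")
	job.SetenvBool("pause", true)              // pause the container while committing
	job.SetenvJson("config", &runconfig.Config{}) // overrides merged into the container config
	job.Stdout.Add(os.Stdout)                  // the new image ID is printed here
	return job.Run()
}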
func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
	}
	var (
		name     = job.Args[0]
		resource = job.Args[1]
	)

	if container := srv.daemon.Get(name); container != nil {
		data, err := container.Copy(resource)
		if err != nil {
			return job.Error(err)
		}
		defer data.Close()

		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// FIXME: this is a shim to allow breaking up other parts of Server without
// dragging the spaghetti dependency along.
func (srv *Server) Log(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
	}
	srv.LogEvent(job.Args[0], job.Args[1], job.Args[2])
	return engine.StatusOK
}
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.SetJson("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.SetJson("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.ImageID)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.SetJson("Name", container.Name)
		out.SetInt("RestartCount", container.RestartCount)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)
		out.SetJson("AppArmorProfile", container.AppArmorProfile)
		out.SetList("ExecIDs", container.GetExecIDs())

		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}

		out.SetJson("HostConfig", container.hostConfig)

		container.hostConfig.Links = nil
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// FIXME: Allow to interrupt current push when new push of same image is done.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	// The error from Get is deliberately checked only after the session is
	// created: a missing image means we may be pushing a whole repository.
	img, err := s.graph.Get(repoInfo.LocalName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[repoInfo.LocalName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[repoInfo.LocalName]; exists {
			if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", repoInfo.CanonicalName))
	if _, err := s.pushImage(r, job.Stdout, img.ID, endpoint.String(), token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
	)

	v := url.Values{}
	if job.EnvExists("t") {
		v.Set("t", strconv.Itoa(job.GetenvInt("t")))
	}

	ship := shipWithContainerId(job, name)
	if ship == nil {
		return job.Errorf("No such container: %s\n", name)
	}

	_, statusCode, err := httpRequest("POST", "http", ship.Fqdn, ship.Port, "/containers/"+name+"/restart?"+v.Encode(), nil, false)
	if statusCode == 404 {
		return job.Errorf("No such container: %s\n", name)
	} else if statusCode == 500 {
		return job.Errorf("Server error trying to restart %s: %s\n", name, err)
	} else if statusCode >= 200 && statusCode < 300 {
		fmt.Printf("Container %s restarted\n", name)
	} else {
		return job.Errorf("Cannot restart container %s: %s\n", name, err)
	}
	return engine.StatusOK
}
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a go routine to setup a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func(proto, addr string) {
			log.Printf("Listening for HTTP on %s (%s)\n", proto, addr)
			chErrors <- ListenAndServe(proto, addr, job)
		}(protoAddrParts[0], protoAddrParts[1])
	}

	for i := 0; i < len(protoAddrs); i++ {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}
	return engine.StatusOK
}
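// Example (sketch): starting the API server job from daemon setup code.
// Assumes an initialized *engine.Engine with ServeApi registered as
// "serveapi"; the listener addresses are illustrative.
func exampleServeApi(eng *engine.Engine) error {
	job := eng.Job("serveapi", "unix:///var/run/docker.sock", "tcp://127.0.0.1:2375")
	return job.Run() // blocks while the HTTP listeners are serving
}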
// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := srv.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer srv.poolRemove("push", localName)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	// The error from Get is deliberately checked only after the registry
	// session is created: a missing image means we may be pushing a whole
	// repository.
	img, err := srv.daemon.Graph().Get(localName)
	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(srv.daemon.Repositories().Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (e *Events) Log(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
	}
	// not waiting for receivers
	go e.log(job.Args[0], job.Args[1], job.Args[2])
	return engine.StatusOK
}
func (srv *Server) Events(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s", job.Name)
	}

	var (
		since   = job.GetenvInt64("since")
		until   = job.GetenvInt64("until")
		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	)

	// If no until, disable timeout
	if until == 0 {
		timeout.Stop()
	}

	listener := make(chan utils.JSONMessage)
	srv.eventPublisher.Subscribe(listener)
	defer srv.eventPublisher.Unsubscribe(listener)

	// When sending an event JSON serialization errors are ignored, but all
	// other errors lead to the eviction of the listener.
	sendEvent := func(event *utils.JSONMessage) error {
		if b, err := json.Marshal(event); err == nil {
			if _, err = job.Stdout.Write(b); err != nil {
				return err
			}
		}
		return nil
	}

	job.Stdout.Write(nil)

	// Resend every event in the [since, until] time interval.
	if since != 0 {
		for _, event := range srv.GetEvents() {
			if event.Time >= since && (event.Time <= until || until == 0) {
				if err := sendEvent(&event); err != nil {
					return job.Error(err)
				}
			}
		}
	}

	for {
		select {
		case event, ok := <-listener:
			if !ok {
				return engine.StatusOK
			}
			if err := sendEvent(&event); err != nil {
				return job.Error(err)
			}
		case <-timeout.C:
			return engine.StatusOK
		}
	}
}
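// Example (sketch): consuming the event stream produced by the handler
// above. Assumes an initialized *engine.Engine with the handler registered
// as "events"; the time window is illustrative.
func exampleEvents(eng *engine.Engine) error {
	job := eng.Job("events")
	job.SetenvInt64("since", time.Now().Add(-time.Hour).Unix()) // replay the last hour
	job.SetenvInt64("until", time.Now().Unix())                 // then stop instead of streaming forever
	job.Stdout.Add(os.Stdout)                                   // each event arrives as a JSON message
	return job.Run()
}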
func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}

	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))

	logID := img.ID
	if tag != "" {
		logID += ":" + tag
	}
	if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
		log.Errorf("Error logging event 'import' for %s: %s", logID, err)
	}
	return engine.StatusOK
}
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}
	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
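// Example (sketch): triggering a pull through the engine. Assumes an
// initialized *engine.Engine with the handler registered as "pull" and an
// optional *registry.AuthConfig; names and values here are illustrative.
func examplePull(eng *engine.Engine, auth *registry.AuthConfig) error {
	job := eng.Job("pull", "ubuntu", "14.04") // IMAGE [TAG]
	job.SetenvBool("json", true)              // stream progress as JSON messages
	job.SetenvBool("parallel", true)          // download layers concurrently
	job.SetenvJson("authConfig", auth)
	job.Stdout.Add(os.Stdout)
	return job.Run()
}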
func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := srv.daemon.Get(name); container != nil {
		status, _ := container.State.WaitStop(-1 * time.Second)
		job.Printf("%d\n", status)
		return engine.StatusOK
	}
	return job.Errorf("%s: no such container: %s", job.Name, name)
}
func getPrivateKeys(job *engine.Job) ssh.Signer {
	privateBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa")
	if err != nil {
		// Bail out instead of trying to parse a nil byte slice.
		job.Errorf("Failed to load private key")
		return nil
	}
	private, err := ssh.ParsePrivateKey(privateBytes)
	if err != nil {
		job.Errorf("Failed to parse private key")
		return nil
	}
	return private
}
func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	container, err := daemon.Get(name)
	if err != nil {
		return job.Errorf("%s: %v", job.Name, err)
	}
	status, _ := container.WaitStop(-1 * time.Second)
	job.Printf("%d\n", status)
	return engine.StatusOK
}
// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (srv *Server) ImageTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	// This is kept for backward compatibility - hostconfig should be passed when
	// creating a container, not during start.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		container.LogEvent("die")
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	return engine.StatusOK
}
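// Example (sketch): starting a container through the engine, passing the
// host configuration in the job environment to exercise the
// backward-compatible path described above. Assumes an initialized
// *engine.Engine with the handler registered as "start"; the values are
// illustrative.
func exampleStart(eng *engine.Engine, containerID string) error {
	job := eng.Job("start", containerID)
	// Preferred: set hostconfig at create time and run "start" with an
	// empty environment. The legacy path shown here still works:
	hostConfig := &runconfig.HostConfig{Privileged: false}
	if err := job.ImportEnv(hostConfig); err != nil {
		return err
	}
	return job.Run()
}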
func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
	}
	oldName := job.Args[0]
	newName := job.Args[1]

	container := daemon.Get(oldName)
	if container == nil {
		return job.Errorf("No such container: %s", oldName)
	}

	oldName = container.Name

	container.Lock()
	defer container.Unlock()
	if _, err := daemon.reserveName(container.ID, newName); err != nil {
		return job.Errorf("Error when allocating new name: %s", err)
	}

	container.Name = newName

	if err := daemon.containerGraph.Delete(oldName); err != nil {
		return job.Errorf("Failed to delete container %q: %v", oldName, err)
	}

	return engine.StatusOK
}
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	return engine.StatusOK
}
func (srv *Server) ImageImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}
	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		daemon    = srv.daemon
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.State.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := srv.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}
	if err := container.Start(); err != nil {
		return job.Errorf("Cannot start container %s: %s", name, err)
	}
	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))

	return engine.StatusOK
}