// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return err
			}
			job.Stdout.Write(b)
			return nil
		}

		out := &engine.Env{}
		out.SetJson("Id", image.ID)
		out.SetJson("Parent", image.Parent)
		out.SetJson("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.SetJson("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.SetJson("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	}
	return fmt.Errorf("No such image: %s", name)
}
func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
	images, _ := s.graph.Map()
	if images == nil {
		return engine.StatusOK
	}
	job.Stdout.Write([]byte("digraph docker {\n"))

	var (
		parentImage *image.Image
		err         error
	)
	for _, image := range images {
		parentImage, err = image.GetParent()
		if err != nil {
			return job.Errorf("Error while getting parent image: %v", err)
		}
		if parentImage != nil {
			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
		} else {
			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
		}
	}

	for id, repos := range s.GetRepoRefs() {
		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
	}
	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
	return engine.StatusOK
}
func (s *TagStore) CmdTag(job *engine.Job) error {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return fmt.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	return s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force"))
}
func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	if err := parseForm(r); err != nil {
		return err
	}
	if version.GreaterThan("1.0") {
		w.Header().Set("Content-Type", "application/x-tar")
	}
	var job *engine.Job
	if name, ok := vars["name"]; ok {
		job = eng.Job("image_export", name)
	} else {
		job = eng.Job("image_export", r.Form["names"]...)
	}
	job.Stdout.Add(w)
	return job.Run()
}
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
//
// Argument syntax: search TERM
//
// Option environment:
// 'authConfig': json-encoded credentials to authenticate against the registry.
//   The search extends to images only accessible via the credentials.
//
// 'metaHeaders': extra HTTP headers to include in the request to the registry.
//   The headers should be passed as a json-encoded dictionary.
//
// Output:
// Results are sent as a collection of structured messages (using engine.Table).
// Each result is sent as a separate message.
// Results are ordered by number of stars on the public registry.
func (s *Service) Search(job *engine.Job) error {
	if n := len(job.Args); n != 1 {
		return fmt.Errorf("Usage: %s TERM", job.Name)
	}
	var (
		term        = job.Args[0]
		metaHeaders = map[string][]string{}
		authConfig  = &AuthConfig{}
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", metaHeaders)

	repoInfo, err := ResolveRepositoryInfo(job, term)
	if err != nil {
		return err
	}
	// *TODO: Search multiple indexes.
	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return err
	}
	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return err
	}
	results, err := r.SearchRepositories(repoInfo.GetSearchTerm())
	if err != nil {
		return err
	}
	outs := engine.NewTable("star_count", 0)
	for _, result := range results.Results {
		out := &engine.Env{}
		out.Import(result)
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return err
	}
	return nil
}
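// Hedged usage sketch (not part of the original source): assuming this handler
// is registered with an *engine.Engine "eng" under the job name "search", a
// caller could run a search like this. The env keys match the doc comment
// above; the handler name and the use of os.Stdout are illustrative assumptions.
func searchExample(eng *engine.Engine, term string) error {
	job := eng.Job("search", term)
	// Credentials and extra headers are optional json-encoded env values.
	job.SetenvJson("authConfig", &AuthConfig{})
	job.SetenvJson("metaHeaders", map[string][]string{})
	// Results are written to the job's stdout as an engine.Table.
	job.Stdout.Add(os.Stdout)
	return job.Run()
}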
// Release releases the network interface and port mappings allocated for the given id
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, containerInterface.IP); err != nil {
		log.Infof("Unable to release ip %s", err)
	}
	return engine.StatusOK
}
// serveFd creates an http.Server and sets it up to serve on the given
// socket-activated file descriptors.
func serveFd(addr string, job *engine.Job) error {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	ls, e := systemd.ListenFD(addr)
	if e != nil {
		return e
	}

	chErrors := make(chan error, len(ls))

	// We don't want to start serving on these sockets until the
	// daemon is initialized and installed. Otherwise required handlers
	// won't be ready.
	<-activationLock

	// Since ListenFD will return one or more sockets we have
	// to create a go func to spawn off multiple serves
	for i := range ls {
		listener := ls[i]
		go func() {
			httpSrv := http.Server{Handler: r}
			chErrors <- httpSrv.Serve(listener)
		}()
	}

	for i := 0; i < len(ls); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) error {
	var (
		authConfig = new(AuthConfig)
		endpoint   *Endpoint
		index      *IndexInfo
		status     string
		err        error
	)
	job.GetenvJson("authConfig", authConfig)

	addr := authConfig.ServerAddress
	if addr == "" {
		// Use the official registry address if not specified.
		addr = IndexServerAddress()
	}

	if index, err = ResolveIndexInfo(job, addr); err != nil {
		return err
	}

	if endpoint, err = NewEndpoint(index); err != nil {
		log.Errorf("unable to get new registry endpoint: %s", err)
		return err
	}

	authConfig.ServerAddress = endpoint.String()

	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
		return err
	}

	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
	job.Printf("%s\n", status)

	return nil
}
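// Hedged usage sketch (not part of the original source): assuming this handler
// is registered under the job name "auth", a caller could verify credentials
// like this. The "authConfig" env key matches what the handler reads above;
// the handler name and the Username/Password field names are assumptions.
func authExample(eng *engine.Engine, username, password string) error {
	job := eng.Job("auth")
	job.SetenvJson("authConfig", &AuthConfig{
		Username: username,
		Password: password,
	})
	// The resulting status string is printed to the job's output.
	job.Stdout.Add(os.Stdout)
	return job.Run()
}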
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
// - A user-defined ID
// - A collection of metadata describing the image
// - A directory tree stored as a tar archive (also called the "layer")
// - A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (ie you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
// - Layer content must be streamed in tar format on stdin. An empty input is
//   valid and represents a nil layer.
//
// - Image metadata must be passed in the command environment.
//   'json': a json-encoded object with all image metadata.
//     It will be stored as-is, without any encoding/decoding artifacts.
//     That is a requirement of the current registry client implementation,
//     because a re-encoded json might invalidate the image checksum at
//     the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return fmt.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is stupid and redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return err
	}
	if err := s.graph.Register(img, layer); err != nil {
		return err
	}
	return nil
}
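// Hedged usage sketch (not part of the original source): assuming this handler
// is registered under the name "image_set" (as the Syntax note above suggests),
// a caller could register an image whose metadata is already serialized as JSON
// and whose layer is available as a tar stream. imgJSON and layerTar are
// caller-provided; everything else follows the contract documented above.
func imageSetExample(eng *engine.Engine, id string, imgJSON []byte, layerTar io.Reader) error {
	job := eng.Job("image_set", id)
	// The metadata must be passed verbatim so the image checksum stays valid.
	job.Setenv("json", string(imgJSON))
	// An empty stdin is also valid and represents a nil layer.
	job.Stdin.Add(layerTar)
	return job.Run()
}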
func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
		return nil, err
	}

	if err := os.Chmod(addr, 0660); err != nil {
		return nil, err
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) {
	execConfig := &ExecConfig{
		// TODO(vishh): Expose 'User' once it is supported.
		//User: job.Getenv("User"),
		// TODO(vishh): Expose 'Privileged' once it is supported.
		//Privileged: job.GetenvBool("Privileged"),
		Tty:          job.GetenvBool("Tty"),
		AttachStdin:  job.GetenvBool("AttachStdin"),
		AttachStderr: job.GetenvBool("AttachStderr"),
		AttachStdout: job.GetenvBool("AttachStdout"),
	}
	cmd := job.GetenvList("Cmd")
	if len(cmd) == 0 {
		return nil, fmt.Errorf("No exec command specified")
	}
	execConfig.Cmd = cmd

	return execConfig, nil
}
func LinkContainers(job *engine.Job) error {
	var (
		action       = job.Args[0]
		nfAction     iptables.Action
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)

	switch action {
	case "-A":
		nfAction = iptables.Append
	case "-I":
		nfAction = iptables.Insert
	case "-D":
		nfAction = iptables.Delete
	default:
		return fmt.Errorf("Invalid action '%s' specified", action)
	}

	ip1 := net.ParseIP(parentIP)
	if ip1 == nil {
		return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
	}
	ip2 := net.ParseIP(childIP)
	if ip2 == nil {
		return fmt.Errorf("Child IP '%s' is invalid", childIP)
	}

	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
	for _, p := range ports {
		port := nat.Port(p)
		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
			return err
		}
	}
	return nil
}
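// Hedged usage sketch (not part of the original source): this driver is
// registered under the job name "link" (see the handler map in InitDriver), so
// a caller could append link rules between a parent and a child container like
// this. The concrete IPs and port list are illustrative.
func linkExample(eng *engine.Engine) error {
	job := eng.Job("link", "-A")
	job.Setenv("ParentIP", "172.17.0.2")
	job.Setenv("ChildIP", "172.17.0.3")
	job.SetenvBool("IgnoreErrors", false)
	job.SetenvList("Ports", []string{"80/tcp", "443/tcp"})
	return job.Run()
}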
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          net.IP
		mac         net.HardwareAddr
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	// If no explicit mac address was given, generate a random one.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
// CmdImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tar ball.
// name is the set of tags to export.
// out is the writer where the images are written to.
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
	}
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)

	rootRepoMap := map[string]Repository{}
	addKey := func(name string, tag string, id string) {
		log.Debugf("add key [%s:%s]", name, tag)
		if repo, ok := rootRepoMap[name]; !ok {
			rootRepoMap[name] = Repository{tag: id}
		} else {
			repo[tag] = id
		}
	}
	for _, name := range job.Args {
		log.Debugf("Serializing %s", name)
		rootRepo := s.Repositories[name]
		if rootRepo != nil {
			// this is a base repo name, like 'busybox'
			for tag, id := range rootRepo {
				addKey(name, tag, id)
				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
					return job.Error(err)
				}
			}
		} else {
			img, err := s.LookupImage(name)
			if err != nil {
				return job.Error(err)
			}

			if img != nil {
				// This is a named image like 'busybox:latest'
				repoName, repoTag := parsers.ParseRepositoryTag(name)

				// check this length, because a lookup of a truncated hash will not have a tag
				// and will not need to be added to this map
				if len(repoTag) > 0 {
					addKey(repoName, repoTag, img.ID)
				}
				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
					return job.Error(err)
				}
			} else {
				// this must be an ID that didn't get looked up just right?
				if err := s.exportImage(job.Eng, name, tempdir); err != nil {
					return job.Error(err)
				}
			}
		}
		log.Debugf("End Serializing %s", name)
	}
	// write repositories, if there is something to write
	if len(rootRepoMap) > 0 {
		rootRepoJson, _ := json.Marshal(rootRepoMap)
		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		log.Debugf("There were no repositories to write")
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()

	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	log.Debugf("End export job: %s", job.Name)
	return engine.StatusOK
}
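// Hedged usage sketch (not part of the original source): the HTTP handler
// getImagesGet above invokes this exporter through the job name
// "image_export" and streams the tar ball to the response writer. A direct
// caller could do the same into any io.Writer; the function name here is
// illustrative.
func imageExportExample(eng *engine.Engine, name string, dest io.Writer) error {
	job := eng.Job("image_export", name)
	// The uncompressed tar ball is written to the job's stdout.
	job.Stdout.Add(dest)
	return job.Run()
}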
func InitDriver(job *engine.Job) error {
	var (
		networkv4      *net.IPNet
		networkv6      *net.IPNet
		addrv4         net.Addr
		addrsv6        []net.Addr
		enableIPTables = job.GetenvBool("EnableIptables")
		enableIPv6     = job.GetenvBool("EnableIPv6")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		bridgeIPv6     = "fe80::1/64"
		fixedCIDR      = job.Getenv("FixedCIDR")
		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)
	if err != nil {
		// No Bridge existent, create one
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return err
		}

		// If the iface is not found, try to create it
		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
			return err
		}

		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return err
		}

		if fixedCIDRv6 != "" {
			// Setting route to global IPv6 subnet
			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
			}
		}
	} else {
		// Bridge exists already, getting info...
		// Validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			networkv4 = addrv4.(*net.IPNet)
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return err
			}
			if !networkv4.IP.Equal(bip) {
				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
			}
		}

		// A bridge might exist but not have any IPv6 addr associated with it yet
		// (for example, an existing Docker installation that has only been used
		// with IPv4 and docker0 already is set up) In that case, we can perform
		// the bridge init for IPv6 here, else we will error out below if --ipv6=true
		if len(addrsv6) == 0 && enableIPv6 {
			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
				return err
			}
			// Recheck addresses now that IPv6 is setup on the bridge
			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
			if err != nil {
				return err
			}
		}

		// TODO: Check if route to fixedCIDRv6 is set
	}

	if enableIPv6 {
		bip6, _, err := net.ParseCIDR(bridgeIPv6)
		if err != nil {
			return err
		}
		found := false
		for _, addrv6 := range addrsv6 {
			networkv6 = addrv6.(*net.IPNet)
			if networkv6.IP.Equal(bip6) {
				found = true
				break
			}
		}
		if !found {
			return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
		}
	}

	networkv4 = addrv4.(*net.IPNet)

	if enableIPv6 {
		if len(addrsv6) == 0 {
			return errors.New("IPv6 enabled but no IPv6 detected")
		}
		bridgeIPv6Addr = networkv6.IP
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
			return err
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			log.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}

		if fixedCIDRv6 != "" {
			// Enable IPv6 forwarding
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
			}
			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
				log.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
			}
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return err
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return err
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return err
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeIPv4Network = networkv4
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
			return err
		}
	}

	if fixedCIDRv6 != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
		if err != nil {
			return err
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
			return err
		}
		globalIPv6Network = subnet
	}

	// Block BridgeIP in IP allocator
	ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return err
		}
	}
	return nil
}
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
		mirrors     []string
	)

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	var isOfficial bool
	if endpoint.VersionString(1) == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName

		isOfficial = isOfficialName(remoteName)
		if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
			remoteName = "library/" + remoteName
		}

		// Use provided mirrors, if any
		mirrors = s.mirrors
	}

	logName := localName
	if tag != "" {
		logName += ":" + tag
	}

	if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) {
		j := job.Eng.Job("trust_update_base")
		if err = j.Run(); err != nil {
			return job.Errorf("error updating trust base graph: %s", err)
		}

		if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil {
			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
			}
			return engine.StatusOK
		} else if err != registry.ErrDoesNotExist {
			log.Errorf("Error from V2 registry: %s", err)
		}
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil {
		return job.Error(err)
	}

	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
	}

	return engine.StatusOK
}
// FIXME: Allow interrupting the current push when a new push of the same image is started.
func (s *TagStore) CmdPush(job *engine.Job) error {
	if n := len(job.Args); n != 1 {
		return fmt.Errorf("Usage: %s IMAGE", job.Name)
	}

	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return err
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return err
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return err
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err != nil {
		return err
	}

	reposLen := 1
	if tag == "" {
		reposLen = len(s.Repositories[repoInfo.LocalName])
	}
	job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))

	// If it fails, try to get the repository
	localRepo, exists := s.Repositories[repoInfo.LocalName]
	if !exists {
		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
	}

	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
		err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
		if err == nil {
			return nil
		}

		if err != ErrV2RegistryUnavailable {
			return fmt.Errorf("Error pushing to registry: %s", err)
		}
	}

	if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
		return err
	}
	return nil
}
func (s *TagStore) CmdPull(job *engine.Job) error {
	if n := len(job.Args); n != 1 && n != 2 {
		return fmt.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return err
	}

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag))
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
			<-c
			return nil
		}
		return err
	}
	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))

	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return err
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return err
	}

	logName := repoInfo.LocalName
	if tag != "" {
		logName = utils.ImageReference(logName, tag)
	}

	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
		if repoInfo.Official {
			j := job.Eng.Job("trust_update_base")
			if err = j.Run(); err != nil {
				log.Errorf("error updating trust base graph: %s", err)
			}
		}

		log.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
		if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil {
			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
			}
			return nil
		} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
			log.Errorf("Error from V2 registry: %s", err)
		}

		log.Debug("image does not exist on v2 registry, falling back to v1")
	}

	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
		return err
	}

	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
	}

	return nil
}
func ContainerConfigFromJob(job *engine.Job) *Config {
	config := &Config{
		Hostname:        job.Getenv("Hostname"),
		Domainname:      job.Getenv("Domainname"),
		User:            job.Getenv("User"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		Cpuset:          job.Getenv("Cpuset"),
		AttachStdin:     job.GetenvBool("AttachStdin"),
		AttachStdout:    job.GetenvBool("AttachStdout"),
		AttachStderr:    job.GetenvBool("AttachStderr"),
		Tty:             job.GetenvBool("Tty"),
		OpenStdin:       job.GetenvBool("OpenStdin"),
		StdinOnce:       job.GetenvBool("StdinOnce"),
		Image:           job.Getenv("Image"),
		WorkingDir:      job.Getenv("WorkingDir"),
		NetworkDisabled: job.GetenvBool("NetworkDisabled"),
		MacAddress:      job.Getenv("MacAddress"),
	}
	job.GetenvJson("ExposedPorts", &config.ExposedPorts)
	job.GetenvJson("Volumes", &config.Volumes)
	if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
		config.PortSpecs = PortSpecs
	}
	if Env := job.GetenvList("Env"); Env != nil {
		config.Env = Env
	}
	if Cmd := job.GetenvList("Cmd"); Cmd != nil {
		config.Cmd = Cmd
	}

	job.GetenvJson("Labels", &config.Labels)

	if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil {
		config.Entrypoint = Entrypoint
	}
	return config
}
func (s *TagStore) CmdImages(job *engine.Job) error {
	var (
		allImages   map[string]*image.Image
		err         error
		filt_tagged = true
		filt_label  = false
	)

	imageFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return err
	}
	for name := range imageFilters {
		if _, ok := acceptedImageFilterTags[name]; !ok {
			return fmt.Errorf("Invalid filter '%s'", name)
		}
	}

	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if strings.ToLower(value) == "true" {
				filt_tagged = false
			}
		}
	}

	_, filt_label = imageFilters["label"]

	if job.GetenvBool("all") && filt_tagged {
		allImages, err = s.graph.Map()
	} else {
		allImages, err = s.graph.Heads()
	}
	if err != nil {
		return err
	}

	lookup := make(map[string]*engine.Env)
	s.Lock()
	for repoName, repository := range s.Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), repoName); !match {
				continue
			}
		}
		for ref, id := range repository {
			imgRef := utils.ImageReference(repoName, ref)
			image, err := s.graph.Get(id)
			if err != nil {
				log.Printf("Warning: couldn't load %s from %s: %s", id, imgRef, err)
				continue
			}

			if out, exists := lookup[id]; exists {
				if filt_tagged {
					if utils.DigestReference(ref) {
						out.SetList("RepoDigests", append(out.GetList("RepoDigests"), imgRef))
					} else { // Tag Ref.
						out.SetList("RepoTags", append(out.GetList("RepoTags"), imgRef))
					}
				}
			} else {
				// get the boolean list for if only the untagged images are requested
				delete(allImages, id)
				if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
					continue
				}
				if filt_tagged {
					out := &engine.Env{}
					out.SetJson("ParentId", image.Parent)
					out.SetJson("Id", image.ID)
					out.SetInt64("Created", image.Created.Unix())
					out.SetInt64("Size", image.Size)
					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
					out.SetJson("Labels", image.ContainerConfig.Labels)

					if utils.DigestReference(ref) {
						out.SetList("RepoTags", []string{})
						out.SetList("RepoDigests", []string{imgRef})
					} else {
						out.SetList("RepoTags", []string{imgRef})
						out.SetList("RepoDigests", []string{})
					}

					lookup[id] = out
				}
			}
		}
	}
	s.Unlock()

	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}

	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" || filt_label {
		for _, image := range allImages {
			if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
				continue
			}
			out := &engine.Env{}
			out.SetJson("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.SetList("RepoDigests", []string{"<none>@<none>"})
			out.SetJson("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			out.SetJson("Labels", image.ContainerConfig.Labels)
			outs.Add(out)
		}
	}

	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return err
	}
	return nil
}
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// host ip, proto, and host port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return job.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to 10 times to get a port that's not already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//

	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}
		// There is no point in immediately retrying to map an explicitly
		// chosen port.
		if hostPort != 0 {
			job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
			break
		}
		job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
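// Hedged usage sketch (not part of the original source): this handler is
// registered under the job name "allocate_port" (see the handler map in
// InitDriver). After an interface has been allocated for the container id, a
// caller could request a host port mapping like this; the concrete id and
// port values are illustrative.
func allocatePortExample(eng *engine.Engine, containerID string) error {
	job := eng.Job("allocate_port", containerID)
	job.Setenv("Proto", "tcp")
	job.Setenv("ContainerPort", "80")
	// A HostPort of 0 lets portmapper pick a free port; the chosen
	// HostIP/HostPort pair is written back on the job's stdout as an engine.Env.
	job.Setenv("HostPort", "0")
	job.Stdout.Add(os.Stdout)
	return job.Run()
}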
func InitDriver(job *engine.Job) engine.Status {
	var (
		network        *net.IPNet
		enableIPTables = job.GetenvBool("EnableIptables")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipMasq         = job.GetenvBool("EnableIpMasq")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
		fixedCIDR      = job.Getenv("FixedCIDR")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addr, err := networkdriver.GetIfaceAddr(bridgeIface)
	if err != nil {
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			return job.Error(err)
		}
		// If the bridge interface is not found (or has no address), try to create it and/or add an address
		if err := configureBridge(bridgeIP); err != nil {
			return job.Error(err)
		}

		addr, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		network = addr.(*net.IPNet)
	} else {
		network = addr.(*net.IPNet)
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !network.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
			}
		}
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addr, icc, ipMasq); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}
	}

	// We can always try removing the iptables
	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
		if err != nil {
			return job.Error(err)
		}
		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeNetwork = network
	if fixedCIDR != "" {
		_, subnet, err := net.ParseCIDR(fixedCIDR)
		if err != nil {
			return job.Error(err)
		}
		log.Debugf("Subnet: %v", subnet)
		if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil {
			return job.Error(err)
		}
	}

	// https://github.com/docker/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return job.Error(err)
		}
	}
	return engine.StatusOK
}
func (s *TagStore) CmdImport(job *engine.Job) error {
	if n := len(job.Args); n != 2 && n != 3 {
		return fmt.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src          = job.Args[0]
		repo         = job.Args[1]
		tag          string
		sf           = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive      archive.ArchiveReader
		resp         *http.Response
		stdoutBuffer = bytes.NewBuffer(nil)
		newConfig    runconfig.Config
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return err
		}
		progressReader := progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       job.Stdout,
			Formatter: sf,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Importing",
		})
		defer progressReader.Close()
		archive = progressReader
	}

	buildConfigJob := job.Eng.Job("build_config")
	buildConfigJob.Stdout.Add(stdoutBuffer)
	buildConfigJob.Setenv("changes", job.Getenv("changes"))
	// FIXME: this should be removed when we remove the deprecated config param
	buildConfigJob.Setenv("config", job.Getenv("config"))

	if err := buildConfigJob.Run(); err != nil {
		return err
	}
	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
		return err
	}

	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig)
	if err != nil {
		return err
	}

	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Set(repo, tag, img.ID, true); err != nil {
			return err
		}
	}

	job.Stdout.Write(sf.FormatStatus("", img.ID))
	logID := img.ID
	if tag != "" {
		logID = utils.ImageReference(logID, tag)
	}
	if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
		log.Errorf("Error logging event 'import' for %s: %s", logID, err)
	}
	return nil
}
func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
	if !job.GetenvBool("TlsVerify") {
		log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
	}

	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := allocateDaemonPort(addr); err != nil {
		return nil, err
	}

	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
		var tlsCa string
		if job.GetenvBool("TlsVerify") {
			tlsCa = job.Getenv("TlsCa")
		}
		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
		if err != nil {
			return nil, err
		}
	}
	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
	return job.Errorf("CmdLoad is not supported on this platform")
}
// Creates an image from Pull or from Import
func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	var (
		image = r.Form.Get("fromImage")
		repo  = r.Form.Get("repo")
		tag   = r.Form.Get("tag")
		job   *engine.Job
	)
	authEncoded := r.Header.Get("X-Registry-Auth")
	authConfig := &registry.AuthConfig{}
	if authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &registry.AuthConfig{}
		}
	}
	if image != "" { //pull
		if tag == "" {
			image, tag = parsers.ParseRepositoryTag(image)
		}
		metaHeaders := map[string][]string{}
		for k, v := range r.Header {
			if strings.HasPrefix(k, "X-Meta-") {
				metaHeaders[k] = v
			}
		}
		job = eng.Job("pull", image, tag)
		job.SetenvBool("parallel", version.GreaterThan("1.3"))
		job.SetenvJson("metaHeaders", metaHeaders)
		job.SetenvJson("authConfig", authConfig)
	} else { //import
		if tag == "" {
			repo, tag = parsers.ParseRepositoryTag(repo)
		}
		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
		job.Stdin.Add(r.Body)
		job.SetenvList("changes", r.Form["changes"])
	}

	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
		w.Write(sf.FormatError(err))
	}

	return nil
}
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)

		// FIXME: These are for backward compatibility, if people use these
		// options with `HostConfig`, we should still make them workable.
		if job.EnvExists("Memory") && hostConfig.Memory == 0 {
			hostConfig.Memory = job.GetenvInt64("Memory")
		}
		if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 {
			hostConfig.MemorySwap = job.GetenvInt64("MemorySwap")
		}
		if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 {
			hostConfig.CpuShares = job.GetenvInt64("CpuShares")
		}
		if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
			hostConfig.CpusetCpus = job.Getenv("Cpuset")
		}

		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		CpusetCpus:      job.Getenv("CpusetCpus"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
		PidMode:         PidMode(job.Getenv("PidMode")),
		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
		CgroupParent:    job.Getenv("CgroupParent"),
	}

	// FIXME: This is for backward compatibility, if people use `Cpuset`
	// in json, make it workable, we will only pass hostConfig.CpusetCpus
	// to execDriver.
	if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
		hostConfig.CpusetCpus = job.Getenv("Cpuset")
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	job.GetenvJson("Ulimits", &hostConfig.Ulimits)
	job.GetenvJson("LogConfig", &hostConfig.LogConfig)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
// Allocate a network interface
func Allocate(job *engine.Job) error {
	var (
		ip            net.IP
		mac           net.HardwareAddr
		err           error
		id            = job.Args[0]
		requestedIP   = net.ParseIP(job.Getenv("RequestedIP"))
		requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6"))
		globalIPv6    net.IP
	)

	ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
	if err != nil {
		return err
	}

	// If no explicit mac address was given, generate a random one.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	if globalIPv6Network != nil {
		// If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address
		netmask_ones, _ := globalIPv6Network.Mask.Size()
		if requestedIPv6 == nil && netmask_ones <= 80 {
			requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
			copy(requestedIPv6, globalIPv6Network.IP)
			for i, h := range mac {
				requestedIPv6[i+10] = h
			}
		}

		globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6)
		if err != nil {
			log.Errorf("Allocator: RequestIP v6: %v", err)
			return err
		}
		log.Infof("Allocated IPv6 %s", globalIPv6)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeIPv4Network.Mask.String())
	out.Set("Gateway", bridgeIPv4Network.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeIPv4Network.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	// If linklocal IPv6
	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
	if err != nil {
		return err
	}
	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
	out.Set("LinkLocalIPv6", localIPv6.String())
	out.Set("MacAddress", mac.String())

	if globalIPv6Network != nil {
		out.Set("GlobalIPv6", globalIPv6.String())
		sizev6, _ := globalIPv6Network.Mask.Size()
		out.SetInt("GlobalIPv6PrefixLen", sizev6)
		out.Set("IPv6Gateway", bridgeIPv6Addr.String())
	}

	currentInterfaces.Set(id, &networkInterface{
		IP:   ip,
		IPv6: globalIPv6,
	})

	out.WriteTo(job.Stdout)

	return nil
}