func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	if err := parseForm(r); err != nil {
		return err
	}

	useJSON := version.GreaterThan("1.0")
	if useJSON {
		w.Header().Set("Content-Type", "application/x-tar")
	}

	output := utils.NewWriteFlusher(w)
	imageExportConfig := &graph.ImageExportConfig{Outstream: output}
	if name, ok := vars["name"]; ok {
		imageExportConfig.Names = []string{name}
	} else {
		imageExportConfig.Names = r.Form["names"]
	}

	if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil {
		if !output.Flushed() {
			return err
		}
		sf := streamformatter.NewStreamFormatter(useJSON)
		output.Write(sf.FormatError(err))
	}
	return nil
}
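// Almost every handler in this file wraps its output in utils.NewWriteFlusher so that
// streamed progress reaches the client immediately, and checks Flushed() to decide
// whether an error can still be returned as a plain HTTP error or must be encoded
// into the already-started stream. A minimal sketch of such a wrapper, written here
// as an illustrative assumption (not the actual utils implementation); it assumes
// "io" and "net/http" are imported.
type writeFlusher struct {
	w       io.Writer
	flusher http.Flusher
	flushed bool
}

func newWriteFlusher(w io.Writer) *writeFlusher {
	wf := &writeFlusher{w: w}
	if f, ok := w.(http.Flusher); ok {
		wf.flusher = f
	}
	return wf
}

// Write forwards to the underlying writer and flushes right away so the client
// sees progress output as it is produced.
func (wf *writeFlusher) Write(p []byte) (int, error) {
	n, err := wf.w.Write(p)
	wf.flushed = true
	if wf.flusher != nil {
		wf.flusher.Flush()
	}
	return n, err
}

// Flushed reports whether anything has already been written to the stream.
func (wf *writeFlusher) Flushed() bool {
	return wf.flushed
}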
// Creates an image from Pull or from Import
func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	var (
		image = r.Form.Get("fromImage")
		repo  = r.Form.Get("repo")
		tag   = r.Form.Get("tag")
		job   *engine.Job
	)
	authEncoded := r.Header.Get("X-Registry-Auth")
	authConfig := &registry.AuthConfig{}
	if authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &registry.AuthConfig{}
		}
	}
	if image != "" { //pull
		if tag == "" {
			image, tag = parsers.ParseRepositoryTag(image)
		}
		metaHeaders := map[string][]string{}
		for k, v := range r.Header {
			if strings.HasPrefix(k, "X-Meta-") {
				metaHeaders[k] = v
			}
		}
		job = eng.Job("pull", image, tag)
		job.SetenvBool("parallel", version.GreaterThan("1.3"))
		job.SetenvJson("metaHeaders", metaHeaders)
		job.SetenvJson("authConfig", authConfig)
	} else { //import
		if tag == "" {
			repo, tag = parsers.ParseRepositoryTag(repo)
		}
		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
		job.Stdin.Add(r.Body)
		job.SetenvList("changes", r.Form["changes"])
	}

	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
		w.Write(sf.FormatError(err))
	}

	return nil
}
func handleContainerLogs(w http.ResponseWriter, r *http.Request) {
	var outStream, errStream io.Writer
	outStream = utils.NewWriteFlusher(w)

	// not sure how to test follow
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), 500)
	}
	stdout, stderr := getBoolValue(r.Form.Get("stdout")), getBoolValue(r.Form.Get("stderr"))
	if stderr {
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
	}
	if stdout {
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	var i int
	if tail, err := strconv.Atoi(r.Form.Get("tail")); err == nil && tail > 0 {
		i = 50 - tail
		if i < 0 {
			i = 0
		}
	}
	for ; i < 50; i++ {
		line := fmt.Sprintf("line %d", i)
		if getBoolValue(r.Form.Get("timestamps")) {
			l := &jsonlog.JSONLog{Log: line, Created: time.Now()}
			line = fmt.Sprintf("%s %s", l.Created.Format(timeutils.RFC3339NanoFixed), line)
		}
		if i%2 == 0 && stderr {
			fmt.Fprintln(errStream, line)
		} else if i%2 == 1 && stdout {
			fmt.Fprintln(outStream, line)
		}
	}
}
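// handleContainerLogs above multiplexes stdout and stderr onto a single stream via
// stdcopy.NewStdWriter, which prefixes each frame with an 8-byte header: byte 0 is
// the stream type (1 = stdout, 2 = stderr) and bytes 4-7 carry the big-endian
// payload length. A hedged client-side sketch of reading that framing (illustrative
// only; the real client uses stdcopy.StdCopy; assumes "encoding/binary" and "io"):
func demuxFrames(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])
		dst := stdout
		if header[0] == 2 { // 2 marks a stderr frame
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}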
func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	// Validate args here, because we can't return a non-StatusOK response once the logs call has started streaming
	stdout, stderr := toBool(r.Form.Get("stdout")), toBool(r.Form.Get("stderr"))
	if !(stdout || stderr) {
		return fmt.Errorf("Bad parameters: you must choose at least one stream")
	}

	logsConfig := &daemon.ContainerLogsConfig{
		Follow:     toBool(r.Form.Get("follow")),
		Timestamps: toBool(r.Form.Get("timestamps")),
		Tail:       r.Form.Get("tail"),
		UseStdout:  stdout,
		UseStderr:  stderr,
		OutStream:  utils.NewWriteFlusher(w),
	}

	d := getDaemon(eng)
	if err := d.ContainerLogs(vars["name"], logsConfig); err != nil {
		fmt.Fprintf(w, "Error running logs job: %s\n", err)
	}

	return nil
}
func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
	w.Header().Set("Content-Type", "application/json")
	if flush {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	} else {
		job.Stdout.Add(w)
	}
}
func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	metaHeaders := map[string][]string{}
	for k, v := range r.Header {
		if strings.HasPrefix(k, "X-Meta-") {
			metaHeaders[k] = v
		}
	}
	if err := parseForm(r); err != nil {
		return err
	}
	authConfig := &registry.AuthConfig{}

	authEncoded := r.Header.Get("X-Registry-Auth")
	if authEncoded != "" {
		// the new format is to handle the authConfig as a header
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &registry.AuthConfig{}
		}
	} else {
		// the old format is supported for compatibility if there was no authConfig header
		if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
			return err
		}
	}

	manifest, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}

	job := eng.Job("push", vars["name"])
	job.SetenvJson("metaHeaders", metaHeaders)
	job.SetenvJson("authConfig", authConfig)
	job.Setenv("manifest", string(manifest))
	job.Setenv("tag", r.Form.Get("tag"))
	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
		w.Write(sf.FormatError(err))
	}
	return nil
}
func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	metaHeaders := map[string][]string{}
	for k, v := range r.Header {
		if strings.HasPrefix(k, "X-Meta-") {
			metaHeaders[k] = v
		}
	}
	if err := parseForm(r); err != nil {
		return err
	}
	authConfig := &cliconfig.AuthConfig{}

	authEncoded := r.Header.Get("X-Registry-Auth")
	if authEncoded != "" {
		// the new format is to handle the authConfig as a header
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &cliconfig.AuthConfig{}
		}
	} else {
		// the old format is supported for compatibility if there was no authConfig header
		if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
			return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err)
		}
	}

	useJSON := version.GreaterThan("1.0")
	name := vars["name"]
	output := utils.NewWriteFlusher(w)
	imagePushConfig := &graph.ImagePushConfig{
		MetaHeaders: metaHeaders,
		AuthConfig:  authConfig,
		Tag:         r.Form.Get("tag"),
		OutStream:   output,
		Json:        useJSON,
	}
	if useJSON {
		w.Header().Set("Content-Type", "application/json")
	}

	if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
		if !output.Flushed() {
			return err
		}
		sf := streamformatter.NewStreamFormatter(useJSON)
		output.Write(sf.FormatError(err))
	}
	return nil
}
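// The push, pull and build handlers above all read registry credentials from the
// X-Registry-Auth header, decoding it as URL-safe base64 of a JSON auth config.
// A hedged client-side sketch of producing that header value (the helper name and
// field names are illustrative assumptions; assumes "encoding/base64" and "encoding/json"):
func encodeRegistryAuth(username, password string) (string, error) {
	buf, err := json.Marshal(map[string]string{
		"username": username,
		"password": password,
	})
	if err != nil {
		return "", err
	}
	// the handlers decode with base64.URLEncoding, so encode the same way
	return base64.URLEncoding.EncodeToString(buf), nil
}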
func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	return s.daemon.ContainerStats(vars["name"], boolValue(r, "stream"), utils.NewWriteFlusher(w))
}
func getImageManifest(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	job := eng.Job("image_manifest", vars["name"])
	job.Setenv("tag", r.Form.Get("tag"))
	job.Stdout.Add(utils.NewWriteFlusher(w))
	return job.Run()
}
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       out,
			Formatter: sf,
			Size:      int(layerData.Size),
			NewLines:  false,
			ID:        common.TruncateID(imgData.ID),
			Action:    "Pushing",
		}), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	d := getDaemon(eng)
	return d.ContainerStats(vars["name"], utils.NewWriteFlusher(w))
}
func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var (
		inspectJob = eng.Job("container_inspect", vars["name"])
		logsJob    = eng.Job("logs", vars["name"])
		c, err     = inspectJob.Stdout.AddEnv()
	)
	if err != nil {
		return err
	}
	logsJob.Setenv("follow", r.Form.Get("follow"))
	logsJob.Setenv("tail", r.Form.Get("tail"))
	logsJob.Setenv("stdout", r.Form.Get("stdout"))
	logsJob.Setenv("stderr", r.Form.Get("stderr"))
	logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
	// Validate args here, because we can't return a non-StatusOK response after the job.Run() call
	stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
	if !(stdout || stderr) {
		return fmt.Errorf("Bad parameters: you must choose at least one stream")
	}
	if err = inspectJob.Run(); err != nil {
		return err
	}

	var outStream, errStream io.Writer
	outStream = utils.NewWriteFlusher(w)

	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
		errStream = utils.NewStdWriter(outStream, utils.Stderr)
		outStream = utils.NewStdWriter(outStream, utils.Stdout)
	} else {
		errStream = outStream
	}

	logsJob.Stdout.Add(outStream)
	logsJob.Stderr.Set(errStream)
	if err := logsJob.Run(); err != nil {
		fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
	}
	return nil
}
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
func postImagesDiffAndApply(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	job := eng.Job("apply_diff", r.Form.Get("container"), r.Form.Get("fromImage"), r.Form.Get("currentImage"))
	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if job.Stdout.Used() {
			sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
			w.Write(sf.FormatError(err))
		}
		return err
	}
	return nil
}
// pushRepository pushes layers that do not already exist on the registry.
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	log.Debugf("Local repo: %s", localRepo)
	out = utils.NewWriteFlusher(out)
	imgList, tags, err := s.getImageList(localRepo, tag)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus("", "Sending image list"))

	imageIndex := s.createImageIndex(imgList, tags)
	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
	for _, data := range imageIndex {
		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}

	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", repoInfo.CanonicalName, nTag))
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := s.pushImageToEndpoint(endpoint, out, repoInfo.RemoteName, imgList, tags, repoData, sf, r); err != nil {
			return err
		}
	}
	_, err = r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
	return err
}
func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version.LessThan("1.3") {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	var (
		authEncoded       = r.Header.Get("X-Registry-Auth")
		authConfig        = &registry.AuthConfig{}
		configFileEncoded = r.Header.Get("X-Registry-Config")
		configFile        = &registry.ConfigFile{}
		job               = eng.Job("build")
	)

	// This block can be removed when API versions prior to 1.9 are deprecated.
	// Both headers will be parsed and sent along to the daemon, but if a non-empty
	// ConfigFile is present, any value provided as an AuthConfig directly will
	// be overridden. See BuildFile::CmdFrom for details.
	if version.LessThan("1.9") && authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &registry.AuthConfig{}
		}
	}

	if configFileEncoded != "" {
		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			configFile = &registry.ConfigFile{}
		}
	}

	if version.GreaterThanOrEqualTo("1.8") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else {
		job.Setenv("rm", r.FormValue("rm"))
	}
	job.Stdin.Add(r.Body)
	job.Setenv("remote", r.FormValue("remote"))
	job.Setenv("t", r.FormValue("t"))
	job.Setenv("q", r.FormValue("q"))
	job.Setenv("nocache", r.FormValue("nocache"))
	job.Setenv("forcerm", r.FormValue("forcerm"))
	job.SetenvJson("authConfig", authConfig)
	job.SetenvJson("configFile", configFile)

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
		w.Write(sf.FormatError(err))
	}
	return nil
}
// Creates an image from Pull or from Import
func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	var (
		image = r.Form.Get("fromImage")
		repo  = r.Form.Get("repo")
		tag   = r.Form.Get("tag")
	)
	authEncoded := r.Header.Get("X-Registry-Auth")
	authConfig := &cliconfig.AuthConfig{}
	if authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &cliconfig.AuthConfig{}
		}
	}

	var (
		opErr   error
		useJSON = version.GreaterThan("1.0")
	)

	if useJSON {
		w.Header().Set("Content-Type", "application/json")
	}

	if image != "" { //pull
		if tag == "" {
			image, tag = parsers.ParseRepositoryTag(image)
		}
		metaHeaders := map[string][]string{}
		for k, v := range r.Header {
			if strings.HasPrefix(k, "X-Meta-") {
				metaHeaders[k] = v
			}
		}

		imagePullConfig := &graph.ImagePullConfig{
			Parallel:    version.GreaterThan("1.3"),
			MetaHeaders: metaHeaders,
			AuthConfig:  authConfig,
			OutStream:   utils.NewWriteFlusher(w),
			Json:        useJSON,
		}

		opErr = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
	} else { //import
		if tag == "" {
			repo, tag = parsers.ParseRepositoryTag(repo)
		}

		src := r.Form.Get("fromSrc")

		imageImportConfig := &graph.ImageImportConfig{
			Changes:   r.Form["changes"],
			InConfig:  r.Body,
			OutStream: utils.NewWriteFlusher(w),
			Json:      useJSON,
		}

		newConfig, err := builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes)
		if err != nil {
			return err
		}
		imageImportConfig.ContainerConfig = newConfig

		opErr = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig)
	}

	if opErr != nil {
		sf := streamformatter.NewStreamFormatter(useJSON)
		return fmt.Errorf(string(sf.FormatError(opErr)))
	}

	return nil
}
func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	var since int64 = -1
	if r.Form.Get("since") != "" {
		s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64)
		if err != nil {
			return err
		}
		since = s
	}

	var until int64 = -1
	if r.Form.Get("until") != "" {
		u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64)
		if err != nil {
			return err
		}
		until = u
	}
	timer := time.NewTimer(0)
	timer.Stop()
	if until > 0 {
		dur := time.Unix(until, 0).Sub(time.Now())
		timer = time.NewTimer(dur)
	}

	ef, err := filters.FromParam(r.Form.Get("filters"))
	if err != nil {
		return err
	}

	isFiltered := func(field string, filter []string) bool {
		if len(filter) == 0 {
			return false
		}
		for _, v := range filter {
			if v == field {
				return false
			}
			if strings.Contains(field, ":") {
				image := strings.Split(field, ":")
				if image[0] == v {
					return false
				}
			}
		}
		return true
	}

	d := getDaemon(eng)
	es := d.EventsService
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(utils.NewWriteFlusher(w))

	getContainerId := func(cn string) string {
		c, err := d.Get(cn)
		if err != nil {
			return ""
		}
		return c.ID
	}

	sendEvent := func(ev *jsonmessage.JSONMessage) error {
		// incoming container filter can be name, id or partial id, convert and replace as a full container id
		for i, cn := range ef["container"] {
			ef["container"][i] = getContainerId(cn)
		}

		if isFiltered(ev.Status, ef["event"]) || isFiltered(ev.From, ef["image"]) ||
			isFiltered(ev.ID, ef["container"]) {
			return nil
		}

		return enc.Encode(ev)
	}

	current, l := es.Subscribe()
	defer es.Evict(l)
	for _, ev := range current {
		if ev.Time < since {
			continue
		}
		if err := sendEvent(ev); err != nil {
			return err
		}
	}
	for {
		select {
		case ev := <-l:
			jev, ok := ev.(*jsonmessage.JSONMessage)
			if !ok {
				continue
			}
			if err := sendEvent(jev); err != nil {
				return err
			}
		case <-timer.C:
			return nil
		}
	}
}
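// getEvents above streams one JSON-encoded jsonmessage.JSONMessage per event over a
// long-lived response. A hedged sketch of a client consuming such a stream with
// json.Decoder (only a subset of fields is shown, as an illustrative assumption;
// assumes "encoding/json" and "io"):
func readEvents(r io.Reader, handle func(status, id string)) error {
	dec := json.NewDecoder(r)
	for {
		var ev struct {
			Status string `json:"status"`
			ID     string `json:"id"`
		}
		if err := dec.Decode(&ev); err != nil {
			if err == io.EOF {
				return nil // server closed the stream
			}
			return err
		}
		handle(ev.Status, ev.ID)
	}
}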
func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version.LessThan("1.3") {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	var (
		authEncoded       = r.Header.Get("X-Registry-Auth")
		authConfig        = &cliconfig.AuthConfig{}
		configFileEncoded = r.Header.Get("X-Registry-Config")
		configFile        = &cliconfig.ConfigFile{}
		buildConfig       = builder.NewBuildConfig()
	)

	// This block can be removed when API versions prior to 1.9 are deprecated.
	// Both headers will be parsed and sent along to the daemon, but if a non-empty
	// ConfigFile is present, any value provided as an AuthConfig directly will
	// be overridden. See BuildFile::CmdFrom for details.
	if version.LessThan("1.9") && authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &cliconfig.AuthConfig{}
		}
	}

	if configFileEncoded != "" {
		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			configFile = &cliconfig.ConfigFile{}
		}
	}

	if version.GreaterThanOrEqualTo("1.8") {
		w.Header().Set("Content-Type", "application/json")
		buildConfig.JSONFormat = true
	}

	if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else {
		buildConfig.Remove = boolValue(r, "rm")
	}
	if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
		buildConfig.Pull = true
	}

	output := utils.NewWriteFlusher(w)
	buildConfig.Stdout = output
	buildConfig.Context = r.Body

	buildConfig.RemoteURL = r.FormValue("remote")
	buildConfig.DockerfileName = r.FormValue("dockerfile")
	buildConfig.RepoName = r.FormValue("t")
	buildConfig.SuppressOutput = boolValue(r, "q")
	buildConfig.NoCache = boolValue(r, "nocache")
	buildConfig.ForceRemove = boolValue(r, "forcerm")
	buildConfig.AuthConfig = authConfig
	buildConfig.ConfigFile = configFile
	buildConfig.MemorySwap = int64ValueOrZero(r, "memswap")
	buildConfig.Memory = int64ValueOrZero(r, "memory")
	buildConfig.CpuShares = int64ValueOrZero(r, "cpushares")
	buildConfig.CpuQuota = int64ValueOrZero(r, "cpuquota")
	buildConfig.CpuSetCpus = r.FormValue("cpusetcpus")
	buildConfig.CpuSetMems = r.FormValue("cpusetmems")
	buildConfig.CgroupParent = r.FormValue("cgroupparent")

	// Job cancellation. Note: not all job types support this.
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		finished := make(chan struct{})
		defer close(finished)
		go func() {
			select {
			case <-finished:
			case <-closeNotifier.CloseNotify():
				logrus.Infof("Client disconnected, cancelling job: build")
				buildConfig.Cancel()
			}
		}()
	}

	if err := builder.Build(s.daemon, buildConfig); err != nil {
		// Do not write the error in the http output if it's still empty.
		// This prevents writing a 200 (OK) when there is an internal error.
		if !output.Flushed() {
			return err
		}
		sf := streamformatter.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
		w.Write(sf.FormatError(err))
	}
	return nil
}
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	log.Debugf("Local repo: %s", localRepo)
	imgList, tagsByImage, err := s.getImageList(localRepo, tag)
	if err != nil {
		return err
	}

	out.Write(sf.FormatStatus("", "Sending image list"))

	var (
		repoData   *registry.RepositoryData
		imageIndex []*registry.ImgData
	)

	for _, imgId := range imgList {
		if tags, exists := tagsByImage[imgId]; exists {
			// If an image has tags you must add an entry in the image index
			// for each tag
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  imgId,
					Tag: tag,
				})
			}
		} else {
			// If the image does not have a tag it still needs to be sent to the
			// registry with an empty tag so that it is associated with the repository
			imageIndex = append(imageIndex, &registry.ImgData{
				ID:  imgId,
				Tag: "",
			})
		}
	}

	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
	for _, data := range imageIndex {
		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}

	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))

		for _, imgId := range imgList {
			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
			} else {
				if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
					// FIXME: Continue on error?
					return err
				}
			}

			for _, tag := range tagsByImage[imgId] {
				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))

				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
					return err
				}
			}
		}
	}

	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
		return err
	}
	return nil
}