// postImagesCreate creates an image, either by pulling it from a registry or by importing it.
func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}

	var (
		image = r.Form.Get("fromImage")
		repo  = r.Form.Get("repo")
		tag   = r.Form.Get("tag")
		job   *engine.Job
	)
	authEncoded := r.Header.Get("X-Registry-Auth")
	authConfig := &registry.AuthConfig{}
	if authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// For a pull it is not an error if no auth was given; to stay
			// compatible with the existing API, default to an empty config.
			authConfig = &registry.AuthConfig{}
		}
	}
	if image != "" { // pull
		if tag == "" {
			image, tag = parsers.ParseRepositoryTag(image)
		}
		metaHeaders := map[string][]string{}
		for k, v := range r.Header {
			if strings.HasPrefix(k, "X-Meta-") {
				metaHeaders[k] = v
			}
		}
		job = eng.Job("pull", image, tag)
		job.SetenvBool("parallel", version.GreaterThan("1.3"))
		job.SetenvJson("metaHeaders", metaHeaders)
		job.SetenvJson("authConfig", authConfig)
	} else { // import
		if tag == "" {
			repo, tag = parsers.ParseRepositoryTag(repo)
		}
		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
		job.Stdin.Add(r.Body)
	}

	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
		w.Write(sf.FormatError(err))
	}

	return nil
}
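// Sketch (not part of the original file): how a client might build the
// X-Registry-Auth header that postImagesCreate decodes above. The handler
// expects URL-safe base64 over a JSON-encoded registry.AuthConfig; the JSON
// field names used here ("username", "password", "serveraddress") are
// assumptions shown only for illustration, not a documented contract.
func encodeRegistryAuthHeaderSketch(username, password, serverAddress string) (string, error) {
	payload := struct {
		Username      string `json:"username"`
		Password      string `json:"password"`
		ServerAddress string `json:"serveraddress"`
	}{username, password, serverAddress}
	buf, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	// URLEncoding matches the base64.URLEncoding decoder used by the handler.
	return base64.URLEncoding.EncodeToString(buf), nil
}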
func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
	w.Header().Set("Content-Type", "application/json")
	if flush {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	} else {
		job.Stdout.Add(w)
	}
}
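// Sketch (illustration only, not the utils package implementation): the idea
// behind utils.NewWriteFlusher used throughout these handlers, assuming it
// flushes the HTTP response after every write so progress output reaches the
// client immediately instead of sitting in a buffer. Assumes the wrapped
// writer also implements http.Flusher, as http.ResponseWriter usually does.
type writeFlusherSketch struct {
	w io.Writer
	f http.Flusher
}

func (wf *writeFlusherSketch) Write(p []byte) (int, error) {
	n, err := wf.w.Write(p)
	wf.f.Flush() // push the bytes to the client right away
	return n, err
}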
func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	metaHeaders := map[string][]string{}
	for k, v := range r.Header {
		if strings.HasPrefix(k, "X-Meta-") {
			metaHeaders[k] = v
		}
	}
	if err := parseForm(r); err != nil {
		return err
	}
	authConfig := &registry.AuthConfig{}

	authEncoded := r.Header.Get("X-Registry-Auth")
	if authEncoded != "" {
		// The new format passes the auth config in the X-Registry-Auth header.
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// To stay compatible with the existing API, default to an empty config.
			authConfig = &registry.AuthConfig{}
		}
	} else {
		// The old format, with the auth config in the request body, is still
		// supported when no X-Registry-Auth header is present.
		if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
			return err
		}
	}

	job := eng.Job("push", vars["name"])
	job.SetenvJson("metaHeaders", metaHeaders)
	job.SetenvJson("authConfig", authConfig)
	job.Setenv("tag", r.Form.Get("tag"))
	if version.GreaterThan("1.0") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
		w.Write(sf.FormatError(err))
	}
	return nil
}
func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var (
		inspectJob = eng.Job("container_inspect", vars["name"])
		logsJob    = eng.Job("logs", vars["name"])
		c, err     = inspectJob.Stdout.AddEnv()
	)
	if err != nil {
		return err
	}
	logsJob.Setenv("follow", r.Form.Get("follow"))
	logsJob.Setenv("tail", r.Form.Get("tail"))
	logsJob.Setenv("stdout", r.Form.Get("stdout"))
	logsJob.Setenv("stderr", r.Form.Get("stderr"))
	logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
	// Validate the arguments here, because we can no longer return anything
	// other than StatusOK once job.Run() has started writing the response.
	stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
	if !(stdout || stderr) {
		return fmt.Errorf("Bad parameters: you must choose at least one stream")
	}
	if err = inspectJob.Run(); err != nil {
		return err
	}

	var outStream, errStream io.Writer
	outStream = utils.NewWriteFlusher(w)

	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
		errStream = utils.NewStdWriter(outStream, utils.Stderr)
		outStream = utils.NewStdWriter(outStream, utils.Stdout)
	} else {
		errStream = outStream
	}

	logsJob.Stdout.Add(outStream)
	logsJob.Stderr.Set(errStream)
	if err := logsJob.Run(); err != nil {
		fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
	}
	return nil
}
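// Sketch (not from the original file): when the container has no TTY and the
// API version is >= 1.6, getContainersLogs multiplexes stdout and stderr onto
// one connection via utils.NewStdWriter. The framing assumed here (an 8-byte
// header: stream type in byte 0, payload length as a big-endian uint32 in
// bytes 4-7, then the payload) is how a client could demultiplex that stream;
// treat the exact header layout as an assumption, not a documented contract.
func demuxLogStreamSketch(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])
		dst := stdout
		if header[0] == 2 { // assumed: 2 marks the stderr stream
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}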
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := s.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
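// Sketch (illustration only): the sf.FormatProgress / sf.FormatStatus calls
// above emit one JSON object per message when the formatter is in JSON mode.
// A client could consume that stream roughly as below; the field names
// ("status", "id", "error") are assumptions about the wire format, kept
// deliberately minimal.
func readPushStreamSketch(r io.Reader) error {
	dec := json.NewDecoder(r)
	for {
		var msg struct {
			Status string `json:"status"`
			ID     string `json:"id"`
			Error  string `json:"error"`
		}
		if err := dec.Decode(&msg); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if msg.Error != "" {
			return fmt.Errorf("push failed: %s", msg.Error)
		}
		fmt.Printf("%s %s\n", msg.ID, msg.Status)
	}
}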
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	log.Debugf("Local repo: %s", localRepo)
	imgList, tagsByImage, err := s.getImageList(localRepo, tag)
	if err != nil {
		return err
	}

	out.Write(sf.FormatStatus("", "Sending image list"))

	var (
		repoData   *registry.RepositoryData
		imageIndex []*registry.ImgData
	)

	for _, imgId := range imgList {
		if tags, exists := tagsByImage[imgId]; exists {
			// If an image has tags, add one entry to the image index per tag.
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  imgId,
					Tag: tag,
				})
			}
		} else {
			// If an image has no tag, it still needs to be sent to the registry
			// with an empty tag so that it is associated with the repository.
			imageIndex = append(imageIndex, &registry.ImgData{
				ID:  imgId,
				Tag: "",
			})
		}
	}

	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
	for _, data := range imageIndex {
		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in the repository with the registry. Images that
	// are not in this list will not be associated with the repository.
	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}

	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))

		for _, imgId := range imgList {
			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
			} else {
				if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
					// FIXME: Continue on error?
					return err
				}
			}

			for _, tag := range tagsByImage[imgId] {
				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))

				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
					return err
				}
			}
		}
	}

	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
		return err
	}

	return nil
}
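// Sketch (illustration only): the index-building rule from pushRepository
// above, shown on toy data. Each image ID gets one index entry per tag; an
// image with no tags still gets a single entry with an empty tag so the
// registry can associate it with the repository. Plain structs stand in for
// registry.ImgData here.
func buildImageIndexSketch(imgList []string, tagsByImage map[string][]string) []struct{ ID, Tag string } {
	var index []struct{ ID, Tag string }
	for _, id := range imgList {
		tags, ok := tagsByImage[id]
		if !ok {
			// Untagged image: one entry with an empty tag.
			index = append(index, struct{ ID, Tag string }{id, ""})
			continue
		}
		for _, tag := range tags {
			index = append(index, struct{ ID, Tag string }{id, tag})
		}
	}
	return index
}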
func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version.LessThan("1.3") {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	var (
		authEncoded       = r.Header.Get("X-Registry-Auth")
		authConfig        = &registry.AuthConfig{}
		configFileEncoded = r.Header.Get("X-Registry-Config")
		configFile        = &registry.ConfigFile{}
		job               = eng.Job("build")
	)

	// This block can be removed when API versions prior to 1.9 are deprecated.
	// Both headers will be parsed and sent along to the daemon, but if a non-empty
	// ConfigFile is present, any value provided as an AuthConfig directly will
	// be overridden. See BuildFile::CmdFrom for details.
	if version.LessThan("1.9") && authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// It is not an error if no auth was given; to stay compatible with
			// the existing API, default to an empty config.
			authConfig = &registry.AuthConfig{}
		}
	}

	if configFileEncoded != "" {
		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
			// It is not an error if no config file was given; to stay compatible
			// with the existing API, default to an empty config.
			configFile = &registry.ConfigFile{}
		}
	}

	if version.GreaterThanOrEqualTo("1.8") {
		job.SetenvBool("json", true)
		streamJSON(job, w, true)
	} else {
		job.Stdout.Add(utils.NewWriteFlusher(w))
	}

	if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		job.Setenv("rm", "1")
	} else {
		job.Setenv("rm", r.FormValue("rm"))
	}
	job.Stdin.Add(r.Body)
	job.Setenv("remote", r.FormValue("remote"))
	job.Setenv("t", r.FormValue("t"))
	job.Setenv("q", r.FormValue("q"))
	job.Setenv("nocache", r.FormValue("nocache"))
	job.Setenv("forcerm", r.FormValue("forcerm"))
	job.SetenvJson("authConfig", authConfig)
	job.SetenvJson("configFile", configFile)

	if err := job.Run(); err != nil {
		if !job.Stdout.Used() {
			return err
		}
		sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
		w.Write(sf.FormatError(err))
	}
	return nil
}