func (t *TarSum) Compute(seed []byte) string {
	logger.Debug("[TarSumCompute] seed:\n<<%s>>", seed)
	sort.Strings(t.hashes)
	t.sha.Reset()
	t.sha.Write(seed)
	for _, hash := range t.hashes {
		t.sha.Write([]byte(hash))
	}
	tarsum := "tarsum+sha256:" + hex.EncodeToString(t.sha.Sum(nil))
	logger.Debug("[TarSumCompute] return %s", tarsum)
	return tarsum
}
// Append hashes a single tar entry: a canonical string built from the header
// fields is hashed together with the entry's contents, and the resulting hex
// digest is collected for the final Compute.
func (t *TarSum) Append(header *tar.Header, reader io.Reader) {
	headerStr := "name" + header.Name
	headerStr += fmt.Sprintf("mode%d", header.Mode)
	headerStr += fmt.Sprintf("uid%d", header.Uid)
	headerStr += fmt.Sprintf("gid%d", header.Gid)
	headerStr += fmt.Sprintf("size%d", header.Size)
	headerStr += fmt.Sprintf("mtime%d", header.ModTime.UTC().Unix())
	headerStr += fmt.Sprintf("typeflag%c", header.Typeflag)
	headerStr += "linkname" + header.Linkname
	headerStr += "uname" + header.Uname
	headerStr += "gname" + header.Gname
	headerStr += fmt.Sprintf("devmajor%d", header.Devmajor)
	headerStr += fmt.Sprintf("devminor%d", header.Devminor)

	t.sha.Reset()
	if header.Size > int64(0) {
		t.sha.Write([]byte(headerStr))
		if _, err := io.Copy(t.sha, reader); err != nil {
			// fall back to hashing the header alone if the body cannot be read
			logger.Debug("[TarSumAppend] error copying to sha: %s", err.Error())
			t.sha.Reset()
			t.sha.Write([]byte(headerStr))
		}
	} else {
		t.sha.Write([]byte(headerStr))
	}
	t.hashes = append(t.hashes, hex.EncodeToString(t.sha.Sum(nil)))
}
// Load walks a (possibly gzip-compressed) tar stream and feeds every entry to
// both the TarSum and TarFilesInfo collectors.
func (t *TarInfo) Load(file io.ReadSeeker) {
	var reader *tar.Reader
	file.Seek(0, 0)
	gzipReader, err := gzip.NewReader(file)
	if err != nil {
		// likely not a gzip-compressed file
		file.Seek(0, 0)
		reader = tar.NewReader(file)
	} else {
		reader = tar.NewReader(gzipReader)
	}
	for {
		header, err := reader.Next()
		if err == io.EOF {
			// end of tar file
			break
		} else if err != nil {
			// an error occurred
			logger.Debug("[TarInfoLoad] Error when reading tar stream tarsum. Disabling TarSum, TarFilesInfo. Error: %s", err.Error())
			t.Error = TarError(err.Error())
			return
		}
		t.TarSum.Append(header, reader)
		t.TarFilesInfo.Append(header)
	}
}
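// Illustrative usage sketch (an assumption, not part of the original source):
// given a *TarInfo whose TarSum hash state is already initialized and a
// seekable layer stream, the layer checksum could be derived roughly like
// this. The names info, layerFile and imageJson are hypothetical.
//
//	info.Load(layerFile)                       // hash every tar entry
//	checksum := info.TarSum.Compute(imageJson) // -> "tarsum+sha256:<hex>"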
func (a *RegistryAPI) GetRepoTagsHandler(w http.ResponseWriter, r *http.Request) {
	namespace, repo, _ := parseRepo(r, "")
	logger.Debug("[GetRepoTags] namespace=%s; repository=%s", namespace, repo)
	names, err := a.Storage.List(storage.RepoTagPath(namespace, repo, ""))
	if err != nil {
		a.response(w, "Repository not found: "+err.Error(), http.StatusNotFound, EMPTY_HEADERS)
		return
	}
	data := map[string]string{}
	for _, name := range names {
		base := path.Base(name)
		if !strings.HasPrefix(base, storage.TAG_PREFIX) {
			continue
		}
		// this is a tag
		tagName := strings.TrimPrefix(base, storage.TAG_PREFIX)
		content, err := a.Storage.Get(name)
		if err != nil {
			a.internalError(w, err.Error())
			return
		}
		data[tagName] = string(content)
	}
	a.response(w, data, http.StatusOK, EMPTY_HEADERS)
}
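// Assuming a.response serializes the map as JSON, the GetRepoTags body is an
// object mapping tag names to image IDs, for example (illustrative values only):
//
//	{"latest": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9"}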
func (a *RegistryAPI) DeleteRepoTagsHandler(w http.ResponseWriter, r *http.Request) {
	namespace, repo, _ := parseRepo(r, "")
	logger.Debug("[DeleteRepoTags] namespace=%s; repository=%s", namespace, repo)
	if err := a.Storage.RemoveAll(storage.RepoTagPath(namespace, repo, "")); err != nil {
		a.response(w, "Repository not found: "+err.Error(), http.StatusNotFound, EMPTY_HEADERS)
		return
	}
	a.response(w, true, http.StatusOK, EMPTY_HEADERS)
}
func (a *RegistryAPI) GetRepoTagHandler(w http.ResponseWriter, r *http.Request) {
	namespace, repo, tag := parseRepo(r, "tag")
	logger.Debug("[GetRepoTag] namespace=%s; repository=%s; tag=%s", namespace, repo, tag)
	content, err := a.Storage.Get(storage.RepoTagPath(namespace, repo, tag))
	if err != nil {
		a.response(w, "Tag not found: "+err.Error(), http.StatusNotFound, EMPTY_HEADERS)
		return
	}
	a.response(w, content, http.StatusOK, EMPTY_HEADERS)
}
// PutRepoTagHandler points a tag at an existing image and refreshes the
// repository metadata json.
func (a *RegistryAPI) PutRepoTagHandler(w http.ResponseWriter, r *http.Request) {
	namespace, repo, tag := parseRepo(r, "tag")
	logger.Debug("[PutRepoTag] namespace=%s; repository=%s; tag=%s", namespace, repo, tag)
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		a.response(w, "Error reading request body: "+err.Error(), http.StatusBadRequest, EMPTY_HEADERS)
		return
	} else if len(data) == 0 {
		a.response(w, "Empty data", http.StatusBadRequest, EMPTY_HEADERS)
		return
	}
	logger.Debug("[PutRepoTag] body:\n%s", data)
	imageID := strings.Trim(string(data), "\"") // trim quotes if present
	if exists, err := a.Storage.Exists(storage.ImageJsonPath(imageID)); err != nil || !exists {
		// don't dereference err when the image simply does not exist
		msg := "Image not found"
		if err != nil {
			msg += ": " + err.Error()
		}
		a.response(w, msg, http.StatusNotFound, EMPTY_HEADERS)
		return
	}
	err = a.Storage.Put(storage.RepoTagPath(namespace, repo, tag), []byte(imageID))
	if err != nil {
		a.internalError(w, err.Error())
		return
	}
	uaStrings := r.Header["User-Agent"]
	uaString := ""
	if len(uaStrings) > 0 {
		// just use the first one. there *should* only be one to begin with.
		uaString = uaStrings[0]
	}
	dataMap := CreateRepoJson(uaString)
	jsonData, err := json.Marshal(&dataMap)
	if err != nil {
		a.internalError(w, err.Error())
		return
	}
	a.Storage.Put(storage.RepoTagJsonPath(namespace, repo, tag), jsonData)
	if tag == "latest" {
		a.Storage.Put(storage.RepoJsonPath(namespace, repo), jsonData)
	}
	a.response(w, true, http.StatusOK, EMPTY_HEADERS)
}
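// Illustrative request (an assumption: routing is defined outside this section,
// but if this handler is mounted on a Docker registry v1 style tag route,
// tagging an image could look like):
//
//	PUT /v1/repositories/<namespace>/<repository>/tags/<tag>
//	Content-Type: application/json
//
//	"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"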
func (a *RegistryAPI) GetRepoJsonHandler(w http.ResponseWriter, r *http.Request) {
	namespace, repo, _ := parseRepo(r, "")
	logger.Debug("[GetRepoJson] namespace=%s; repository=%s", namespace, repo)
	content, err := a.Storage.Get(storage.RepoJsonPath(namespace, repo))
	if err != nil {
		// docker-registry has this error ignored. so i guess we will too...
		a.response(w, EMPTY_REPO_JSON, http.StatusOK, EMPTY_HEADERS)
		return
	}
	var data map[string]interface{}
	if err := json.Unmarshal(content, &data); err != nil {
		// docker-registry has this error ignored. so i guess we will too...
		a.response(w, EMPTY_REPO_JSON, http.StatusOK, EMPTY_HEADERS)
		return
	}
	a.response(w, data, http.StatusOK, EMPTY_HEADERS)
}
// GenerateAncestry writes the ancestry json for imageID: the image's own ID
// followed by its parent's ancestry, newest layer first.
func GenerateAncestry(s storage.Storage, imageID, parentID string) (err error) {
	logger.Debug("[GenerateAncestry] imageID=" + imageID + " parentID=" + parentID)
	path := storage.ImageAncestryPath(imageID)
	if parentID == "" {
		// base layer: the ancestry is just the image itself
		return s.Put(path, []byte(`["`+imageID+`"]`))
	}
	var content []byte
	if content, err = s.Get(storage.ImageAncestryPath(parentID)); err != nil {
		return err
	}
	var ancestry []string
	if err := json.Unmarshal(content, &ancestry); err != nil {
		return err
	}
	// prepend the new image to the parent's ancestry
	ancestry = append([]string{imageID}, ancestry...)
	if content, err = json.Marshal(&ancestry); err != nil {
		return err
	}
	return s.Put(path, content)
}
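// For example (illustrative IDs only), an image whose parent chain is
// child -> parent -> base ends up with the ancestry document:
//
//	["<childID>", "<parentID>", "<baseID>"]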
// GenDiff calculates and caches the file diff for a layer.
//
// Comment from docker-registry 0.6.5:
// get json describing file differences in layer.
// Calculate the diff information for the files contained within
// the layer. Return a dictionary of lists grouped by whether they
// were deleted, changed or created in this layer.
//
// To determine what happened to a file in a layer we walk backwards
// through the ancestry until we see the file in an older layer. Based
// on whether the file was previously deleted or not we know whether
// the file was created or modified. If we do not find the file in an
// ancestor we know the file was just created.
//
//	- File marked as deleted by union fs tar: DELETED
//	- Ancestor contains non-deleted file: CHANGED
//	- Ancestor contains deleted marked file: CREATED
//	- No ancestor contains file: CREATED
func GenDiff(s storage.Storage, imageID string) {
	diffJson, err := GetImageDiffCache(s, imageID)
	if err == nil && diffJson != nil {
		// cache hit, just return
		logger.Debug("[GenDiff][" + imageID + "] already exists")
		return
	}

	anPath := storage.ImageAncestryPath(imageID)
	anContent, err := s.Get(anPath)
	if err != nil {
		// error fetching ancestry, just return
		logger.Error("[GenDiff][" + imageID + "] error fetching ancestry: " + err.Error())
		return
	}
	var ancestry []string
	if err := json.Unmarshal(anContent, &ancestry); err != nil {
		// json unmarshal failed, just return
		logger.Error("[GenDiff][" + imageID + "] error unmarshalling ancestry json: " + err.Error())
		return
	}

	// get map of file infos for the layer itself
	infoMap, err := fileInfoMap(s, imageID)
	if err != nil {
		// error getting file info, just return
		logger.Error("[GenDiff][" + imageID + "] error getting files info: " + err.Error())
		return
	}

	deleted := map[string][]interface{}{}
	changed := map[string][]interface{}{}
	created := map[string][]interface{}{}

	for _, anID := range ancestry {
		anInfoMap, err := fileInfoMap(s, anID)
		if err != nil {
			// error getting file info, just return
			logger.Error("[GenDiff][" + imageID + "] error getting ancestor " + anID + " files info: " + err.Error())
			return
		}
		for fname, info := range infoMap {
			isDeleted, isBool := (info[1]).(bool)
			// if the file info is in a bad format (isDeleted is not a bool), we should just assume it is deleted.
			// technically isBool should never be false.
			if !isBool || isDeleted {
				if !isBool {
					logger.Error("[GenDiff][" + imageID + "] file info is in a bad format")
				}
				deleted[fname] = info
				delete(infoMap, fname)
				continue
			}
			anInfo := anInfoMap[fname]
			if anInfo == nil {
				// not in this ancestor; keep looking in older layers. do nothing.
				continue
			}
			isDeleted, isBool = anInfo[1].(bool)
			if !isBool || isDeleted {
				if !isBool {
					logger.Error("[GenDiff][" + imageID + "] file info is in a bad format")
				}
				// deleted in ancestor, must be created now.
				created[fname] = info
			} else {
				// not deleted in ancestor, must have just changed now.
				changed[fname] = info
			}
			delete(infoMap, fname)
		}
	}

	// whatever is left in infoMap was never seen in an ancestor: created
	for fname, info := range infoMap {
		created[fname] = info
	}

	diff := map[string]map[string][]interface{}{
		"deleted": deleted,
		"changed": changed,
		"created": created,
	}
	if diffJson, err = json.Marshal(&diff); err != nil {
		// json marshal failed, just return
		logger.Error("[GenDiff][" + imageID + "] error marshalling new diff json: " + err.Error())
		return
	}
	if err := SetImageDiffCache(s, imageID, diffJson); err != nil {
		// error writing the diff cache, just return
		logger.Error("[GenDiff][" + imageID + "] error setting new diff cache: " + err.Error())
		return
	}
}
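// The cached diff document therefore has the shape below (illustrative paths,
// and the per-file info slices are elided):
//
//	{
//	    "deleted": {"/etc/old.conf": [...]},
//	    "changed": {"/etc/app.conf": [...]},
//	    "created": {"/usr/bin/app": [...]}
//	}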