// GetHandler returns the JSON for a specified role and GUN.
func GetHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	s := ctx.Value("metaStore")
	store, ok := s.(storage.MetaStore)
	if !ok {
		return errors.ErrNoStorage.WithDetail(nil)
	}

	vars := mux.Vars(r)
	gun := vars["imageName"]
	tufRole := vars["tufRole"]

	logger := ctxu.GetLoggerWithFields(ctx, map[string]interface{}{"gun": gun, "tufRole": tufRole})

	out, err := store.GetCurrent(gun, tufRole)
	if err != nil {
		// storage.ErrNotFound is returned by value elsewhere in this file,
		// so assert on the value type rather than a pointer.
		if _, ok := err.(storage.ErrNotFound); ok {
			return errors.ErrMetadataNotFound.WithDetail(nil)
		}
		logger.Error("500 GET")
		return errors.ErrUnknown.WithDetail(err)
	}
	if out == nil {
		logger.Error("404 GET")
		return errors.ErrMetadataNotFound.WithDetail(nil)
	}

	w.Write(out)
	logger.Debug("200 GET")
	return nil
}
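// A minimal sketch of how a handler with this signature might be mounted,
// assuming a small adapter that stores the MetaStore under the "metaStore"
// context key and renders the returned error. The adapter, route pattern,
// and helper name below are illustrative assumptions, not the actual server
// wiring; imports are as in the surrounding file (net/http, gorilla/mux,
// the context and storage packages).
func hypotheticalRoutes(store storage.MetaStore) *mux.Router {
	wrap := func(h func(context.Context, http.ResponseWriter, *http.Request) error) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			// Place the store under the key GetHandler looks up via ctx.Value.
			ctx := context.WithValue(context.Background(), "metaStore", store)
			if err := h(ctx, w, r); err != nil {
				// Real servers translate the typed error into a proper status
				// code; a 500 is used here purely as a placeholder.
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
		}
	}

	r := mux.NewRouter()
	// The route variables must be named imageName and tufRole, because
	// GetHandler reads them from mux.Vars(r).
	r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufRole}.json").
		HandlerFunc(wrap(GetHandler))
	return r
}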
// copyFullPayload copies the payload of an HTTP request to destWriter. If it
// receives less content than expected, and the client disconnected during the
// upload, it avoids sending a 400 error to keep the logs cleaner.
func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
	// Get a channel that tells us if the client disconnects
	var clientClosed <-chan bool
	if notifier, ok := responseWriter.(http.CloseNotifier); ok {
		clientClosed = notifier.CloseNotify()
	} else {
		ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter)
	}

	// Read in the data, if any.
	copied, err := io.Copy(destWriter, r.Body)
	if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
		// Didn't receive as much content as expected. Did the client
		// disconnect during the request? If so, avoid returning a 400
		// error to keep the logs cleaner.
		select {
		case <-clientClosed:
			// Set the response code to "499 Client Closed Request".
			// Even though the connection has already been closed,
			// this causes the logger to pick up a 499 error
			// instead of showing 0 for the HTTP status.
			responseWriter.WriteHeader(499)

			ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{
				"error":         err,
				"copied":        copied,
				"contentLength": r.ContentLength,
			}, "error", "copied", "contentLength").Error("client disconnected during " + action)
			return errors.New("client disconnected")
		default:
		}
	}

	if err != nil {
		ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
		*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
		return err
	}

	return nil
}
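// A hypothetical caller, sketching the intended use of copyFullPayload from
// an upload handler: stream the request body into the destination writer and
// return quietly if the client disconnected mid-upload. The handler type and
// its fields are assumptions for illustration, not the real handler.
type hypotheticalUploadHandler struct {
	dest   io.Writer      // where the payload bytes go, e.g. a blob writer
	ctx    ctxu.Context   // request-scoped context used for logging
	errors errcode.Errors // accumulated errors to render in the response
}

func (h *hypotheticalUploadHandler) patchBlobData(w http.ResponseWriter, r *http.Request) {
	if err := copyFullPayload(w, r, h.dest, h.ctx, "blob PATCH", &h.errors); err != nil {
		// On a client disconnect copyFullPayload has already written the 499
		// status; for other errors it appended to h.errors. Either way, stop.
		return
	}

	// ... commit the upload and respond with 202 Accepted ...
}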
// getHandler returns the JSON for a specified role and GUN, dispatching the
// timestamp and snapshot roles to dedicated helpers.
func getHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	gun := vars["imageName"]
	tufRole := vars["tufRole"]
	s := ctx.Value("metaStore")

	store, ok := s.(storage.MetaStore)
	if !ok {
		return errors.ErrNoStorage.WithDetail(nil)
	}

	logger := ctxu.GetLoggerWithFields(ctx, map[string]interface{}{"gun": gun, "tufRole": tufRole})

	switch tufRole {
	case data.CanonicalTimestampRole:
		return getTimestamp(ctx, w, logger, store, gun)
	case data.CanonicalSnapshotRole:
		return getSnapshot(ctx, w, logger, store, gun)
	}

	out, err := store.GetCurrent(gun, tufRole)
	if err != nil {
		if _, ok := err.(storage.ErrNotFound); ok {
			// Use the fields-aware logger rather than a bare logrus call so
			// the gun and tufRole fields are attached consistently.
			logger.Error("404 GET " + gun + ":" + tufRole)
			return errors.ErrMetadataNotFound.WithDetail(nil)
		}
		logger.Error("500 GET")
		return errors.ErrUnknown.WithDetail(err)
	}
	if out == nil {
		logger.Error("404 GET")
		return errors.ErrMetadataNotFound.WithDetail(nil)
	}

	w.Write(out)
	logger.Debug("200 GET")
	return nil
}
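// A hedged test sketch for the fall-through 404 path, assuming the storage
// package's in-memory MetaStore constructor is named storage.NewMemStorage
// and using the standard httptest package; the GUN and role values are
// arbitrary.
func TestGetHandlerNotFound(t *testing.T) {
	ctx := context.WithValue(context.Background(), "metaStore", storage.NewMemStorage())

	rw := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatal(err)
	}

	// The vars map is passed directly, so no router is needed in the test.
	err = getHandler(ctx, rw, req, map[string]string{
		"imageName": "docker.io/library/alpine",
		"tufRole":   "targets",
	})
	if err == nil {
		t.Fatal("expected a metadata-not-found error for an empty store")
	}
}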
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if desc.Digest == "" {
		// if no descriptors are provided, we have nothing to validate
		// against. We don't really want to support this for the registry.
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Reason: fmt.Errorf("cannot validate against empty digest"),
		}
	}

	// Stat the on disk file
	if fi, err := bw.fileWriter.driver.Stat(ctx, bw.path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): We really don't care if the file is
			// not actually present for the reader. We now assume
			// that the desc length is zero.
			desc.Size = 0
		default:
			// Any other error we want propagated up the stack.
			return distribution.Descriptor{}, err
		}
	} else {
		if fi.IsDir() {
			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
		}

		bw.size = fi.Size()
	}

	if desc.Size > 0 {
		if desc.Size != bw.size {
			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
		}
	} else {
		// if provided 0 or negative length, we can assume caller doesn't know or
		// care about length.
		desc.Size = bw.size
	}

	// TODO(stevvooe): This section is very meandering. Need to be broken down
	// to be a lot more clear.

	if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
		canonical = bw.digester.Digest()

		if canonical.Algorithm() == desc.Digest.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = desc.Digest == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else if err == errResumableDigestNotAvailable {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	} else {
		return distribution.Descriptor{}, err
	}

	if fullHash {
		// a fantastic optimization: if the written data and the size are
		// the same, we don't need to read the data from the backend. This is
		// because we've written the entire file in the lifecycle of the
		// current instance.
		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
			canonical = bw.digester.Digest()
			verified = desc.Digest == canonical
		}

		// If the check based on size fails, we fall back to the slowest of
		// paths. We may be able to make the size-based check a stronger
		// guarantee, so this may be defensive.
		if !verified {
			digester := digest.Canonical.New()

			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
			if err != nil {
				return distribution.Descriptor{}, err
			}

			// Read the file from the backend driver and validate it.
			fr, err := newFileReader(ctx, bw.fileWriter.driver, bw.path, desc.Size)
			if err != nil {
				return distribution.Descriptor{}, err
			}
			defer fr.Close()

			tr := io.TeeReader(fr, digester.Hash())

			if _, err := io.Copy(digestVerifier, tr); err != nil {
				return distribution.Descriptor{}, err
			}

			canonical = digester.Digest()
			verified = digestVerifier.Verified()
		}
	}

	if !verified {
		context.GetLoggerWithFields(ctx,
			map[interface{}]interface{}{
				"canonical": canonical,
				"provided":  desc.Digest,
			}, "canonical", "provided").
			Errorf("canonical digest does not match provided digest")

		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Digest: desc.Digest,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	// update desc with canonical hash
	desc.Digest = canonical

	if desc.MediaType == "" {
		desc.MediaType = "application/octet-stream"
	}

	return desc, nil
}