Example #1
// rotateKeyHandler rotates the timestamp or snapshot key for the GUN in the
// request and returns the new public key as JSON.
func rotateKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	role, gun, keyAlgorithm, store, crypto, err := setupKeyHandler(ctx, w, r, vars, http.MethodPost)
	if err != nil {
		return err
	}
	var key data.PublicKey
	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
	switch role {
	case data.CanonicalTimestampRole:
		key, err = timestamp.RotateTimestampKey(gun, store, crypto, keyAlgorithm)
	case data.CanonicalSnapshotRole:
		key, err = snapshot.RotateSnapshotKey(gun, store, crypto, keyAlgorithm)
	default:
		logger.Infof("400 POST %s key: %v", role, err)
		return errors.ErrInvalidRole.WithDetail(role)
	}
	if err != nil {
		logger.Errorf("500 POST %s key: %v", role, err)
		return errors.ErrUnknown.WithDetail(err)
	}

	out, err := json.Marshal(key)
	if err != nil {
		logger.Errorf("500 POST %s key", role)
		return errors.ErrUnknown.WithDetail(err)
	}
	logger.Debugf("200 POST %s key", role)
	w.Write(out)
	return nil
}
Example #2
// setupKeyHandler extracts the GUN, TUF role, and the configured metadata
// store, crypto service, and key algorithm from the request context. To be
// called before getKeyHandler or rotateKeyHandler.
func setupKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string, actionVerb string) (string, string, string, storage.MetaStore, signed.CryptoService, error) {
	gun, ok := vars["imageName"]
	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
	if !ok || gun == "" {
		logger.Infof("400 %s no gun in request", actionVerb)
		return "", "", "", nil, nil, errors.ErrUnknown.WithDetail("no gun")
	}

	role, ok := vars["tufRole"]
	if !ok || role == "" {
		logger.Infof("400 %s no role in request", actionVerb)
		return "", "", "", nil, nil, errors.ErrUnknown.WithDetail("no role")
	}

	s := ctx.Value(notary.CtxKeyMetaStore)
	store, ok := s.(storage.MetaStore)
	if !ok || store == nil {
		logger.Errorf("500 %s storage not configured", actionVerb)
		return "", "", "", nil, nil, errors.ErrNoStorage.WithDetail(nil)
	}
	c := ctx.Value(notary.CtxKeyCryptoSvc)
	crypto, ok := c.(signed.CryptoService)
	if !ok || crypto == nil {
		logger.Errorf("500 %s crypto service not configured", actionVerb)
		return "", "", "", nil, nil, errors.ErrNoCryptoService.WithDetail(nil)
	}
	algo := ctx.Value(notary.CtxKeyKeyAlgo)
	keyAlgo, ok := algo.(string)
	if !ok || keyAlgo == "" {
		logger.Errorf("500 %s key algorithm not configured", actionVerb)
		return "", "", "", nil, nil, errors.ErrNoKeyAlgorithm.WithDetail(nil)
	}

	return role, gun, keyAlgo, store, crypto, nil
}
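The values setupKeyHandler reads must be installed on the request context by the server's wiring code. A minimal sketch of that wiring, assuming only the notary.CtxKey* keys used above; the withServices helper itself is hypothetical, not Notary's actual server setup:

func withServices(ctx context.Context, store storage.MetaStore, crypto signed.CryptoService, keyAlgo string) context.Context {
	ctx = context.WithValue(ctx, notary.CtxKeyMetaStore, store)
	ctx = context.WithValue(ctx, notary.CtxKeyCryptoSvc, crypto)
	ctx = context.WithValue(ctx, notary.CtxKeyKeyAlgo, keyAlgo)
	return ctx
}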
Example #3
// getHandler returns the stored TUF metadata for the requested GUN and role,
// optionally addressed by checksum, setting the Last-Modified header when known.
func getHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	gun := vars["imageName"]
	checksum := vars["checksum"]
	tufRole := vars["tufRole"]
	s := ctx.Value(notary.CtxKeyMetaStore)

	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")

	store, ok := s.(storage.MetaStore)
	if !ok {
		logger.Error("500 GET: no storage exists")
		return errors.ErrNoStorage.WithDetail(nil)
	}

	lastModified, output, err := getRole(ctx, store, gun, tufRole, checksum)
	if err != nil {
		logger.Infof("404 GET %s role", tufRole)
		return err
	}
	if lastModified != nil {
		// This should always be true, but in case it is nil and the Last-Modified
		// header is not set, the cache control handler will set the last modified
		// date to the beginning of time.
		utils.SetLastModifiedHeader(w.Header(), *lastModified)
	} else {
		logger.Warnf("Got bytes out for %s's %s (checksum: %s), but missing lastModified date",
			gun, tufRole, checksum)
	}

	w.Write(output)
	return nil
}
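A Last-Modified helper such as utils.SetLastModifiedHeader typically reduces to formatting the time in HTTP date format. This sketch is an assumption about the helper, not its actual source:

func setLastModifiedHeader(h http.Header, t time.Time) {
	h.Set("Last-Modified", t.UTC().Format(http.TimeFormat))
}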
Example #4
// GetTimestampHandler returns a timestamp.json given a GUN
func GetTimestampHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	s := ctx.Value("metaStore")
	store, ok := s.(storage.MetaStore)
	if !ok {
		return errors.ErrNoStorage.WithDetail(nil)
	}
	cryptoServiceVal := ctx.Value("cryptoService")
	cryptoService, ok := cryptoServiceVal.(signed.CryptoService)
	if !ok {
		return errors.ErrNoCryptoService.WithDetail(nil)
	}

	vars := mux.Vars(r)
	gun := vars["imageName"]
	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")

	out, err := timestamp.GetOrCreateTimestamp(gun, store, cryptoService)
	if err != nil {
		switch err.(type) {
		case *storage.ErrNoKey, *storage.ErrNotFound:
			logger.Error("404 GET timestamp")
			return errors.ErrMetadataNotFound.WithDetail(nil)
		default:
			logger.Error("500 GET timestamp")
			return errors.ErrUnknown.WithDetail(err)
		}
	}

	logger.Debug("200 GET timestamp")
	w.Write(out)
	return nil
}
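Handlers in this style return an error instead of writing a status code directly, so the server wraps them in an adapter that translates errors into HTTP responses. A sketch of such an adapter; the contextHandler type, the wrap function, and the error mapping here are illustrative assumptions, not Notary's actual server code:

type contextHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request) error

func wrap(ctx context.Context, h contextHandler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := h(ctx, w, r); err != nil {
			// A real server would map typed errors (e.g. ErrMetadataNotFound)
			// to specific status codes here.
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}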
Example #5
// getKeyHandler returns the timestamp or snapshot public key for a GUN,
// creating a new key-pair if one doesn't yet exist.
func getKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	gun, ok := vars["imageName"]
	if !ok || gun == "" {
		return errors.ErrUnknown.WithDetail("no gun")
	}
	role, ok := vars["tufRole"]
	if !ok || role == "" {
		return errors.ErrUnknown.WithDetail("no role")
	}

	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")

	s := ctx.Value("metaStore")
	store, ok := s.(storage.MetaStore)
	if !ok || store == nil {
		logger.Error("500 GET storage not configured")
		return errors.ErrNoStorage.WithDetail(nil)
	}
	c := ctx.Value("cryptoService")
	crypto, ok := c.(signed.CryptoService)
	if !ok || crypto == nil {
		logger.Error("500 GET crypto service not configured")
		return errors.ErrNoCryptoService.WithDetail(nil)
	}
	algo := ctx.Value("keyAlgorithm")
	keyAlgo, ok := algo.(string)
	if !ok || keyAlgo == "" {
		logger.Error("500 GET key algorithm not configured")
		return errors.ErrNoKeyAlgorithm.WithDetail(nil)
	}

	var (
		key data.PublicKey
		err error
	)
	switch role {
	case data.CanonicalTimestampRole:
		key, err = timestamp.GetOrCreateTimestampKey(gun, store, crypto, keyAlgo)
	case data.CanonicalSnapshotRole:
		key, err = snapshot.GetOrCreateSnapshotKey(gun, store, crypto, keyAlgo)
	default:
		logger.Errorf("400 GET %s key: invalid role", role)
		return errors.ErrInvalidRole.WithDetail(role)
	}
	if err != nil {
		logger.Errorf("500 GET %s key: %v", role, err)
		return errors.ErrUnknown.WithDetail(err)
	}

	out, err := json.Marshal(key)
	if err != nil {
		logger.Errorf("500 GET %s key", role)
		return errors.ErrUnknown.WithDetail(err)
	}
	logger.Debugf("200 GET %s key", role)
	w.Write(out)
	return nil
}
Example #6
// Fetch checks for the availability of the layer in the repository via the
// cache. If present, the metadata is resolved and the layer is returned. If
// any operation fails, the layer is read directly from the upstream. The
// results are cached, if possible.
func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) {
	ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Fetch(%q)", dgst)
	now := time.Now()
	defer func() {
		ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)).
			Infof("(*layerInfoCache).Fetch(%q)", dgst)
	}()

	atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1)
	available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst)
	if err != nil {
		ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err)
		goto fallback
	}

	if available {
		// fast path: get the layer info and return
		meta, err := lc.cache.Meta(lc.ctx, dgst)
		if err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err)
			goto fallback
		}

		atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1)
		return newLayerReader(lc.driver, dgst, meta.Path, meta.Length)
	}

	// NOTE(stevvooe): Unfortunately, the cache here only makes checks for
	// existing layers faster. We'd have to provide more careful
	// synchronization with the backend to make the missing case as fast.

fallback:
	atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1)
	layer, err := lc.LayerService.Fetch(dgst)
	if err != nil {
		return nil, err
	}

	// add the layer to the repository
	if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil {
		ctxu.GetLogger(lc.ctx).
			Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err)
	}

	// Look up the layer path and add it to the cache, if that succeeds. Note
	// that we still return the layer even if we have trouble caching it.
	if path, err := lc.resolveLayerPath(layer); err != nil {
		ctxu.GetLogger(lc.ctx).
			Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err)
	} else {
		// add the layer to the cache once we've resolved the path.
		if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err)
		}
	}

	return layer, err
}
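The cache interface implied by the calls in Fetch (and in Exists below) looks roughly as follows. This is reconstructed from usage, not the cache package's actual declaration:

type layerInfoCache interface {
	Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error)
	Add(ctx context.Context, repo string, dgst digest.Digest) error
	Meta(ctx context.Context, dgst digest.Digest) (cache.LayerMeta, error)
	SetMeta(ctx context.Context, dgst digest.Digest, meta cache.LayerMeta) error
}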
Example #7
// Finish marks the upload as completed, returning a valid handle to the
// uploaded layer. The final size and checksum are validated against the
// contents of the uploaded layer. The checksum should be provided in the
// format <algorithm>:<hex digest>.
func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) {
	context.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish")

	if err := lw.bufferedFileWriter.Close(); err != nil {
		return nil, err
	}

	var (
		canonical digest.Digest
		err       error
	)

	// HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry
	// validation on failure. Three attempts are made, backing off
	// retries*100ms each time.
	for retries := 0; ; retries++ {
		canonical, err = lw.validateLayer(dgst)
		if err == nil {
			break
		}

		context.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries).
			Errorf("error validating layer: %v", err)

		if retries < 3 {
			time.Sleep(100 * time.Millisecond * time.Duration(retries+1))
			continue
		}

		return nil, err
	}

	if err := lw.moveLayer(canonical); err != nil {
		// TODO(stevvooe): Cleanup?
		return nil, err
	}

	// Link the layer blob into the repository.
	if err := lw.linkLayer(canonical, dgst); err != nil {
		return nil, err
	}

	if err := lw.removeResources(); err != nil {
		return nil, err
	}

	return lw.layerStore.Fetch(canonical)
}
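The inline retry loop generalizes to a small helper with linear backoff (step, 2*step, 3*step, ...) and a capped number of attempts. A hypothetical refactoring for illustration, not code from the repository:

func retryLinear(attempts int, step time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		if i < attempts-1 {
			time.Sleep(step * time.Duration(i+1))
		}
	}
	return err
}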
Example #8
// DeleteHandler deletes all data for a GUN. A 200 response indicates success.
func DeleteHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	s := ctx.Value("metaStore")
	store, ok := s.(storage.MetaStore)
	if !ok {
		return errors.ErrNoStorage.WithDetail(nil)
	}
	vars := mux.Vars(r)
	gun := vars["imageName"]
	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
	err := store.Delete(gun)
	if err != nil {
		logger.Error("500 DELETE repository")
		return errors.ErrUnknown.WithDetail(err)
	}
	return nil
}
Example #9
// GetTimestampKeyHandler returns a timestamp public key, creating a new
// key-pair if one doesn't yet exist
func GetTimestampKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	gun := vars["imageName"]

	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")

	s := ctx.Value("metaStore")
	store, ok := s.(storage.MetaStore)
	if !ok {
		logger.Error("500 GET storage not configured")
		return errors.ErrNoStorage.WithDetail(nil)
	}
	c := ctx.Value("cryptoService")
	crypto, ok := c.(signed.CryptoService)
	if !ok {
		logger.Error("500 GET crypto service not configured")
		return errors.ErrNoCryptoService.WithDetail(nil)
	}
	algo := ctx.Value("keyAlgorithm")
	keyAlgo, ok := algo.(string)
	if !ok {
		logger.Error("500 GET key algorithm not configured")
		return errors.ErrNoKeyAlgorithm.WithDetail(nil)
	}

	key, err := timestamp.GetOrCreateTimestampKey(gun, store, crypto, keyAlgo)
	if err != nil {
		logger.Errorf("500 GET timestamp key: %v", err)
		return errors.ErrUnknown.WithDetail(err)
	}

	out, err := json.Marshal(key)
	if err != nil {
		logger.Error("500 GET timestamp key")
		return errors.ErrUnknown.WithDetail(err)
	}
	logger.Debug("200 GET timestamp key")
	w.Write(out)
	return nil
}
Example #10
// Exists checks for existence of the digest in the cache, immediately
// returning if it exists for the repository. If not, the upstream is checked.
// When a positive result is found, it is written into the cache.
func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) {
	ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst)
	now := time.Now()
	defer func() {
		// TODO(stevvooe): Replace this with a decent context-based metrics solution
		ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)).
			Infof("(*cachedLayerService).Exists(%q)", dgst)
	}()

	atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1)
	available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst)
	if err != nil {
		ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err)
		goto fallback
	}

	if available {
		atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1)
		return true, nil
	}

fallback:
	atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1)
	exists, err := lc.LayerService.Exists(dgst)
	if err != nil {
		return exists, err
	}

	if exists {
		// we can only cache this if the existence is positive.
		if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil {
			ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err)
		}
	}

	return exists, err
}
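The layerInfoCacheMetrics value used in both cached methods implies a shape along these lines, with plain uint64 fields updated via atomic.AddUint64. A sketch reconstructed from the calls above, not the package's actual declaration:

type cacheCounters struct {
	Requests uint64
	Hits     uint64
	Misses   uint64
}

var layerInfoCacheMetrics struct {
	Exists cacheCounters
	Fetch  cacheCounters
}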
Example #11
func (app *App) configureRedis(configuration *configuration.Configuration) {
	if configuration.Redis.Addr == "" {
		ctxu.GetLogger(app).Infof("redis not configured")
		return
	}

	pool := &redis.Pool{
		Dial: func() (redis.Conn, error) {
			// TODO(stevvooe): Yet another use case for contextual timing.
			ctx := context.WithValue(app, "redis.connect.startedat", time.Now())

			done := func(err error) {
				logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration",
					ctxu.Since(ctx, "redis.connect.startedat"))
				if err != nil {
					logger.Errorf("redis: error connecting: %v", err)
				} else {
					logger.Infof("redis: connect %v", configuration.Redis.Addr)
				}
			}

			conn, err := redis.DialTimeout("tcp",
				configuration.Redis.Addr,
				configuration.Redis.DialTimeout,
				configuration.Redis.ReadTimeout,
				configuration.Redis.WriteTimeout)
			if err != nil {
				ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v",
					configuration.Redis.Addr, err)
				done(err)
				return nil, err
			}

			// authorize the connection
			if configuration.Redis.Password != "" {
				if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil {
					defer conn.Close()
					done(err)
					return nil, err
				}
			}

			// select the database to use
			if configuration.Redis.DB != 0 {
				if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil {
					defer conn.Close()
					done(err)
					return nil, err
				}
			}

			done(nil)
			return conn, nil
		},
		MaxIdle:     configuration.Redis.Pool.MaxIdle,
		MaxActive:   configuration.Redis.Pool.MaxActive,
		IdleTimeout: configuration.Redis.Pool.IdleTimeout,
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			// TODO(stevvooe): We can probably do something more interesting
			// here with the health package.
			_, err := c.Do("PING")
			return err
		},
		Wait: false, // if a connection is not available, proceed without cache.
	}

	app.redis = pool

	// setup expvar
	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} {
		return map[string]interface{}{
			"Config": configuration.Redis,
			"Active": app.redis.ActiveCount(),
		}
	}))
}
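Because the pool is created with Wait: false, Get does not block when MaxActive connections are already in use; with redigo the failure surfaces on the returned connection's Err method, so callers can fall back to the uncached path. A minimal usage sketch (the pingRedis helper is hypothetical):

func pingRedis(pool *redis.Pool) error {
	conn := pool.Get() // never blocks, since Wait is false
	defer conn.Close()
	if err := conn.Err(); err != nil {
		return err // pool exhausted or dial failed: caller proceeds without cache
	}
	_, err := conn.Do("PING")
	return err
}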
Example #12
// atomicUpdateHandler validates and applies a multipart upload of TUF
// metadata files as a single atomic update.
func atomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	gun := vars["imageName"]
	s := ctx.Value("metaStore")
	logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
	store, ok := s.(storage.MetaStore)
	if !ok {
		logger.Error("500 POST unable to retrieve storage")
		return errors.ErrNoStorage.WithDetail(nil)
	}
	cryptoServiceVal := ctx.Value("cryptoService")
	cryptoService, ok := cryptoServiceVal.(signed.CryptoService)
	if !ok {
		logger.Error("500 POST unable to retrieve signing service")
		return errors.ErrNoCryptoService.WithDetail(nil)
	}

	reader, err := r.MultipartReader()
	if err != nil {
		return errors.ErrMalformedUpload.WithDetail(nil)
	}
	var updates []storage.MetaUpdate
	for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		} else if err != nil {
			return errors.ErrMalformedUpload.WithDetail(nil)
		}
		role := strings.TrimSuffix(part.FileName(), ".json")
		if role == "" {
			return errors.ErrNoFilename.WithDetail(nil)
		} else if !data.ValidRole(role) {
			return errors.ErrInvalidRole.WithDetail(role)
		}
		meta := &data.SignedMeta{}
		var input []byte
		inBuf := bytes.NewBuffer(input)
		dec := json.NewDecoder(io.TeeReader(part, inBuf))
		err = dec.Decode(meta)
		if err != nil {
			return errors.ErrMalformedJSON.WithDetail(nil)
		}
		version := meta.Signed.Version
		updates = append(updates, storage.MetaUpdate{
			Role:    role,
			Version: version,
			Data:    inBuf.Bytes(),
		})
	}
	updates, err = validateUpdate(cryptoService, gun, updates, store)
	if err != nil {
		serializable, serializableError := validation.NewSerializableError(err)
		if serializableError != nil {
			return errors.ErrInvalidUpdate.WithDetail(nil)
		}
		return errors.ErrInvalidUpdate.WithDetail(serializable)
	}
	err = store.UpdateMany(gun, updates)
	if err != nil {
		// If we have an old version error, surface to user with error code
		if _, ok := err.(storage.ErrOldVersion); ok {
			return errors.ErrOldVersion.WithDetail(err)
		}
		// More generic storage update error, possibly due to attempted rollback
		logger.Errorf("500 POST error applying update request: %v", err)
		return errors.ErrUpdating.WithDetail(nil)
	}
	return nil
}
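The buffer-and-decode step above relies on io.TeeReader: the decoder parses each part while the tee captures the exact bytes read, so the stored metadata matches what was validated. The same pattern in isolation, as a hypothetical helper (note that the decoder may buffer slightly past the JSON value, and those bytes are captured too):

func decodeAndCapture(r io.Reader, v interface{}) ([]byte, error) {
	var raw bytes.Buffer
	if err := json.NewDecoder(io.TeeReader(r, &raw)).Decode(v); err != nil {
		return nil, err
	}
	return raw.Bytes(), nil
}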
Example #13
// validateLayer checks the layer data against the digest, returning an error
// if it does not match. The canonical digest is returned.
func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if lw.resumableDigester != nil {
		// Restore the hasher state to the end of the upload.
		if err := lw.resumeHashAt(lw.size); err != nil {
			return "", err
		}

		canonical = lw.resumableDigester.Digest()

		if canonical.Algorithm() == dgst.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = dgst == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	}

	if fullHash {
		digester := digest.NewCanonicalDigester()

		digestVerifier, err := digest.NewDigestVerifier(dgst)
		if err != nil {
			return "", err
		}

		// Read the file from the backend driver and validate it.
		fr, err := newFileReader(lw.layerStore.repository.ctx, lw.bufferedFileWriter.driver, lw.path)
		if err != nil {
			return "", err
		}

		tr := io.TeeReader(fr, digester)

		if _, err = io.Copy(digestVerifier, tr); err != nil {
			return "", err
		}

		canonical = digester.Digest()
		verified = digestVerifier.Verified()
	}

	if !verified {
		context.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
			Errorf("canonical digest does match provided digest")
		return "", distribution.ErrLayerInvalidDigest{
			Digest: dgst,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	return canonical, nil
}
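Stripped of the resumable-digest machinery, the verification core is hashing the stored content and comparing it against the expected digest. A standard-library sketch of that idea, not the digest package's API:

func verifySHA256(r io.Reader, expectedHex string) (bool, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == expectedHex, nil
}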