Example #1
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id)

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.id),
		Action:    "Downloading",
	})
	if _, err := io.Copy(di.tmpFile, reader); err != nil {
		logrus.Debugf("Error copying layer to temp file: %v", err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
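The TeeReader above feeds every byte into the digest verifier as it is written to the temp file, so verification needs no second pass over the data. The same verify-while-copying idea reduced to the standard library (a sketch; copyAndVerify, src, dst and expectedHex are illustrative names, and sha256 stands in for the digest package):

func copyAndVerify(dst io.Writer, src io.Reader, expectedHex string) error {
	h := sha256.New()
	// every byte read from src is also written into the hash
	if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != expectedHex {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, expectedHex)
	}
	return nil
}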
Example #2
func (t *transport) UpgradeHTTP(req *http.Request, l log15.Logger) (*http.Response, net.Conn, error) {
	stickyBackend := t.getStickyBackend(req)
	backends := t.getOrderedBackends(stickyBackend)
	upconn, addr, err := dialTCP(context.Background(), l, backends)
	if err != nil {
		l.Error("dial failed", "status", "503", "num_backends", len(backends))
		return nil, nil, err
	}
	conn := &streamConn{bufio.NewReader(upconn), upconn}
	req.URL.Host = addr

	if err := req.Write(conn); err != nil {
		conn.Close()
		l.Error("error writing request", "err", err, "backend", addr)
		return nil, nil, err
	}
	res, err := http.ReadResponse(conn.Reader, req)
	if err != nil {
		conn.Close()
		l.Error("error reading response", "err", err, "backend", addr)
		return nil, nil, err
	}
	t.setStickyBackend(res, stickyBackend)
	return res, conn, nil
}
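streamConn itself is not part of this example; presumably it pairs the buffered reader with the raw connection so that bytes already buffered while parsing the response are not lost when the connection is handed to the caller. A plausible shape, stated as an assumption rather than the actual definition:

type streamConn struct {
	*bufio.Reader // reads go through the buffer
	net.Conn      // writes, Close, deadlines, etc. hit the raw connection
}

// Read picks the buffered reader over the embedded Conn's Read.
func (c *streamConn) Read(p []byte) (int, error) {
	return c.Reader.Read(p)
}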
Example #3
func TestServeConnClientGone(t *testing.T) {
	control, conn := net.Pipe()
	cnConn := connutil.CloseNotifyConn(conn)

	clientGone := false
	dialer = dialerFunc(func(_, _ string) (net.Conn, error) {
		if clientGone {
			err := errors.New("dial after client gone")
			t.Error(err)
			return nil, err
		}

		if err := control.Close(); err != nil {
			t.Fatal(err)
		}
		<-cnConn.(http.CloseNotifier).CloseNotify()

		clientGone = true
		return nil, &dialErr{}
	})

	fn := func() []string { return []string{"127.0.0.1:0", "127.0.0.1:0"} }
	prox := NewReverseProxy(fn, nil, false)

	prox.ServeConn(context.Background(), cnConn)
}
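The dialerFunc conversion suggests a function-adapter type in the spirit of http.HandlerFunc; its definition is not shown here, so the following is an assumed sketch of what it could look like:

type dialerFunc func(network, addr string) (net.Conn, error)

// Dial lets a plain function satisfy a Dial-style interface, which is what
// allows the test above to swap in its own dialer.
func (f dialerFunc) Dial(network, addr string) (net.Conn, error) {
	return f(network, addr)
}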
Example #4
func (s *HTTPListener) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	ctx := context.Background()
	ctx = ctxhelper.NewContextStartTime(ctx, time.Now())
	r := s.findRoute(req.Host, req.URL.Path)
	if r == nil {
		fail(w, 404)
		return
	}

	r.ServeHTTP(ctx, w, req)
}
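ctxhelper.NewContextStartTime lives in another package; such helpers are usually implemented by storing the value under an unexported key type so that no other package can collide with it. A minimal sketch under that assumption (names are illustrative):

type startTimeKey struct{}

// NewContextStartTime returns a copy of ctx that carries the request start time.
func NewContextStartTime(ctx context.Context, t time.Time) context.Context {
	return context.WithValue(ctx, startTimeKey{}, t)
}

// StartTimeFromContext retrieves the start time stored by NewContextStartTime.
func StartTimeFromContext(ctx context.Context) (time.Time, bool) {
	t, ok := ctx.Value(startTimeKey{}).(time.Time)
	return t, ok
}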
Example #5
func ContextInjector(componentName string, handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		reqID := req.Header.Get("X-Request-ID")
		if reqID == "" {
			reqID = random.UUID()
		}
		ctx := ctxhelper.NewContextRequestID(context.Background(), reqID)
		ctx = ctxhelper.NewContextComponentName(ctx, componentName)
		rw := NewResponseWriter(w, ctx)
		handler.ServeHTTP(rw, req)
	})
}
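A hypothetical way to mount ContextInjector in front of a handler (the handler body and address are illustrative):

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// every request now carries a request ID and component name in its context
	http.Handle("/", ContextInjector("router", hello))
	log.Fatal(http.ListenAndServe(":8080", nil))
}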
Example #6
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // release the timer's resources as soon as we are done
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
Example #7
func (p *v2Puller) pullV2Repository(tag string) (err error) {
	var tags []string
	taggedName := p.repoInfo.LocalName
	if len(tag) > 0 {
		tags = []string{tag}
		taggedName = utils.ImageReference(p.repoInfo.LocalName, tag)
	} else {
		var err error

		manSvc, err := p.repo.Manifests(context.Background())
		if err != nil {
			return err
		}

		tags, err = manSvc.Tags()
		if err != nil {
			return err
		}

	}

	poolKey := "v2:" + taggedName
	broadcaster, found := p.poolAdd("pull", poolKey)
	broadcaster.Add(p.config.OutStream)
	if found {
		// Another pull of the same repository is already taking place; just wait for it to finish
		return broadcaster.Wait()
	}

	// This must use a closure so it captures the value of err when the
	// function returns, not when the 'defer' is evaluated.
	defer func() {
		p.poolRemoveWithError("pull", poolKey, err)
	}()

	var layersDownloaded bool
	for _, tag := range tags {
		// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
		// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
		pulledNew, err := p.pullV2Tag(broadcaster, tag, taggedName)
		if err != nil {
			return err
		}
		layersDownloaded = layersDownloaded || pulledNew
	}

	writeStatus(taggedName, broadcaster, p.sf, layersDownloaded)

	return nil
}
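poolAdd and poolRemoveWithError (not shown here) deduplicate concurrent pulls of the same key: the first caller does the work, later callers attach their output to the broadcaster and wait. A rough sketch of the registration half of that idea, simplified to a done channel per key and purely hypothetical:

type pullPool struct {
	mu     sync.Mutex
	active map[string]chan struct{} // closed when the first pull finishes
}

// add returns the channel for key and whether a pull was already in flight.
func (p *pullPool) add(key string) (done chan struct{}, found bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ch, ok := p.active[key]; ok {
		return ch, true
	}
	ch := make(chan struct{})
	p.active[key] = ch
	return ch, false
}

// remove marks the pull for key as finished and wakes all waiters.
func (p *pullPool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ch, ok := p.active[key]; ok {
		close(ch)
		delete(p.active, key)
	}
}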
Example #8
func (l *TCPListener) Start() error {
	ctx := context.Background() // TODO(benburkert): make this an argument
	ctx, l.stopSync = context.WithCancel(ctx)

	if l.Watcher != nil {
		return errors.New("router: tcp listener already started")
	}
	if l.wm == nil {
		l.wm = NewWatchManager()
	}
	l.Watcher = l.wm

	if l.ds == nil {
		return errors.New("router: tcp listener missing data store")
	}
	l.DataStoreReader = l.ds

	l.services = make(map[string]*tcpService)
	l.routes = make(map[string]*tcpRoute)
	l.ports = make(map[int]*tcpRoute)
	l.listeners = make(map[int]net.Listener)

	if l.startPort != 0 && l.endPort != 0 {
		for i := l.startPort; i <= l.endPort; i++ {
			addr := fmt.Sprintf("%s:%d", l.IP, i)
			listener, err := listenFunc("tcp4", addr)
			if err != nil {
				l.Close()
				return listenErr{addr, err}
			}
			l.listeners[i] = listener
		}
	}

	// TODO(benburkert): the sync API cannot handle routes deleted while the
	// listen/notify connection is disconnected
	if err := l.startSync(ctx); err != nil {
		l.Close()
		return err
	}

	return nil
}
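The cancel function saved in l.stopSync is the standard context.WithCancel pattern: the goroutines started by startSync are expected to exit once ctx.Done() is closed, presumably from the listener's shutdown path. In isolation, with hypothetical names:

// watch processes events until the returned stop function is called.
func watch(events <-chan string, handle func(string)) (stop func()) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		for {
			select {
			case <-ctx.Done():
				return // shut down when stop (i.e. cancel) is called
			case ev := <-events:
				handle(ev)
			}
		}
	}()
	return cancel
}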
Example #9
func TestExchangeRequest_NonBasicAuth(t *testing.T) {
	tr := &mockTransport{
		rt: func(r *http.Request) (w *http.Response, err error) {
			headerAuth := r.Header.Get("Authorization")
			if headerAuth != "" {
				t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
			}
			return nil, errors.New("no response")
		},
	}
	c := &http.Client{Transport: tr}
	conf := &Config{
		ClientID: "CLIENT_ID",
		Endpoint: Endpoint{
			AuthURL:  "https://accounts.google.com/auth",
			TokenURL: "https://accounts.google.com/token",
		},
	}

	ctx := context.WithValue(context.Background(), HTTPClient, c)
	conf.Exchange(ctx, "code")
}
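On the receiving side, a package that accepts an *http.Client through a context key (as HTTPClient is used above) typically falls back to http.DefaultClient when nothing was supplied. A sketch of that lookup with a hypothetical helper, not the actual oauth2 internals:

func clientFromContext(ctx context.Context) *http.Client {
	if c, ok := ctx.Value(HTTPClient).(*http.Client); ok && c != nil {
		return c
	}
	return http.DefaultClient
}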
Example #10
func (t *transport) UpgradeHTTP(req *http.Request) (*http.Response, net.Conn, error) {
	stickyBackend := t.getStickyBackend(req)
	backends := t.getOrderedBackends(stickyBackend)
	upconn, addr, err := dialTCP(context.Background(), backends)
	if err != nil {
		return nil, nil, err
	}
	conn := &streamConn{bufio.NewReader(upconn), upconn}
	req.URL.Host = addr

	if err := req.Write(conn); err != nil {
		conn.Close()
		return nil, nil, err
	}
	res, err := http.ReadResponse(conn.Reader, req)
	if err != nil {
		conn.Close()
		return nil, nil, err
	}
	t.setStickyBackend(res, stickyBackend)
	return res, conn, nil
}
Example #11
func (s *HTTPListener) Start() error {
	ctx := context.Background() // TODO(benburkert): make this an argument
	ctx, s.stopSync = context.WithCancel(ctx)

	if s.Watcher != nil {
		return errors.New("router: http listener already started")
	}
	if s.wm == nil {
		s.wm = NewWatchManager()
	}
	s.Watcher = s.wm

	if s.ds == nil {
		return errors.New("router: http listener missing data store")
	}
	s.DataStoreReader = s.ds

	s.routes = make(map[string]*httpRoute)
	s.domains = make(map[string]*httpRoute)
	s.services = make(map[string]*httpService)

	if s.cookieKey == nil {
		s.cookieKey = &[32]byte{}
	}

	// TODO(benburkert): the sync API cannot handle routes deleted while the
	// listen/notify connection is disconnected
	if err := s.startSync(ctx); err != nil {
		return err
	}

	if err := s.startListen(); err != nil {
		s.stopSync()
		return err
	}

	return nil
}
Example #12
func (s *tcpService) ServeConn(conn net.Conn) {
	s.rp.ServeConn(context.Background(), proxy.CloseNotifyConn(conn))
}
Example #13
func (p *v2Pusher) pushV2Tag(tag string) error {
	logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag)

	layerID, exists := p.localRepo[tag]
	if !exists {
		return fmt.Errorf("tag does not exist: %s", tag)
	}

	layersSeen := make(map[string]bool)

	layer, err := p.graph.Get(layerID)
	if err != nil {
		return err
	}

	m := &manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         p.repo.Name(),
		Tag:          tag,
		Architecture: layer.Architecture,
		FSLayers:     []manifest.FSLayer{},
		History:      []manifest.History{},
	}

	var metadata runconfig.Config
	if layer != nil && layer.Config != nil {
		metadata = *layer.Config
	}

	out := p.config.OutStream

	for ; layer != nil; layer, err = p.graph.GetParent(layer) {
		if err != nil {
			return err
		}

		// Break early if this layer has already been seen in this image;
		// this prevents infinite loops on layers which loop back, which
		// cannot be ruled out since layer IDs are not Merkle hashes.
		// TODO(dmcgowan): throw error if no valid use case is found
		if layersSeen[layer.ID] {
			break
		}

		logrus.Debugf("Pushing layer: %s", layer.ID)

		if layer.Config != nil && metadata.Image != layer.ID {
			if err := runconfig.Merge(&metadata, layer.Config); err != nil {
				return err
			}
		}

		var exists bool
		dgst, err := p.graph.GetLayerDigest(layer.ID)
		switch err {
		case nil:
			if p.layersPushed[dgst] {
				exists = true
				// break out of switch, it is already known that
				// the push is not needed and therefore doing a
				// stat is unnecessary
				break
			}
			_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
			switch err {
			case nil:
				exists = true
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
			case distribution.ErrBlobUnknown:
				// nop
			default:
				out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
				return err
			}
		case ErrDigestNotSet:
			// nop
		case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported:
			return fmt.Errorf("error getting image checksum: %v", err)
		}

		// If the digest was empty or not saved, or if the blob does not exist
		// on the remote repository, then push it.
		if !exists {
			var pushDigest digest.Digest
			if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
				return err
			}
			if dgst == "" {
				// Cache new checksum
				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
					return err
				}
			}
			dgst = pushDigest
		}

		// read v1Compatibility config, generate new if needed
		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
		if err != nil {
			return err
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})

		layersSeen[layer.ID] = true
		p.layersPushed[dgst] = true
	}

	// Fix parent chain if necessary
	if err = fixHistory(m); err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
	signed, err := manifest.Sign(m, p.trustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize))
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}
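The inner switch on the Stat error above is the usual existence check before an upload: success means the blob is already on the remote, distribution.ErrBlobUnknown means it still has to be pushed, and anything else is a real failure. The same check in isolation (a sketch; blobExists is a hypothetical helper over the blob service used above):

// blobExists reports whether dgst is already present in bs.
func blobExists(ctx context.Context, bs distribution.BlobService, dgst digest.Digest) (bool, error) {
	_, err := bs.Stat(ctx, dgst)
	switch err {
	case nil:
		return true, nil // already on the remote, no upload needed
	case distribution.ErrBlobUnknown:
		return false, nil // unknown blob: it has to be pushed
	default:
		return false, err // transport or registry error
	}
}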
Example #14
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	digester := digest.Canonical.New()
	// HACK: The MultiWriter doesn't write directly to layerUpload because
	// we must make sure the ReadFrom is used, not Write. Using Write would
	// send a PATCH request for every Write call.
	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, reader)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := layerUpload.ReadFrom(pipeReader)
	pipeReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}
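The pipe and MultiWriter above make the digest cover the compressed bytes exactly as they are produced, without buffering the whole layer anywhere. The same pipeline reduced to the standard library (a sketch; src stands for the uncompressed layer stream and sha256 stands in for the digester):

// gzipWithDigest returns a reader of the gzipped src and a function that
// yields the hex digest of the compressed bytes once the reader is drained.
func gzipWithDigest(src io.Reader) (io.ReadCloser, func() string) {
	pr, pw := io.Pipe()
	h := sha256.New()
	go func() {
		// everything written through gz lands in both the pipe and the hash
		gz := gzip.NewWriter(io.MultiWriter(pw, h))
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		if err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
	}()
	return pr, func() string { return hex.EncodeToString(h.Sum(nil)) }
}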
Example #15
// NewV2Repository returns a repository (v2 only). It creates an HTTP transport
// providing timeout settings and authentication support, and also verifies the
// remote API version.
func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig, actions ...string) (distribution.Repository, error) {
	ctx := context.Background()

	repoName := repoInfo.CanonicalName
	// If endpoint does not support CanonicalName, use the RemoteName instead
	if endpoint.TrimHostname {
		repoName = repoInfo.RemoteName
	}

	// TODO(dmcgowan): Call close idle connections when complete, use keep alive
	base := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     endpoint.TLSConfig,
		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
		DisableKeepAlives: true,
	}

	modifiers := registry.DockerHeaders(metaHeaders)
	authTransport := transport.NewTransport(base, modifiers...)
	pingClient := &http.Client{
		Transport: authTransport,
		Timeout:   15 * time.Second,
	}
	endpointStr := endpoint.URL + "/v2/"
	req, err := http.NewRequest("GET", endpointStr, nil)
	if err != nil {
		return nil, err
	}
	resp, err := pingClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	versions := auth.APIVersions(resp, endpoint.VersionHeader)
	if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 {
		var foundVersion bool
		for _, version := range endpoint.Versions {
			for _, pingVersion := range versions {
				if version == pingVersion {
					foundVersion = true
				}
			}
		}
		if !foundVersion {
			return nil, errors.New("endpoint does not support v2 API")
		}
	}

	challengeManager := auth.NewSimpleChallengeManager()
	if err := challengeManager.AddResponse(resp); err != nil {
		return nil, err
	}

	creds := dumbCredentialStore{auth: authConfig}
	tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...)
	basicHandler := auth.NewBasicHandler(creds)
	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
	tr := transport.NewTransport(base, modifiers...)

	return client.NewRepository(ctx, repoName, endpoint.URL, tr)
}
Example #16
func (r *tcpRoute) ServeConn(conn net.Conn) {
	r.rp.ServeConn(context.Background(), connutil.CloseNotifyConn(conn))
}
Example #17
func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (tagUpdated bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
	}
	var verifiedManifest *manifest.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, tag)
	if err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	imgs, err := p.getImageInfos(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)

		for _, d := range downloads {
			p.poolRemoveWithError("pull", d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		img := imgs[i]

		p.graph.Retain(p.sessionID, img.id)
		layerIDs = append(layerIDs, img.id)

		p.graph.imageMutex.Lock(img.id)

		// Check if exists
		if p.graph.Exists(img.id) {
			if err := p.validateImageInGraph(img.id, imgs, i); err != nil {
				p.graph.imageMutex.Unlock(img.id)
				return false, fmt.Errorf("image validation failed: %v", err)
			}
			logrus.Debugf("Image already exists: %s", img.id)
			p.graph.imageMutex.Unlock(img.id)
			continue
		}
		p.graph.imageMutex.Unlock(img.id)

		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:      img,
			imgIndex: i,
			poolKey:  "v2layer:" + img.id,
			digest:   verifiedManifest.FSLayers[i].BlobSum,
			// TODO: buffering this channel seems to have fixed a hang on go1.5;
			// that may point at a deeper problem where the error is never
			// read from the channel in the loop below
			err: make(chan error, 1),
		}

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}
		d.tmpFile = tmpFile

		downloads = append(downloads, d)

		broadcaster, found := p.poolAdd("pull", d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}
			continue
		}

		d.tmpFile.Seek(0, 0)
		err := func() error {
			reader := progressreader.New(progressreader.Config{
				In:        d.tmpFile,
				Out:       d.broadcaster,
				Formatter: p.sf,
				Size:      d.size,
				NewLines:  false,
				ID:        stringid.TruncateID(d.img.id),
				Action:    "Extracting",
			})

			p.graph.imagesMutex.Lock()
			defer p.graph.imagesMutex.Unlock()

			p.graph.imageMutex.Lock(d.img.id)
			defer p.graph.imageMutex.Unlock(d.img.id)

			// Must recheck the data on disk if any exists.
			// This protects against races where something
			// else is written to the graph under this ID
			// after attemptIDReuse.
			if p.graph.Exists(d.img.id) {
				if err := p.validateImageInGraph(d.img.id, imgs, d.imgIndex); err != nil {
					return fmt.Errorf("image validation failed: %v", err)
				}
			}

			if err := p.graph.register(d.img, reader); err != nil {
				return err
			}

			if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil {
				return err
			}

			if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil {
				return err
			}

			return nil
		}()
		if err != nil {
			return false, err
		}

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would appear to a user to be dangling.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
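The TODO about the buffered error channel in the loop above points at a classic leak: with an unbuffered channel, a download goroutine would block forever on `di.err <- err` if pullV2Tag has already returned because an earlier download failed. With a buffer of one, the single send always completes and the goroutine can exit. Reduced to its essentials (doWork is a hypothetical stand-in):

// startWork runs doWork in the background and reports its error on the
// returned channel without ever blocking the worker goroutine.
func startWork(doWork func() error) <-chan error {
	errc := make(chan error, 1) // buffered: the send below can always complete
	go func() {
		errc <- doWork()
	}()
	// Even if the caller abandons errc without receiving, the goroutine
	// finishes its send and exits instead of leaking.
	return errc
}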
Example #18
func (s *LogAggregatorTestSuite) TestAggregatorReadLastNAndSubscribe(c *C) {
	runTest := func(lines int, filter Filter, expectedBefore, expectedSubMsgs, unexpectedSubMsgs []string) {
		// set up testing hook:
		messageReceived := make(chan struct{})
		afterMessage = func() {
			messageReceived <- struct{}{}
		}
		defer func() { afterMessage = nil }()

		delete(s.agg.buffers, "app") // reset the buffer
		conn, err := net.Dial("tcp", s.agg.Addr)
		c.Assert(err, IsNil)
		defer conn.Close()

		_, err = conn.Write([]byte(sampleLogLine1))
		c.Assert(err, IsNil)
		_, err = conn.Write([]byte(sampleLogLine2))
		c.Assert(err, IsNil)

		for i := 0; i < 2; i++ {
			<-messageReceived // wait for messages to be received
		}

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		if filter == nil {
			filter = nopFilter
		}

		msgc := s.agg.ReadLastNAndSubscribe("app", lines, filter, ctx.Done())
		timeout := time.After(5 * time.Second)

		for _, expectedMsg := range expectedBefore {
			select {
			case msg := <-msgc:
				c.Assert(msg, Not(IsNil))
				c.Assert(string(msg.Msg), Equals, expectedMsg)
			case <-timeout:
				c.Fatalf("timeout waiting for receive on msgc of %q", expectedMsg)
			}
		}
		select {
		case msg := <-msgc:
			c.Fatalf("unexpected message received: %+v", msg)
		default:
		}

		// make sure we skip messages we don't want
		for _, rawMsg := range unexpectedSubMsgs {
			_, err = conn.Write([]byte(rawMsg))
			c.Assert(err, IsNil)
			<-messageReceived // wait for message to be received

			select {
			case msg := <-msgc:
				c.Fatalf("received unexpected msg: %s", string(msg.Msg))
			default:
			}
		}
		// make sure we get messages we do want
		for _, rawMsg := range expectedSubMsgs {
			_, err = conn.Write([]byte(rawMsg))
			c.Assert(err, IsNil)
			<-messageReceived // wait for message to be received

			select {
			case msg := <-msgc:
				c.Assert(strings.HasSuffix(rawMsg, string(msg.Msg)), Equals, true)
			case <-timeout:
				c.Fatalf("timeout waiting for expected message on msgc: %q", rawMsg)
			}
		}
	}

	tests := []struct {
		lines             int
		filter            Filter
		expectedBefore    []string
		expectedSubMsgs   []string
		unexpectedSubMsgs []string
	}{
		{
			lines: -1,
			expectedBefore: []string{
				"Starting process with command `bundle exec rackup config.ru -p 24405`",
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
			},
			unexpectedSubMsgs: []string{
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		{
			lines: 1,
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
			},
			unexpectedSubMsgs: []string{
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		{
			lines:  -1,
			filter: filterJobID("2"),
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message",
			},
			unexpectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		{
			lines:          0,
			filter:         filterProcessType("web"),
			expectedBefore: []string{},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
			},
			unexpectedSubMsgs: []string{
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
		{
			lines:  1,
			filter: filterJobID("2"),
			expectedBefore: []string{
				"25 yay this is a message!!!\n",
			},
			expectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.2 - - message 2",
				"68 <40>1 2012-11-30T07:12:53+00:00 host app worker.2 - - worker message",
			},
			unexpectedSubMsgs: []string{
				"60 <40>1 2012-11-30T07:12:53+00:00 host app web.1 - - message 1",
				"70 <40>1 2012-11-30T07:12:53+00:00 host app worker.1 - - worker message 1",
				"68 <40>1 2012-11-30T07:12:53+00:00 host notapp web.1 - - not my message",
			},
		},
	}
	for i, test := range tests {
		c.Logf("testing num=%d lines=%d filter=%v", i, test.lines, test.filter)
		runTest(test.lines, test.filter, test.expectedBefore, test.expectedSubMsgs, test.unexpectedSubMsgs)
	}
}
Example #19
// Value lazily generates a process-unique id for the "instance.id" key and
// otherwise defers to the wrapped Context. (The method header below is an
// assumption added for completeness; the original listing began mid-method.)
func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		ic.once.Do(func() {
			// We want to lazily initialize the UUID so that we don't call a
			// random generator from package initialization code; for various
			// reasons randomness may not be available at that point.
			// https://github.com/docker/distribution/issues/782
			ic.id = uuid.Generate().String()
		})
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}
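A hypothetical caller of this package's Background and WithValue (the request ID value is purely illustrative):

func example() {
	ctx := Background()
	fmt.Println(ctx.Value("instance.id")) // stable for the lifetime of the process

	// request-scoped data is layered on top with WithValue
	ctx = WithValue(ctx, "request.id", "r-1234")
	fmt.Println(ctx.Value("request.id"))
}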