Example 1
func AddMockImageToCache() {
	mockImage := &metadata.ImageConfig{
		ImageID: "e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2",
		Name:    "busybox",
		Tags:    []string{"latest"},
	}
	mockImage.Config = &container.Config{
		Hostname:     "55cd1f8f6e5b",
		Domainname:   "",
		User:         "",
		AttachStdin:  false,
		AttachStdout: false,
		AttachStderr: false,
		Tty:          false,
		OpenStdin:    false,
		StdinOnce:    false,
		Env:          []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		Cmd:          []string{"sh"},
		Image:        "sha256:e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2",
		Volumes:      nil,
		WorkingDir:   "",
		Entrypoint:   nil,
		OnBuild:      nil,
	}

	cache.ImageCache().AddImage(mockImage)
}
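A minimal test sketch of how the helper above might be exercised (the test name and assertions are illustrative, assuming the same package with "testing" imported; the lookup by ID mirrors what AddMockContainerToCache does in Example 7 below):

func TestAddMockImageToCache(t *testing.T) {
	AddMockImageToCache()

	// Look the mock image back up by the ID it was registered with above.
	img, err := cache.ImageCache().GetImage("e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2")
	if err != nil {
		t.Fatalf("expected mock image in cache: %s", err)
	}
	if len(img.Tags) == 0 || img.Tags[0] != "latest" {
		t.Errorf("unexpected tags: %v", img.Tags)
	}
}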
Example 2
File: image.go Project: vmware/vic
// LookupImage looks up an image by name and returns it as an ImageInspect
// structure; it backs docker inspect.
func (i *Image) LookupImage(name string) (*types.ImageInspect, error) {
	defer trace.End(trace.Begin("LookupImage (docker inspect)"))

	imageConfig, err := cache.ImageCache().Get(name)
	if err != nil {
		return nil, err
	}

	return imageConfigToDockerImageInspect(imageConfig, ProductName()), nil
}
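A hypothetical caller sketch (the helper name and log lines are mine, not from the project; it assumes the same package and an *Image backend value as above):

// showImage is illustrative only: inspect an image by name and log the
// fields most callers care about.
func showImage(i *Image, name string) {
	inspect, err := i.LookupImage(name)
	if err != nil {
		log.Errorf("inspect of %s failed: %s", name, err)
		return
	}
	log.Infof("image %s tags=%v", inspect.ID, inspect.RepoTags)
}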
Example 3
File: image.go Project: kjplatz/vic
func (i *Image) PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	defer trace.End(trace.Begin("PullImage"))

	log.Debugf("PullImage: ref = %+v, metaheaders = %+v\n", ref, metaHeaders)

	var cmdArgs []string

	cmdArgs = append(cmdArgs, "-reference", ref.String())

	if authConfig != nil {
		if len(authConfig.Username) > 0 {
			cmdArgs = append(cmdArgs, "-username", authConfig.Username)
		}
		if len(authConfig.Password) > 0 {
			cmdArgs = append(cmdArgs, "-password", authConfig.Password)
		}
	}

	portLayerServer := PortLayerServer()

	if portLayerServer != "" {
		cmdArgs = append(cmdArgs, "-host", portLayerServer)
	}

	// instruct imagec to use os.TempDir
	cmdArgs = append(cmdArgs, "-destination", os.TempDir())

	log.Debugf("PullImage: cmd = %s %+v\n", Imagec, cmdArgs)

	cmd := exec.Command(Imagec, cmdArgs...)
	cmd.Stdout = outStream
	cmd.Stderr = outStream

	// Execute
	err := cmd.Start()

	if err != nil {
		log.Errorf("Error starting %s - %s\n", Imagec, err)
		return fmt.Errorf("Error starting %s - %s\n", Imagec, err)
	}

	err = cmd.Wait()

	if err != nil {
		log.Errorf("imagec exit code: %s", err)
		return err
	}

	client := PortLayerClient()
	cache.ImageCache().Update(client)
	return nil
}
Example 4
// ContainerCreate creates a container.
func (c *Container) ContainerCreate(config types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
	defer trace.End(trace.Begin(""))

	var err error

	// bail early if container name already exists
	if exists := cache.ContainerCache().GetContainer(config.Name); exists != nil {
		err := fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to re use that name.", config.Name, exists.ContainerID)
		log.Errorf("%s", err.Error())
		return types.ContainerCreateResponse{}, derr.NewRequestConflictError(err)
	}

	// get the image from the cache
	image, err := cache.ImageCache().Get(config.Config.Image)
	if err != nil {
		// if no image is found, an error is returned and a pull
		// will be initiated by the docker client
		log.Errorf("ContainerCreate: image %s error: %s", config.Config.Image, err.Error())
		return types.ContainerCreateResponse{}, derr.NewRequestNotFoundError(err)
	}

	setCreateConfigOptions(config.Config, image.Config)

	log.Debugf("config.Config = %+v", config.Config)
	if err = validateCreateConfig(&config); err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Create a container representation in the personality server.  This representation
	// will be stored in the cache if create succeeds in the port layer.
	container, err := createInternalVicContainer(image, &config)
	if err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Create an actualized container in the VIC port layer
	id, err := c.containerCreate(container, config)
	if err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Container created ok, save the container id and save the config override from the API
	// caller and save this container internal representation in our personality server's cache
	copyConfigOverrides(container, config)
	container.ContainerID = id
	cache.ContainerCache().AddContainer(container)

	log.Debugf("Container create - name(%s), containerID(%s), config(%#v), host(%#v)",
		container.Name, container.ContainerID, container.Config, container.HostConfig)

	return types.ContainerCreateResponse{ID: id}, nil
}
Example 5
File: image.go Project: vmware/vic
func (i *Image) Images(filterArgs string, filter string, all bool) ([]*types.Image, error) {
	defer trace.End(trace.Begin("Images"))

	images := cache.ImageCache().GetImages()

	result := make([]*types.Image, 0, len(images))

	for _, image := range images {
		result = append(result, convertV1ImageToDockerImage(image))
	}

	// sort on creation time
	sort.Sort(sort.Reverse(byCreated(result)))

	return result, nil
}
Example 6
func Init(portLayerAddr, product string, config *config.VirtualContainerHostConfigSpec) error {
	_, _, err := net.SplitHostPort(portLayerAddr)
	if err != nil {
		return err
	}

	vchConfig = config
	productName = product

	if config != nil {
		productVersion = config.Version
		if productVersion == "" {
			portLayerName = product + " Backend Engine"
		} else {
			portLayerName = product + " " + productVersion + " Backend Engine"
		}
	} else {
		portLayerName = product + " Backend Engine"
	}

	t := httptransport.New(portLayerAddr, "/", []string{"http"})
	portLayerClient = client.New(t, nil)
	portLayerServerAddr = portLayerAddr

	// attempt to update the image cache at startup
	log.Info("Refreshing image cache...")
	go func() {
		for i := 0; i < Retries; i++ {

			// initial pause to wait for the portlayer to come up
			time.Sleep(RetryTimeSeconds * time.Second)

			if err := cache.ImageCache().Update(portLayerClient); err == nil {
				log.Info("Image cache updated successfully")
				return
			}
			log.Info("Failed to refresh image cache, retrying...")
		}
		log.Warn("Failed to refresh image cache. Is the portlayer server down?")
	}()

	return nil
}
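An illustrative startup call (the address and product string are placeholder values, not taken from the project); passing a nil config falls back to the plain "<product> Backend Engine" port layer name, as the function above shows:

// Hypothetical daemon startup wiring, not original code.
if err := Init("127.0.0.1:2380", "vSphere Integrated Containers", nil); err != nil {
	log.Fatalf("failed to initialize the port layer backend: %s", err)
}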
Example 7
func AddMockContainerToCache() {
	AddMockImageToCache()

	image, err := cache.ImageCache().GetImage("e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2")
	if err == nil {
		vc := viccontainer.NewVicContainer()
		vc.ImageID = image.ID
		vc.Config = image.Config // Set defaults. Overrides will get copied below.
		vc.Config.Tty = false
		vc.ContainerID = dummyContainerID

		cache.ContainerCache().AddContainer(vc)

		vc = viccontainer.NewVicContainer()
		vc.ImageID = image.ID
		vc.Config = image.Config
		vc.Config.Tty = true
		vc.ContainerID = dummyContainerID_tty

		cache.ContainerCache().AddContainer(vc)
	}
}
Example 8
File: image.go Project: vmware/vic
// TODO fix the errors so the client doesn't print the generic POST or DELETE message
func (i *Image) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
	defer trace.End(trace.Begin(imageRef))

	var deleted []types.ImageDelete
	var userRefIsID bool
	var imageRemoved bool

	// Use the image cache to go from the reference to the ID we use in the image store
	img, err := cache.ImageCache().Get(imageRef)
	if err != nil {
		return nil, err
	}

	// Get the tags from the repo cache for this image
	// TODO: remove this -- we have it in the image above
	tags := cache.RepositoryCache().Tags(img.ImageID)

	// did the user pass an id or partial id
	userRefIsID = cache.ImageCache().IsImageID(imageRef)
	// do we have any reference conflicts
	if len(tags) > 1 && userRefIsID && !force {
		t := uid.Parse(img.ImageID).Truncate()
		return nil,
			fmt.Errorf("conflict: unable to delete %s (must be forced) - image is referenced in one or more repositories", t)
	}

	// if we have an ID or only 1 tag, let's delete the vmdk(s) via the PL
	if userRefIsID || len(tags) == 1 {
		log.Infof("Deleting image via PL %s (%s)", img.ImageID, img.ID)

		// needed for image store
		host, err := sys.UUID()
		if err != nil {
			return nil, err
		}

		params := storage.NewDeleteImageParamsWithContext(ctx).WithStoreName(host).WithID(img.ID)
		// TODO: This will fail if any containerVMs are referencing the vmdk - vanilla docker
		// allows the removal of an image (via force flag) even if a container is referencing it
		// should vic?
		_, err = PortLayerClient().Storage.DeleteImage(params)
		if err != nil {
			switch err := err.(type) {
			case *storage.DeleteImageLocked:
				return nil, fmt.Errorf("Failed to remove image %q: %s", imageRef, err.Payload.Message)
			default:
				return nil, err
			}
		}

		// we've deleted the image so remove from cache
		cache.ImageCache().RemoveImageByConfig(img)
		imagec.LayerCache().Remove(img.ID)
		imageRemoved = true

	} else {

		// only untag the ref supplied
		n, err := reference.ParseNamed(imageRef)
		if err != nil {
			return nil, fmt.Errorf("unable to parse reference(%s): %s", imageRef, err.Error())
		}
		tag := reference.WithDefaultTag(n)
		tags = []string{tag.String()}
	}
	// loop through and remove from repoCache
	for i := range tags {
		// remove from cache, but don't save -- we'll do that after all
		// updates
		refNamed, _ := cache.RepositoryCache().Remove(tags[i], false)
		dd := types.ImageDelete{Untagged: refNamed}
		deleted = append(deleted, dd)
	}

	// save repo now -- this will limit the number of PL
	// calls to one per rmi call
	err = cache.RepositoryCache().Save()
	if err != nil {
		return nil, fmt.Errorf("Untag error: %s", err.Error())
	}

	if imageRemoved {
		imageDeleted := types.ImageDelete{Deleted: img.ImageID}
		deleted = append(deleted, imageDeleted)
	}

	return deleted, err
}
Example 9
// makeDownloadFunc returns a func used by xfer.TransferManager to download a layer
func (ldm *LayerDownloader) makeDownloadFunc(layer *ImageWithMeta, ic *ImageC, parentDownload *downloadTransfer, layers []*ImageWithMeta) xfer.DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) xfer.Transfer {

		d := &downloadTransfer{
			Transfer: xfer.NewTransfer(),
			layer:    layer,
		}

		go func() {

			defer func() {
				close(progressChan)

				// remove layer from cache if there was an error attempting to download
				if d.err != nil {
					LayerCache().Remove(layer.ID)
				}

			}()

			progressOutput := progress.ChanOutput(progressChan)

			// wait for TransferManager to give the go-ahead
			select {
			case <-start:
			default:
				progress.Update(progressOutput, layer.String(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// bail if parent download failed or was cancelled
				select {
				case <-parentDownload.Done():
					if err := parentDownload.result(); err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			// fetch blob
			diffID, err := FetchImageBlob(d.Transfer.Context(), ic.Options, layer, progressOutput)
			if err != nil {
				d.err = fmt.Errorf("%s/%s returned %s", ic.Image, layer.ID, err)
				return
			}

			layer.DiffID = diffID

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer download cancelled")
					return
				default:
					<-parentDownload.Done() // block until parent download completes
				}

				if err := parentDownload.result(); err != nil {
					d.err = err
					return
				}
			}

			// is this the leaf layer?
			imageLayer := layer.ID == layers[0].ID

			// if this is the leaf layer, we are done and can now create the image config
			if imageLayer {
				imageConfig, err := ic.CreateImageConfig(layers)
				if err != nil {
					d.err = err
					return
				}
				// cache and persist the image
				cache.ImageCache().Add(&imageConfig)
				cache.ImageCache().Save()

				// place calculated ImageID in struct
				ic.ImageID = imageConfig.ImageID

				if err = updateRepositoryCache(ic); err != nil {
					d.err = err
					return
				}

			}

			ldm.m.Lock()
			defer ldm.m.Unlock()

			// Write blob to the storage layer
			if err := ic.WriteImageBlob(layer, progressOutput, imageLayer); err != nil {
				d.err = err
				return
			}

			// mark the layer as finished downloading
			LayerCache().Commit(layer)

			ldm.unregisterDownload(layer)

		}()

		return d
	}
}
Example 10
File: system.go Project: vmware/vic
func getImageCount() int {
	images := cache.ImageCache().GetImages()
	return len(images)
}
Example 11
// containerConfigFromContainerInfo returns a container.Config whose attributes have
// been overridden at create or start time.  This function is called to help build the
// Container Inspect struct, which contains both the original container config that is
// part of the image metadata AND the overridden container config.  The user can
// override these via the remote API or the docker CLI.
func containerConfigFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo) *container.Config {
	if vc == nil || vc.Config == nil || info == nil || info.ContainerConfig == nil || info.ProcessConfig == nil {
		return nil
	}

	// Copy the working copy of our container's config
	container := *vc.Config

	if info.ContainerConfig.ContainerID != nil {
		container.Hostname = stringid.TruncateID(*info.ContainerConfig.ContainerID) // Hostname
	}
	if info.ContainerConfig.AttachStdin != nil {
		container.AttachStdin = *info.ContainerConfig.AttachStdin // Attach the standard input; makes user interaction possible
	}
	if info.ContainerConfig.AttachStdout != nil {
		container.AttachStdout = *info.ContainerConfig.AttachStdout // Attach the standard output
	}
	if info.ContainerConfig.AttachStderr != nil {
		container.AttachStderr = *info.ContainerConfig.AttachStderr // Attach the standard error
	}
	if info.ContainerConfig.Tty != nil {
		container.Tty = *info.ContainerConfig.Tty // Attach standard streams to a tty, including stdin if it is not closed.
	}
	// They are not coming from PL so set them to true unconditionally
	container.OpenStdin = true // Open stdin
	container.StdinOnce = true

	if info.ContainerConfig.LayerID != nil {
		container.Image = *info.ContainerConfig.LayerID // Name of the image as it was passed by the operator (e.g. could be symbolic)
	}
	if info.ContainerConfig.Labels != nil {
		container.Labels = info.ContainerConfig.Labels // List of labels set to this container
	}

	// Fill in information about the process
	if info.ProcessConfig.Env != nil {
		container.Env = info.ProcessConfig.Env // List of environment variables to set in the container
	}
	if info.ProcessConfig.ExecPath != nil {
		container.Cmd = append(container.Cmd, *info.ProcessConfig.ExecPath) // Command to run when starting the container
	}
	if info.ProcessConfig.ExecArgs != nil {
		container.Cmd = append(container.Cmd, info.ProcessConfig.ExecArgs[1:]...)
	}
	if info.ProcessConfig.WorkingDir != nil {
		container.WorkingDir = *info.ProcessConfig.WorkingDir // Current directory (PWD) in which the command will be launched
	}

	// Fill in information about the container network
	if info.ScopeConfig == nil {
		container.NetworkDisabled = true
	} else {
		container.NetworkDisabled = false
		container.MacAddress = ""
		container.ExposedPorts = vc.Config.ExposedPorts
		container.PublishService = "" // Name of the network service exposed by the container
	}

	// Get the original container config from the image's metadata in our image cache.
	var imageConfig *metadata.ImageConfig

	if info.ContainerConfig.LayerID != nil {
		imageConfig, _ = cache.ImageCache().GetImage(*info.ContainerConfig.LayerID)
	}

	// Fill in the values with defaults from the original image's container config
	// structure
	if imageConfig != nil {
		container.StopSignal = imageConfig.ContainerConfig.StopSignal // Signal to stop a container

		container.OnBuild = imageConfig.ContainerConfig.OnBuild // ONBUILD metadata that was defined in the image's Dockerfile

		// Fill in information about the container's volumes
		// FIXME:  Why does types.ContainerJSON have Mounts and also ContainerConfig,
		// which also has Volumes?  Assuming this is a copy from the image's container
		// config until we figure this out.
		container.Volumes = imageConfig.ContainerConfig.Volumes
	}

	return &container
}
Example 12
File: imagec.go Project: vmware/vic
// CreateImageConfig constructs the image metadata from layers that compose the image
func (ic *ImageC) CreateImageConfig(images []*ImageWithMeta) (metadata.ImageConfig, error) {

	imageLayer := images[0] // the layer that represents the actual image

	// if we already have an imageID associated with this layerID, we don't need
	// to calculate imageID and can just grab the image config from the cache
	id := cache.RepositoryCache().GetImageID(imageLayer.ID)
	if image, err := cache.ImageCache().Get(id); err == nil {
		return *image, nil
	}

	manifest := ic.ImageManifest
	image := docker.V1Image{}
	rootFS := docker.NewRootFS()
	history := make([]docker.History, 0, len(images))
	diffIDs := make(map[string]string)
	var size int64

	// step through layers to get command history and diffID from oldest to newest
	for i := len(images) - 1; i >= 0; i-- {
		layer := images[i]
		if err := json.Unmarshal([]byte(layer.Meta), &image); err != nil {
			return metadata.ImageConfig{}, fmt.Errorf("Failed to unmarshall layer history: %s", err)
		}
		h := docker.History{
			Created:   image.Created,
			Author:    image.Author,
			CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "),
			Comment:   image.Comment,
		}
		history = append(history, h)
		rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.DiffID))
		diffIDs[layer.DiffID] = layer.ID
		size += layer.Size
	}

	// result is constructed without unused fields
	result := docker.Image{
		V1Image: docker.V1Image{
			Comment:         image.Comment,
			Created:         image.Created,
			Container:       image.Container,
			ContainerConfig: image.ContainerConfig,
			DockerVersion:   image.DockerVersion,
			Author:          image.Author,
			Config:          image.Config,
			Architecture:    image.Architecture,
			OS:              image.OS,
		},
		RootFS:  rootFS,
		History: history,
	}

	bytes, err := result.MarshalJSON()
	if err != nil {
		return metadata.ImageConfig{}, fmt.Errorf("Failed to marshall image metadata: %s", err)
	}

	// calculate image ID
	sum := fmt.Sprintf("%x", sha256.Sum256(bytes))
	log.Infof("Image ID: sha256:%s", sum)

	// prepare metadata
	result.V1Image.Parent = image.Parent
	result.Size = size
	result.V1Image.ID = imageLayer.ID
	imageConfig := metadata.ImageConfig{
		V1Image: result.V1Image,
		ImageID: sum,
		// TODO: this will change when issue 1186 is
		// implemented -- only populate the digests when pulled by digest
		Digests:   []string{manifest.Digest},
		Tags:      []string{ic.Tag},
		Name:      manifest.Name,
		DiffIDs:   diffIDs,
		History:   history,
		Reference: ic.Reference,
	}

	return imageConfig, nil
}