func AddMockImageToCache() { mockImage := &metadata.ImageConfig{ ImageID: "e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2", Name: "busybox", Tags: []string{"latest"}, Reference: "busybox:latest", } mockImage.Config = &container.Config{ Hostname: "55cd1f8f6e5b", Domainname: "", User: "", AttachStdin: false, AttachStdout: false, AttachStderr: false, Tty: false, OpenStdin: false, StdinOnce: false, Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, Cmd: []string{"sh"}, Image: "sha256:e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2", Volumes: nil, WorkingDir: "", Entrypoint: nil, OnBuild: nil, } cache.ImageCache().Add(mockImage) ref, _ := reference.ParseNamed(mockImage.Reference) cache.RepositoryCache().AddReference(ref, mockImage.ImageID, false, mockImage.ImageID, false) }
// updateRepositoryCache will update the repository cache // that resides in the docker persona. This will add image tag, // digest and layer information. func updateRepositoryCache(ic *ImageC) error { // LayerID for the image layer imageLayerID := ic.ImageLayers[0].ID ref, err := reference.ParseNamed(ic.Reference) if err != nil { return fmt.Errorf("Unable to parse reference: %s", err.Error()) } // get the repoCache repoCache := cache.RepositoryCache() // In the case that we don't have the ImageID, then we need // to go to the RepositoryCache to get it. if ic.ImageID == "" { // call to repository cache for the imageID for this layer ic.ImageID = repoCache.GetImageID(imageLayerID) // if we still don't have an imageID we can't continue if ic.ImageID == "" { return fmt.Errorf("ImageID not found by LayerID(%s) in RepositoryCache", imageLayerID) } } // AddReference will add the tag / digest as appropriate and will persist // to the portlayer k/v err = repoCache.AddReference(ref, ic.ImageID, false, imageLayerID, true) if err != nil { return fmt.Errorf("Unable to Add Image Reference(%s): %s", ref.String(), err.Error()) } return nil }
// TODO fix the errors so the client doesnt print the generic POST or DELETE message func (i *Image) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { defer trace.End(trace.Begin(imageRef)) var deleted []types.ImageDelete var userRefIsID bool var imageRemoved bool // Use the image cache to go from the reference to the ID we use in the image store img, err := cache.ImageCache().Get(imageRef) if err != nil { return nil, err } // Get the tags from the repo cache for this image // TODO: remove this -- we have it in the image above tags := cache.RepositoryCache().Tags(img.ImageID) // did the user pass an id or partial id userRefIsID = cache.ImageCache().IsImageID(imageRef) // do we have any reference conflicts if len(tags) > 1 && userRefIsID && !force { t := uid.Parse(img.ImageID).Truncate() return nil, fmt.Errorf("conflict: unable to delete %s (must be forced) - image is referenced in one or more repositories", t) } // if we have an ID or only 1 tag lets delete the vmdk(s) via the PL if userRefIsID || len(tags) == 1 { log.Infof("Deleting image via PL %s (%s)", img.ImageID, img.ID) // needed for image store host, err := sys.UUID() if err != nil { return nil, err } params := storage.NewDeleteImageParamsWithContext(ctx).WithStoreName(host).WithID(img.ID) // TODO: This will fail if any containerVMs are referencing the vmdk - vanilla docker // allows the removal of an image (via force flag) even if a container is referencing it // should vic? 
_, err = PortLayerClient().Storage.DeleteImage(params) if err != nil { switch err := err.(type) { case *storage.DeleteImageLocked: return nil, fmt.Errorf("Failed to remove image %q: %s", imageRef, err.Payload.Message) default: return nil, err } } // we've deleted the image so remove from cache cache.ImageCache().RemoveImageByConfig(img) imagec.LayerCache().Remove(img.ID) imageRemoved = true } else { // only untag the ref supplied n, err := reference.ParseNamed(imageRef) if err != nil { return nil, fmt.Errorf("unable to parse reference(%s): %s", imageRef, err.Error()) } tag := reference.WithDefaultTag(n) tags = []string{tag.String()} } // loop thru and remove from repoCache for i := range tags { // remove from cache, but don't save -- we'll do that afer all // updates refNamed, _ := cache.RepositoryCache().Remove(tags[i], false) dd := types.ImageDelete{Untagged: refNamed} deleted = append(deleted, dd) } // save repo now -- this will limit the number of PL // calls to one per rmi call err = cache.RepositoryCache().Save() if err != nil { return nil, fmt.Errorf("Untag error: %s", err.Error()) } if imageRemoved { imageDeleted := types.ImageDelete{Deleted: img.ImageID} deleted = append(deleted, imageDeleted) } return deleted, err }
// Containers returns the list of containers to show given the user's filtering.
//
// It fetches the full container list from the portlayer, converts each entry
// into the docker API `types.Container` shape (names, status, ports, image
// reference), and returns them sorted newest-first by creation time.
//
// NOTE(review): the swagger payload fields (ExitCode, Status, State,
// CreateTime, StorageSize, RepoName, ...) are dereferenced without nil checks
// -- presumably the portlayer guarantees they are populated; confirm against
// the API spec.
func (c *Container) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
	// Get an API client to the portlayer
	client := c.containerProxy.Client()
	containme, err := client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&config.All))
	if err != nil {
		switch err := err.(type) {
		// portlayer 500s carry a structured payload message; surface it
		case *containers.GetContainerListInternalServerError:
			return nil, fmt.Errorf("Error invoking GetContainerList: %s", err.Payload.Message)
		default:
			return nil, fmt.Errorf("Error invoking GetContainerList: %s", err.Error())
		}
	}
	// TODO: move to conversion function
	containers := make([]*types.Container, 0, len(containme.Payload))
	for _, t := range containme.Payload {
		cmd := strings.Join(t.ProcessConfig.ExecArgs, " ")
		// the docker client expects the friendly name to be prefixed
		// with a forward slash -- create a new slice and add here
		names := make([]string, 0, len(t.ContainerConfig.Names))
		for i := range t.ContainerConfig.Names {
			names = append(names, clientFriendlyContainerName(t.ContainerConfig.Names[i]))
		}
		var started time.Time
		var stopped time.Time
		// zero-valued timestamps mean "never started/stopped"; leave the
		// time.Time at its zero value in that case
		if t.ProcessConfig.StartTime != nil && *t.ProcessConfig.StartTime > 0 {
			started = time.Unix(*t.ProcessConfig.StartTime, 0)
		}
		if t.ProcessConfig.StopTime != nil && *t.ProcessConfig.StopTime > 0 {
			stopped = time.Unix(*t.ProcessConfig.StopTime, 0)
		}
		// get the docker friendly status
		_, status := dockerStatus(int(*t.ProcessConfig.ExitCode), *t.ProcessConfig.Status, *t.ContainerConfig.State, started, stopped)
		ips, err := externalIPv4Addrs()
		var ports []types.Port
		if err != nil {
			// port data is cosmetic in the listing -- log and continue
			// with an empty port list rather than failing the whole call
			log.Errorf("Could not get IP information for reporting port bindings.")
		} else {
			ports = portInformation(t, ips)
		}
		// verify that the repo:tag exists for the container -- if it doesn't then we should present the
		// truncated imageID -- if we have a failure determining then we'll show the data we have
		repo := *t.ContainerConfig.RepoName
		// parse error deliberately ignored: a nil ref simply means we keep
		// the raw RepoName string as-is
		ref, _ := reference.ParseNamed(*t.ContainerConfig.RepoName)
		if ref != nil {
			imageID, err := cache.RepositoryCache().Get(ref)
			if err != nil && err == cache.ErrDoesNotExist {
				// the tag has been removed, so we need to show the truncated imageID
				imageID = cache.RepositoryCache().GetImageID(*t.ContainerConfig.LayerID)
				if imageID != "" {
					id := uid.Parse(imageID)
					repo = id.Truncate().String()
				}
			}
		}
		c := &types.Container{
			ID:      *t.ContainerConfig.ContainerID,
			Image:   repo,
			Created: *t.ContainerConfig.CreateTime,
			Status:  status,
			Names:   names,
			Command: cmd,
			SizeRw:  *t.ContainerConfig.StorageSize,
			Ports:   ports,
		}
		containers = append(containers, c)
	}
	// sort on creation time, newest first
	sort.Sort(sort.Reverse(containerByCreated(containers)))
	return containers, nil
}
// CreateImageConfig constructs the image metadata from layers that compose the image func (ic *ImageC) CreateImageConfig(images []*ImageWithMeta) (metadata.ImageConfig, error) { imageLayer := images[0] // the layer that represents the actual image // if we already have an imageID associated with this layerID, we don't need // to calculate imageID and can just grab the image config from the cache id := cache.RepositoryCache().GetImageID(imageLayer.ID) if image, err := cache.ImageCache().Get(id); err == nil { return *image, nil } manifest := ic.ImageManifest image := docker.V1Image{} rootFS := docker.NewRootFS() history := make([]docker.History, 0, len(images)) diffIDs := make(map[string]string) var size int64 // step through layers to get command history and diffID from oldest to newest for i := len(images) - 1; i >= 0; i-- { layer := images[i] if err := json.Unmarshal([]byte(layer.Meta), &image); err != nil { return metadata.ImageConfig{}, fmt.Errorf("Failed to unmarshall layer history: %s", err) } h := docker.History{ Created: image.Created, Author: image.Author, CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "), Comment: image.Comment, } history = append(history, h) rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.DiffID)) diffIDs[layer.DiffID] = layer.ID size += layer.Size } // result is constructed without unused fields result := docker.Image{ V1Image: docker.V1Image{ Comment: image.Comment, Created: image.Created, Container: image.Container, ContainerConfig: image.ContainerConfig, DockerVersion: image.DockerVersion, Author: image.Author, Config: image.Config, Architecture: image.Architecture, OS: image.OS, }, RootFS: rootFS, History: history, } bytes, err := result.MarshalJSON() if err != nil { return metadata.ImageConfig{}, fmt.Errorf("Failed to marshall image metadata: %s", err) } // calculate image ID sum := fmt.Sprintf("%x", sha256.Sum256(bytes)) log.Infof("Image ID: sha256:%s", sum) // prepare metadata result.V1Image.Parent = 
image.Parent result.Size = size result.V1Image.ID = imageLayer.ID imageConfig := metadata.ImageConfig{ V1Image: result.V1Image, ImageID: sum, // TODO: this will change when issue 1186 is // implemented -- only populate the digests when pulled by digest Digests: []string{manifest.Digest}, Tags: []string{ic.Tag}, Name: manifest.Name, DiffIDs: diffIDs, History: history, Reference: ic.Reference, } return imageConfig, nil }