// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct
func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
	// Top-level v1compatibility string should be a modified version of the
	// image config.
	var configAsMap map[string]*json.RawMessage
	if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
		return nil, err
	}

	// Delete fields that didn't exist in old manifest
	imageType := reflect.TypeOf(img).Elem()
	for i := 0; i < imageType.NumField(); i++ {
		f := imageType.Field(i)
		jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
		// Parent is handled specially below.
		if jsonName != "" && jsonName != "parent" {
			delete(configAsMap, jsonName)
		}
	}
	configAsMap["id"] = rawJSON(v1ID)
	if parentV1ID != "" {
		configAsMap["parent"] = rawJSON(parentV1ID)
	}
	if throwaway {
		configAsMap["throwaway"] = rawJSON(true)
	}

	return json.Marshal(configAsMap)
}
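// The rawJSON helper used above is not included in this snippet. Below is a minimal
// sketch of what such a helper plausibly looks like (an assumption, not necessarily the
// verbatim upstream code): marshal the value and hand the bytes back as a
// *json.RawMessage so it can be spliced into the config map unmodified.
func rawJSON(value interface{}) *json.RawMessage {
	jsonval, err := json.Marshal(value)
	if err != nil {
		return nil
	}
	return (*json.RawMessage)(&jsonval)
}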
// Register imports a pre-existing image into the graph.
func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) {
	if err := image.ValidateID(img.ID); err != nil {
		return err
	}

	// We need this entire operation to be atomic within the engine. Note that
	// this doesn't mean Register is fully safe yet.
	graph.imageMutex.Lock(img.ID)
	defer graph.imageMutex.Unlock(img.ID)

	defer func() {
		// If any error occurs, remove the new dir from the driver.
		// Don't check for errors since the dir might not have been created.
		// FIXME: this leaves a possible race condition.
		if err != nil {
			graph.driver.Remove(img.ID)
		}
	}()

	// (This is a convenience to save time. Race conditions are taken care of by os.Rename)
	if graph.Exists(img.ID) {
		return fmt.Errorf("Image %s already exists", img.ID)
	}

	// Ensure that the image root does not exist on the filesystem
	// when it is not registered in the graph.
	// This is common when you switch from one graph driver to another
	if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the driver has this ID but the graph doesn't, remove it from the driver to start fresh.
	// (the graph is the source of truth).
	// Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
	// (FIXME: make that mandatory for drivers).
	graph.driver.Remove(img.ID)

	tmp, err := graph.Mktemp("")
	defer os.RemoveAll(tmp)
	if err != nil {
		return fmt.Errorf("Mktemp failed: %s", err)
	}

	// Create root filesystem in the driver
	if err := graph.driver.Create(img.ID, img.Parent); err != nil {
		return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
	}

	// Apply the diff/layer
	img.SetGraph(graph)
	if err := image.StoreImage(img, layerData, tmp); err != nil {
		return err
	}
	// Commit
	if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil {
		return err
	}
	graph.idIndex.Add(img.ID)
	return nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) {
	var (
		container *Container
		warnings  []string
		img       *image.Image
		imgID     string
		err       error
	)

	if config.Image != "" {
		img, err = daemon.repositories.LookupImage(config.Image)
		if err != nil {
			return nil, nil, err
		}
		if err = img.CheckDepth(); err != nil {
			return nil, nil, err
		}
		imgID = img.ID
	}

	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
		return nil, nil, err
	}
	if hostConfig == nil {
		hostConfig = &runconfig.HostConfig{}
	}
	if hostConfig.SecurityOpt == nil {
		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
		if err != nil {
			return nil, nil, err
		}
	}
	if container, err = daemon.newContainer(name, config, imgID); err != nil {
		return nil, nil, err
	}
	if err := daemon.Register(container); err != nil {
		return nil, nil, err
	}
	if err := daemon.createRootfs(container); err != nil {
		return nil, nil, err
	}
	if hostConfig != nil {
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return nil, nil, err
		}
	}
	if err := container.Mount(); err != nil {
		return nil, nil, err
	}
	defer container.Unmount()
	if err := container.prepareVolumes(); err != nil {
		return nil, nil, err
	}
	if err := container.ToDisk(); err != nil {
		return nil, nil, err
	}
	return container, warnings, nil
}
func (daemon *Daemon) checkImageDepth(img *image.Image) error {
	// We add 2 layers to the depth because the container's rw and
	// init layer add to the restriction
	depth, err := img.Depth()
	if err != nil {
		return err
	}
	if depth+2 >= MaxImageDepth {
		return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
	}
	return nil
}
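// For illustration only: MaxImageDepth is defined elsewhere in the daemon; the value
// below is an assumption based on the historical AUFS-era layer limit, not taken from
// this snippet. The "+2" accounts for the init layer and the rw layer that every
// container stacks on top of its image.
const assumedMaxImageDepth = 127

func canStackContainerLayers(imageDepth int) bool {
	return imageDepth+2 < assumedMaxImageDepth
}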
func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) {
	imgID, err := ic.localImageCache.GetCache(parentID, cfg)
	if err != nil {
		return "", err
	}
	if imgID != "" {
		for _, s := range ic.sources {
			if ic.isParent(s.ID(), image.ID(imgID)) {
				return imgID, nil
			}
		}
	}

	var parent *image.Image
	lenHistory := 0
	if parentID != "" {
		parent, err = ic.daemon.imageStore.Get(image.ID(parentID))
		if err != nil {
			return "", errors.Wrapf(err, "unable to find image %v", parentID)
		}
		lenHistory = len(parent.History)
	}

	for _, target := range ic.sources {
		if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) {
			continue
		}

		if len(target.History)-1 == lenHistory { // last
			if parent != nil {
				if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil {
					return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
				}
			}
			return target.ID().String(), nil
		}

		imgID, err := ic.restoreCachedImage(parent, target, cfg)
		if err != nil {
			return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID())
		}

		ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm
		return imgID.String(), nil
	}

	return "", nil
}
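// isValidParent and isValidConfig are not shown in this snippet. A hedged sketch of the
// parent check, under the assumption that a cache candidate is only usable when the
// parent's history entries and layer diff IDs form a strict prefix of the candidate's:
func isValidParentSketch(img, parent *image.Image) bool {
	if len(img.History) == 0 {
		return false
	}
	if parent == nil || (len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0) {
		return true // no parent: any candidate with history is acceptable
	}
	if len(parent.History) >= len(img.History) || len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) {
		return false
	}
	for i, h := range parent.History {
		if !reflect.DeepEqual(h, img.History[i]) {
			return false
		}
	}
	for i, d := range parent.RootFS.DiffIDs {
		if d != img.RootFS.DiffIDs[i] {
			return false
		}
	}
	return true
}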
func (b *Builder) processImageFrom(img *image.Image) error {
	b.image = img.ID().String()

	if img.Config != nil {
		b.runConfig = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.runConfig.Env) == 0 && container.DefaultPathEnv != "" {
		b.runConfig.Env = append(b.runConfig.Env, "PATH="+container.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct
func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
	// Top-level v1compatibility string should be a modified version of the
	// image config.
	var configAsMap map[string]*json.RawMessage
	if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
		return nil, err
	}

	// Delete fields that didn't exist in old manifest
	delete(configAsMap, "rootfs")
	delete(configAsMap, "history")

	configAsMap["id"] = rawJSON(v1ID)
	if parentV1ID != "" {
		configAsMap["parent"] = rawJSON(parentV1ID)
	}
	if throwaway {
		configAsMap["throwaway"] = rawJSON(true)
	}

	return json.Marshal(configAsMap)
}
func newImage(image *image.Image, size int64) *types.Image {
	newImage := new(types.Image)
	newImage.ParentID = image.Parent.String()
	newImage.ID = image.ID().String()
	newImage.Created = image.Created.Unix()
	newImage.Size = size
	newImage.VirtualSize = size
	if image.Config != nil {
		newImage.Labels = image.Config.Labels
	}
	return newImage
}
func newImage(image *image.Image, virtualSize int64) *types.ImageSummary {
	newImage := new(types.ImageSummary)
	newImage.ParentID = image.Parent.String()
	newImage.ID = image.ID().String()
	newImage.Created = image.Created.Unix()
	newImage.Size = -1
	newImage.VirtualSize = virtualSize
	newImage.SharedSize = -1
	newImage.Containers = -1
	if image.Config != nil {
		newImage.Labels = image.Config.Labels
	}
	return newImage
}
// TestPushBadParentChain tries to push an image with a corrupted parent chain
// in the v1compatibility files, and makes sure the push process fixes it.
func (s *DockerRegistrySuite) TestPushBadParentChain(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/badparent", privateRegistryURL)

	id, err := buildImage(repoName, `
	    FROM busybox
	    CMD echo "adding another layer"
	    `, true)
	if err != nil {
		c.Fatal(err)
	}

	// Push to create v1compatibility file
	dockerCmd(c, "push", repoName)

	// Corrupt the parent in the v1compatibility file from the top layer
	filename := filepath.Join(dockerBasePath, "graph", id, "v1Compatibility")

	jsonBytes, err := ioutil.ReadFile(filename)
	c.Assert(err, check.IsNil, check.Commentf("Could not read v1Compatibility file: %s", err))

	var img image.Image
	err = json.Unmarshal(jsonBytes, &img)
	c.Assert(err, check.IsNil, check.Commentf("Could not unmarshal json: %s", err))

	img.Parent = "1234123412341234123412341234123412341234123412341234123412341234"

	jsonBytes, err = json.Marshal(&img)
	c.Assert(err, check.IsNil, check.Commentf("Could not marshal json: %s", err))

	err = ioutil.WriteFile(filename, jsonBytes, 0600)
	c.Assert(err, check.IsNil, check.Commentf("Could not write v1Compatibility file: %s", err))

	dockerCmd(c, "push", repoName)

	// pull should succeed
	dockerCmd(c, "pull", repoName)
}
func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) {
	var history []image.History
	rootFS := image.NewRootFS()
	lenHistory := 0
	if parent != nil {
		history = parent.History
		rootFS = parent.RootFS
		lenHistory = len(parent.History)
	}
	history = append(history, target.History[lenHistory])
	if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" {
		rootFS.Append(layer)
	}

	config, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        cfg,
			Architecture:  target.Architecture,
			OS:            target.OS,
			Author:        target.Author,
			Created:       history[len(history)-1].Created,
		},
		RootFS:     rootFS,
		History:    history,
		OSFeatures: target.OSFeatures,
		OSVersion:  target.OSVersion,
	})
	if err != nil {
		return "", errors.Wrap(err, "failed to marshal image config")
	}

	imgID, err := ic.daemon.imageStore.Create(config)
	if err != nil {
		return "", errors.Wrap(err, "failed to create cache image")
	}

	if parent != nil {
		if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil {
			return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
		}
	}
	return imgID, nil
}
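// getLayerForHistoryIndex is not included in this snippet. A plausible sketch, assuming
// it maps a history entry to the DiffID of the corresponding non-empty layer (history
// entries flagged EmptyLayer consume no layer, so they are skipped while counting):
func getLayerForHistoryIndexSketch(img *image.Image, index int) layer.DiffID {
	layerIndex := 0
	for i, h := range img.History {
		if i == index {
			if h.EmptyLayer {
				return ""
			}
			break
		}
		if !h.EmptyLayer {
			layerIndex++
		}
	}
	if layerIndex >= len(img.RootFS.DiffIDs) {
		return ""
	}
	return img.RootFS.DiffIDs[layerIndex]
}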
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		imgID = img.ID()
	}

	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil {
				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
			}
		}
	}()

	logCfg := container.GetLogConfig(daemon.defaultLogConfig)
	if err := logger.ValidateLogOpts(logCfg.Type, logCfg.Config); err != nil {
		return nil, err
	}

	if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
		return nil, err
	}

	// Set RWLayer for container after mount labels have been set
	if err := daemon.setRWLayer(container); err != nil {
		return nil, err
	}

	if err := daemon.Register(container); err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.removeMountPoints(container, true); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
		return nil, err
	}

	var endpointsConfigs map[string]*networktypes.EndpointSettings
	if params.NetworkingConfig != nil {
		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
	}

	if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil {
		return nil, err
	}

	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
func getImageIDAndError(img *image.Image, err error) (string, error) {
	if img == nil || err != nil {
		return "", err
	}
	return img.ID().String(), nil
}
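// A hedged usage sketch (not from the original source): the helper above is handy for
// collapsing a lookup that returns (*image.Image, error) into a plain string ID, for
// example when wrapping daemon.GetImage as seen in the Create snippets in this collection.
func lookupImageIDSketch(daemon *Daemon, refOrID string) (string, error) {
	return getImageIDAndError(daemon.GetImage(refOrID))
}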
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) {
	var (
		container *Container
		warnings  []string
		img       *image.Image
		imgID     string
		err       error
	)

	if config.Image != "" {
		img, err = daemon.repositories.LookupImage(config.Image)
		if err != nil {
			return nil, nil, err
		}
		if err = img.CheckDepth(); err != nil {
			return nil, nil, err
		}
		imgID = img.ID
	}

	if err := daemon.mergeAndVerifyConfig(config, img); err != nil {
		return nil, nil, err
	}

	if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		warnings = append(warnings, "IPv4 forwarding is disabled.")
	}

	if hostConfig == nil {
		hostConfig = &runconfig.HostConfig{}
	}
	if hostConfig.SecurityOpt == nil {
		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
		if err != nil {
			return nil, nil, err
		}
	}

	if container, err = daemon.newContainer(name, config, imgID); err != nil {
		return nil, nil, err
	}

	if err := daemon.Register(container); err != nil {
		return nil, nil, err
	}
	if err := daemon.createRootfs(container); err != nil {
		return nil, nil, err
	}
	if err := daemon.setHostConfig(container, hostConfig); err != nil {
		return nil, nil, err
	}
	if err := container.Mount(); err != nil {
		return nil, nil, err
	}
	defer container.Unmount()

	for spec := range config.Volumes {
		var (
			name, destination string
			parts             = strings.Split(spec, ":")
		)
		switch len(parts) {
		case 2:
			name, destination = parts[0], filepath.Clean(parts[1])
		default:
			name = stringid.GenerateRandomID()
			destination = filepath.Clean(parts[0])
		}
		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.isDestinationMounted(destination) {
			continue
		}
		path, err := container.GetResourcePath(destination)
		if err != nil {
			return nil, nil, err
		}

		stat, err := os.Stat(path)
		if err == nil && !stat.IsDir() {
			return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
		}

		v, err := createVolume(name, config.VolumeDriver)
		if err != nil {
			return nil, nil, err
		}
		if err := label.Relabel(v.Path(), container.MountLabel, "z"); err != nil {
			return nil, nil, err
		}

		if err := container.copyImagePathContent(v, destination); err != nil {
			return nil, nil, err
		}

		container.addMountPointWithVolume(destination, v, true)
	}
	if err := container.ToDisk(); err != nil {
		return nil, nil, err
	}
	return container, warnings, nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		imgID = img.ID()
	}

	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.ContainerRm(container.ID, &ContainerRmConfig{ForceRemove: true}); err != nil {
				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
			}
		}
	}()

	if err := daemon.Register(container); err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.removeMountPoints(container, true); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil {
		return nil, err
	}

	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// layer.tar has an extra folder in it so we have to strip it :/
	artifactReader, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer artifactReader.Close()

	layerFile, err := os.OpenFile(s.options.HostPath("real_layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	dgst := digest.Canonical.New()
	mwriter := io.MultiWriter(layerFile, dgst.Hash())

	tr := tar.NewReader(artifactReader)
	tw := tar.NewWriter(mwriter)

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}

		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}

		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}

		if len(hdr.Name) == 0 {
			continue
		}

		tw.WriteHeader(hdr)
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}

	digest := dgst.Digest()

	config := &container.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		Volumes:      s.volumes,
		ExposedPorts: tranformPorts(s.ports),
	}

	// Make the JSON file we need
	t := time.Now()
	base := image.V1Image{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: container.Config{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.10",
		Created:       t,
		OS:            "linux",
		Config:        config,
	}

	imageJSON := image.Image{
		V1Image: base,
		History: []image.History{image.History{Created: t}},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{layer.DiffID(digest)},
		},
	}

	js, err := imageJSON.MarshalJSON()
	if err != nil {
		return -1, err
	}

	hash := sha256.New()
	hash.Write(js)
	layerID := hex.EncodeToString(hash.Sum(nil))

	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}

	layerFile.Close()

	err = os.Rename(layerFile.Name(), s.options.HostPath("scratch", layerID, "layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()

	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}

	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()

	_, err = jsonFile.Write(js)
	if err != nil {
		return -1, err
	}

	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()

	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.authenticator.Repository(s.repository))))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()

	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	// Check the auth
	if !s.dockerOptions.DockerLocal {
		check, err := s.authenticator.CheckAccess(s.repository, auth.Push)
		if !check || err != nil {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	s.repository = s.authenticator.Repository(s.repository)
	s.logger.WithFields(util.LogFields{
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return 1, err
	}

	return s.tagAndPush(layerID, e, client)
}
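// tranformPorts is not part of this snippet. A hedged, hypothetical sketch of what such
// a helper plausibly does, assuming s.ports is a []string of "port/proto" entries and
// that container.Config.ExposedPorts is the usual nat.PortSet map type:
func transformPortsSketch(ports []string) nat.PortSet {
	set := nat.PortSet{}
	for _, p := range ports {
		// e.g. "8080/tcp"; no validation here, this is only an illustration
		set[nat.Port(p)] = struct{}{}
	}
	return set
}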
// SquashImage creates a new image with the diff of the specified image and the specified parent.
// This new image contains only the layers from its parent plus one extra layer, which holds
// the diff of all the layers in between.
// The existing image(s) are not destroyed.
// If no parent is specified, a new image is created from the diff of all the specified image's
// layers merged into a single layer with no parent.
func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
	img, err := daemon.imageStore.Get(image.ID(id))
	if err != nil {
		return "", err
	}

	var parentImg *image.Image
	var parentChainID layer.ChainID
	if len(parent) != 0 {
		parentImg, err = daemon.imageStore.Get(image.ID(parent))
		if err != nil {
			return "", errors.Wrap(err, "error getting specified parent layer")
		}
		parentChainID = parentImg.RootFS.ChainID()
	} else {
		rootFS := image.NewRootFS()
		parentImg = &image.Image{RootFS: rootFS}
	}

	l, err := daemon.layerStore.Get(img.RootFS.ChainID())
	if err != nil {
		return "", errors.Wrap(err, "error getting image layer")
	}
	defer daemon.layerStore.Release(l)

	ts, err := l.TarStreamFrom(parentChainID)
	if err != nil {
		return "", errors.Wrapf(err, "error getting tar stream to parent")
	}
	defer ts.Close()

	newL, err := daemon.layerStore.Register(ts, parentChainID)
	if err != nil {
		return "", errors.Wrap(err, "error registering layer")
	}
	defer daemon.layerStore.Release(newL)

	var newImage image.Image
	newImage = *img
	newImage.RootFS = nil

	var rootFS image.RootFS
	rootFS = *parentImg.RootFS
	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
	newImage.RootFS = &rootFS

	for i, hi := range newImage.History {
		if i >= len(parentImg.History) {
			hi.EmptyLayer = true
		}
		newImage.History[i] = hi
	}

	now := time.Now()
	var historyComment string
	if len(parent) > 0 {
		historyComment = fmt.Sprintf("merge %s to %s", id, parent)
	} else {
		historyComment = fmt.Sprintf("create new from %s", id)
	}

	newImage.History = append(newImage.History, image.History{
		Created: now,
		Comment: historyComment,
	})
	newImage.Created = now

	b, err := json.Marshal(&newImage)
	if err != nil {
		return "", errors.Wrap(err, "error marshalling image config")
	}

	newImgID, err := daemon.imageStore.Create(b)
	if err != nil {
		return "", errors.Wrap(err, "error creating new image after squash")
	}
	return string(newImgID), nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		// Look up the image
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		// Record the image ID
		imgID = img.ID()
	}

	// Merge and verify the configuration
	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	// Create a new container. Follow the newContainer method in daemon/daemon.go for details.
	// It returns a Container, a structure along these lines:
	/*
		type Container struct {
			// Fields shared by all platforms
			CommonContainer

			// Platform-specific fields; only meaningful on Unix-like systems.
			AppArmorProfile string
			HostnamePath    string
			HostsPath       string
			ShmPath         string
			ResolvConfPath  string
			SeccompProfile  string
			NoNewPrivileges bool
		}
	*/
	// This only creates the container object in the daemon's memory; no filesystem and no
	// namespaces are created here. At most, container creation goes as far as setting up
	// the filesystem; namespaces only take effect once the container is started.
	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
		return nil, err
	}
	// If creation fails, attempt to remove the container.
	defer func() {
		if retErr != nil {
			if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil {
				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
			}
		}
	}()

	// Configure the container's security settings, e.g. AppArmor, SELinux.
	if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
		return nil, err
	}

	// Set RWLayer for container after mount labels have been set.
	// This sets up the read-write layer, i.e. obtains the layer IDs for both the image
	// layers and the container layer. See the setRWLayer function in this file for details.
	if err := daemon.setRWLayer(container); err != nil {
		return nil, err
	}

	// Register the container with the daemon:
	// daemon.containers.Add(c.ID, c)
	if err := daemon.Register(container); err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	// Set permissions on the container root directory (the metadata directory
	// /var/lib/docker/containers).
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	// This method does two main things:
	// mounts the volumes;
	// sets up links between containers.
	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.removeMountPoints(container, true); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// This step handles platform-specific operations; see daemon/create_unix.go:
	/*
		func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
			if err := daemon.Mount(container); err != nil {
				return err
			}
			defer daemon.Unmount(container)

			rootUID, rootGID := daemon.GetRemappedUIDGID()
			if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil {
				return err
			}

			for spec := range config.Volumes {
				name := stringid.GenerateNonCryptoID()
				destination := filepath.Clean(spec)

				// Skip volumes for which we already have something mounted on that
				// destination because of a --volume-from.
				if container.IsDestinationMounted(destination) {
					continue
				}
				path, err := container.GetResourcePath(destination)
				if err != nil {
					return err
				}

				stat, err := os.Stat(path)
				if err == nil && !stat.IsDir() {
					return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
				}

				v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil)
				if err != nil {
					return err
				}

				if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
					return err
				}

				container.AddMountPointWithVolume(destination, v, true)
			}
			return daemon.populateVolumes(container)
		}
	*/
	// This step performs the platform-specific setup: mainly mounting directories and
	// files, and creating/attaching volumes.
	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
		return nil, err
	}

	// Network endpoint configuration
	var endpointsConfigs map[string]*networktypes.EndpointSettings
	if params.NetworkingConfig != nil {
		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
	}

	// Update the network settings (daemon/container_operations.go); this ties into the
	// libnetwork module and deserves careful study. At this point only the network mode
	// in container.NetworkSettings.Networks is updated; no network is actually created yet.
	if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil {
		return nil, err
	}

	// Persist the container's configuration to disk. (How does this differ from the plain
	// ToDisk call used elsewhere?)
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	// Log the container "create" event.
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
// CreateImageConfig constructs the image metadata from layers that compose the image
func CreateImageConfig(images []*ImageWithMeta, manifest *Manifest) error {
	if len(images) == 0 {
		return nil
	}

	imageLayer := images[0] // the layer that represents the actual image
	image := docker.V1Image{}
	rootFS := docker.NewRootFS()
	history := make([]docker.History, 0, len(images))
	diffIDs := make(map[string]string)
	var size int64

	// step through layers to get command history and diffID from oldest to newest
	for i := len(images) - 1; i >= 0; i-- {
		layer := images[i]
		if err := json.Unmarshal([]byte(layer.meta), &image); err != nil {
			return fmt.Errorf("Failed to unmarshall layer history: %s", err)
		}
		h := docker.History{
			Created:   image.Created,
			Author:    image.Author,
			CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "),
			Comment:   image.Comment,
		}
		history = append(history, h)
		rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.diffID))
		diffIDs[layer.diffID] = layer.ID
		size += layer.size
	}

	// result is constructed without unused fields
	result := docker.Image{
		V1Image: docker.V1Image{
			Comment:         image.Comment,
			Created:         image.Created,
			Container:       image.Container,
			ContainerConfig: image.ContainerConfig,
			DockerVersion:   image.DockerVersion,
			Author:          image.Author,
			Config:          image.Config,
			Architecture:    image.Architecture,
			OS:              image.OS,
		},
		RootFS:  rootFS,
		History: history,
	}

	bytes, err := result.MarshalJSON()
	if err != nil {
		return fmt.Errorf("Failed to marshall image metadata: %s", err)
	}

	// calculate image ID
	sum := fmt.Sprintf("%x", sha256.Sum256(bytes))
	log.Infof("Image ID: sha256:%s", sum)

	// prepare metadata
	result.V1Image.Parent = image.Parent
	result.Size = size
	result.V1Image.ID = imageLayer.ID
	metaData := metadata.ImageConfig{
		V1Image: result.V1Image,
		ImageID: sum,
		// TODO: this will change when issue 1186 is
		// implemented -- only populate the digests when pulled by digest
		Digests: []string{manifest.Digest},
		Tags:    []string{options.tag},
		Name:    manifest.Name,
		DiffIDs: diffIDs,
		History: history,
	}

	blob, err := json.Marshal(metaData)
	if err != nil {
		return fmt.Errorf("Failed to marshal image metadata: %s", err)
	}

	// store metadata
	imageLayer.meta = string(blob)

	return nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		imgID = img.ID()
	}

	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil {
		return nil, err
	}

	if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.cleanupContainer(container, true, true); err != nil {
				logrus.Errorf("failed to cleanup container on create error: %v", err)
			}
		}
	}()

	if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
		return nil, err
	}

	container.HostConfig.StorageOpt = params.HostConfig.StorageOpt

	// Set RWLayer for container after mount labels have been set
	if err := daemon.setRWLayer(container); err != nil {
		return nil, err
	}

	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}
	if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}

	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
		return nil, err
	}

	var endpointsConfigs map[string]*networktypes.EndpointSettings
	if params.NetworkingConfig != nil {
		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
	}
	// Make sure NetworkMode has an acceptable value. We do this to ensure
	// backwards API compatibility.
	container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig)

	daemon.updateContainerNetworkSettings(container, endpointsConfigs)

	if err := container.ToDisk(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	if err := daemon.Register(container); err != nil {
		return nil, err
	}
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
// CreateV2Manifest creates a V2 manifest from an image config and set of
// FSLayer digests.
// FIXME: This should be moved to the distribution repo, since it will also
// be useful for converting new manifests to the old format.
func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
	if len(img.History) == 0 {
		return nil, errors.New("empty history when trying to create V2 manifest")
	}

	// Generate IDs for each layer
	// For non-top-level layers, create fake V1Compatibility strings that
	// fit the format and don't collide with anything else, but don't
	// result in runnable images on their own.
	type v1Compatibility struct {
		ID              string    `json:"id"`
		Parent          string    `json:"parent,omitempty"`
		Comment         string    `json:"comment,omitempty"`
		Created         time.Time `json:"created"`
		ContainerConfig struct {
			Cmd []string
		} `json:"container_config,omitempty"`
		ThrowAway bool `json:"throwaway,omitempty"`
	}

	fsLayerList := make([]schema1.FSLayer, len(img.History))
	history := make([]schema1.History, len(img.History))

	parent := ""
	layerCounter := 0
	for i, h := range img.History {
		if i == len(img.History)-1 {
			break
		}

		var diffID layer.DiffID
		if h.EmptyLayer {
			diffID = layer.EmptyLayer.DiffID()
		} else {
			if len(img.RootFS.DiffIDs) <= layerCounter {
				return nil, errors.New("too many non-empty layers in History section")
			}
			diffID = img.RootFS.DiffIDs[layerCounter]
			layerCounter++
		}

		fsLayer, present := fsLayers[diffID]
		if !present {
			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
		}

		dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
		if err != nil {
			return nil, err
		}
		v1ID := dgst.Hex()

		v1Compatibility := v1Compatibility{
			ID:      v1ID,
			Parent:  parent,
			Comment: h.Comment,
			Created: h.Created,
		}
		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
		if h.EmptyLayer {
			v1Compatibility.ThrowAway = true
		}

		jsonBytes, err := json.Marshal(&v1Compatibility)
		if err != nil {
			return nil, err
		}

		reversedIndex := len(img.History) - i - 1
		history[reversedIndex].V1Compatibility = string(jsonBytes)
		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}

		parent = v1ID
	}

	latestHistory := img.History[len(img.History)-1]

	var diffID layer.DiffID
	if latestHistory.EmptyLayer {
		diffID = layer.EmptyLayer.DiffID()
	} else {
		if len(img.RootFS.DiffIDs) <= layerCounter {
			return nil, errors.New("too many non-empty layers in History section")
		}
		diffID = img.RootFS.DiffIDs[layerCounter]
	}
	fsLayer, present := fsLayers[diffID]
	if !present {
		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
	}

	dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))
	if err != nil {
		return nil, err
	}
	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}

	// Top-level v1compatibility string should be a modified version of the
	// image config.
	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
	if err != nil {
		return nil, err
	}

	history[0].V1Compatibility = string(transformedConfig)

	// windows-only baselayer setup
	if err := setupBaseLayer(history, *img.RootFS); err != nil {
		return nil, err
	}

	return &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         name,
		Tag:          tag,
		Architecture: img.Architecture,
		FSLayers:     fsLayerList,
		History:      history,
	}, nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		imgID = img.ID()
	}

	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil {
		return nil, err
	}

	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.cleanupContainer(container, true); err != nil {
				logrus.Errorf("failed to cleanup container on create error: %v", err)
			}
		}
	}()

	if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
		return nil, err
	}

	container.HostConfig.StorageOpt = params.HostConfig.StorageOpt

	// Set RWLayer for container after mount labels have been set
	if err := daemon.setRWLayer(container); err != nil {
		return nil, err
	}

	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.removeMountPoints(container, true); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
		return nil, err
	}

	var endpointsConfigs map[string]*networktypes.EndpointSettings
	if params.NetworkingConfig != nil {
		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
	}

	if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil {
		return nil, err
	}

	if err := container.ToDisk(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	if err := daemon.Register(container); err != nil {
		return nil, err
	}
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
// CreateImageConfig constructs the image metadata from layers that compose the image
func (ic *ImageC) CreateImageConfig(images []*ImageWithMeta) (metadata.ImageConfig, error) {
	imageLayer := images[0] // the layer that represents the actual image

	// if we already have an imageID associated with this layerID, we don't need
	// to calculate imageID and can just grab the image config from the cache
	id := cache.RepositoryCache().GetImageID(imageLayer.ID)
	if image, err := cache.ImageCache().Get(id); err == nil {
		return *image, nil
	}

	manifest := ic.ImageManifest
	image := docker.V1Image{}
	rootFS := docker.NewRootFS()
	history := make([]docker.History, 0, len(images))
	diffIDs := make(map[string]string)
	var size int64

	// step through layers to get command history and diffID from oldest to newest
	for i := len(images) - 1; i >= 0; i-- {
		layer := images[i]
		if err := json.Unmarshal([]byte(layer.Meta), &image); err != nil {
			return metadata.ImageConfig{}, fmt.Errorf("Failed to unmarshall layer history: %s", err)
		}
		h := docker.History{
			Created:   image.Created,
			Author:    image.Author,
			CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "),
			Comment:   image.Comment,
		}
		history = append(history, h)
		rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.DiffID))
		diffIDs[layer.DiffID] = layer.ID
		size += layer.Size
	}

	// result is constructed without unused fields
	result := docker.Image{
		V1Image: docker.V1Image{
			Comment:         image.Comment,
			Created:         image.Created,
			Container:       image.Container,
			ContainerConfig: image.ContainerConfig,
			DockerVersion:   image.DockerVersion,
			Author:          image.Author,
			Config:          image.Config,
			Architecture:    image.Architecture,
			OS:              image.OS,
		},
		RootFS:  rootFS,
		History: history,
	}

	bytes, err := result.MarshalJSON()
	if err != nil {
		return metadata.ImageConfig{}, fmt.Errorf("Failed to marshall image metadata: %s", err)
	}

	// calculate image ID
	sum := fmt.Sprintf("%x", sha256.Sum256(bytes))
	log.Infof("Image ID: sha256:%s", sum)

	// prepare metadata
	result.V1Image.Parent = image.Parent
	result.Size = size
	result.V1Image.ID = imageLayer.ID
	imageConfig := metadata.ImageConfig{
		V1Image: result.V1Image,
		ImageID: sum,
		// TODO: this will change when issue 1186 is
		// implemented -- only populate the digests when pulled by digest
		Digests:   []string{manifest.Digest},
		Tags:      []string{ic.Tag},
		Name:      manifest.Name,
		DiffIDs:   diffIDs,
		History:   history,
		Reference: ic.Reference,
	}

	return imageConfig, nil
}