// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			return derr.ErrorCodeRmRunning
		}
		if err := daemon.Kill(container); err != nil {
			return derr.ErrorCodeRmFailed.WithArgs(err)
		}
	}

	// Stop collection of stats for the container, regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk, so that if an error occurs before the
	// container metadata file is removed from disk, a restart of docker
	// does not bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is requested, delete the container from the various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
				logrus.Debugf("Unable to remove container from link graph: %s", err)
			}
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
	}

	metadata, err := daemon.layerStore.DeleteMount(container.ID)
	layer.LogReleaseMetadata(metadata)
	if err != nil && err != layer.ErrMountDoesNotExist {
		return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err)
	}

	if err = daemon.execDriver.Clean(container.ID); err != nil {
		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
	}
	return nil
}
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID)
			return errors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// Stop collection of stats for the container, regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk, so that if an error occurs before the
	// container metadata file is removed from disk, a restart of docker
	// does not bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is requested, delete the container from the various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			daemon.nameIndex.Delete(container.ID)
			daemon.linkIndex.delete(container)
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
	}

	// When container creation fails and `RWLayer` has not been created yet,
	// we do not call `ReleaseRWLayer`.
	if container.RWLayer != nil {
		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
		}
	}

	return nil
}
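// Both versions of cleanupContainer above lean on the same Go idiom: err is a
// named return value, so the deferred closure observes the function's final
// error and can still deregister the container when removal was forced. What
// follows is a minimal, self-contained sketch of that pattern; removeEntry
// and registry are hypothetical stand-ins, not part of the daemon code above.

package main

import (
	"errors"
	"fmt"
)

// removeEntry mirrors cleanupContainer's shape: the deferred closure runs
// after the named return value err has been set, and deregisters the entry
// on success or whenever force removal was requested.
func removeEntry(registry map[string]bool, id string, force bool) (err error) {
	defer func() {
		if err == nil || force {
			delete(registry, id)
		}
	}()

	// Stand-in for the filesystem/layer cleanup steps that may fail.
	if id == "broken" {
		return errors.New("simulated cleanup failure")
	}
	return nil
}

func main() {
	registry := map[string]bool{"ok": true, "broken": true}

	fmt.Println(removeEntry(registry, "ok", false))     // <nil>: entry removed
	fmt.Println(removeEntry(registry, "broken", false)) // error: entry kept
	fmt.Println(removeEntry(registry, "broken", true))  // error, but force still deregisters
	fmt.Println(len(registry))                          // 0
}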
// legacyLoadImage loads a pre-1.10 ("v1") image from sourceDir into the image
// store, recursively loading its parent chain first so that the child's root
// filesystem and history can be built on top of the parent's.
func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error {
	// Already loaded in this session; nothing to do.
	if _, loaded := loadedMap[oldID]; loaded {
		return nil
	}
	configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
	if err != nil {
		return err
	}
	imageJSON, err := ioutil.ReadFile(configPath)
	if err != nil {
		logrus.Debugf("Error reading json: %v", err)
		return err
	}

	var img struct{ Parent string }
	if err := json.Unmarshal(imageJSON, &img); err != nil {
		return err
	}

	// Load the parent chain first; loop until the parent's new ID shows up
	// in loadedMap.
	var parentID image.ID
	if img.Parent != "" {
		for {
			var loaded bool
			if parentID, loaded = loadedMap[img.Parent]; !loaded {
				if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil {
					return err
				}
			} else {
				break
			}
		}
	}

	// TODO: try to connect with migrate code
	rootFS := image.NewRootFS()
	var history []image.History

	// Start from the parent's root filesystem and history, if there is one.
	if parentID != "" {
		parentImg, err := l.is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
	if err != nil {
		return err
	}
	newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, progressOutput)
	if err != nil {
		return err
	}
	rootFS.Append(newLayer.DiffID())

	h, err := v1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	imgID, err := l.is.Create(config)
	if err != nil {
		return err
	}

	// Drop our reference to the layer now that the image store tracks it.
	metadata, err := l.ls.Release(newLayer)
	layer.LogReleaseMetadata(metadata)
	if err != nil {
		return err
	}

	if parentID != "" {
		if err := l.is.SetParent(imgID, parentID); err != nil {
			return err
		}
	}

	loadedMap[oldID] = imgID
	return nil
}
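// legacyLoadImage joins untrusted path components from the archive (oldID
// comes straight from the tar contents) onto sourceDir via safePath, which
// confines the result to the source directory. Below is a minimal sketch of
// one way such a guard can work, using only the standard library; safeJoin is
// a hypothetical stand-in, and it omits the in-scope symlink resolution that
// the real safePath performs.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin joins an untrusted relative path onto base and rejects results
// that escape base lexically (e.g. via ".." components).
func safeJoin(base, untrusted string) (string, error) {
	cleanBase := filepath.Clean(base)
	joined := filepath.Join(cleanBase, untrusted) // Join also cleans the result
	if joined != cleanBase && !strings.HasPrefix(joined, cleanBase+string(filepath.Separator)) {
		return "", fmt.Errorf("path %q escapes base directory %q", untrusted, base)
	}
	return joined, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/load", "abc123/json"))      // /tmp/load/abc123/json <nil>
	fmt.Println(safeJoin("/tmp/load", "../../etc/passwd")) // error: path escapes base
}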