func TestContainerDoubleDelete(t *testing.T) {
	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	daemon := &Daemon{
		repository: tmp,
		root:       tmp,
	}
	daemon.containers = container.NewMemoryStore()

	container := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:     "test",
			State:  container.NewState(),
			Config: &containertypes.Config{},
		},
	}
	daemon.containers.Add(container.ID, container)

	// Mark the container as having a delete in progress
	if err := container.SetRemovalInProgress(); err != nil {
		t.Fatal(err)
	}

	// Try to remove the container when its state is removalInProgress.
	// It should ignore the container and not return an error.
	if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil {
		t.Fatal(err)
	}
}
func TestContainerDoubleDelete(t *testing.T) {
	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	daemon := &Daemon{
		repository: tmp,
		root:       tmp,
	}
	daemon.containers = container.NewMemoryStore()

	container := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:     "test",
			State:  container.NewState(),
			Config: &containertypes.Config{},
		},
	}
	daemon.containers.Add(container.ID, container)

	// Mark the container as having a delete in progress
	container.SetRemovalInProgress()

	// Try to remove the container when its state is removalInProgress.
	// It should return an error indicating it is under removal progress.
	if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err == nil {
		t.Fatalf("expected err: %v, got nil", fmt.Sprintf("removal of container %s is already in progress", container.ID))
	}
}
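// The two TestContainerDoubleDelete variants above encode opposite expectations for a
// second removal attempt: the first treats an in-progress removal as a no-op, the second
// expects ContainerRm to fail fast. The snippet below is a minimal, self-contained sketch
// of such a guard; removalState and its method are illustrative names, not the daemon's
// actual State implementation.
package main

import (
	"fmt"
	"sync"
)

// removalState is an illustrative stand-in for a container state that can be
// flagged as "removal in progress".
type removalState struct {
	mu                sync.Mutex
	removalInProgress bool
}

// setRemovalInProgress marks the state and reports whether a removal was already
// underway, so the caller can either ignore the duplicate request or return an error.
func (s *removalState) setRemovalInProgress() (alreadyInProgress bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.removalInProgress {
		return true
	}
	s.removalInProgress = true
	return false
}

func main() {
	s := &removalState{}
	fmt.Println(s.setRemovalInProgress()) // false: the first removal claims the container
	fmt.Println(s.setRemovalInProgress()) // true: a second attempt sees removal in progress
}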
func TestGetContainer(t *testing.T) {
	c1 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
			Name: "tender_bardeen",
		},
	}
	c2 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
			Name: "drunk_hawking",
		},
	}
	c3 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
			Name: "3cdbd1aa",
		},
	}
	c4 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
			Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
		},
	}
	c5 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
			Name: "d22d69a2b896",
		},
	}

	store := container.NewMemoryStore()
	store.Add(c1.ID, c1)
	store.Add(c2.ID, c2)
	store.Add(c3.ID, c3)
	store.Add(c4.ID, c4)
	store.Add(c5.ID, c5)

	index := truncindex.NewTruncIndex([]string{})
	index.Add(c1.ID)
	index.Add(c2.ID)
	index.Add(c3.ID)
	index.Add(c4.ID)
	index.Add(c5.ID)

	daemon := &Daemon{
		containers: store,
		idIndex:    index,
		nameIndex:  registrar.NewRegistrar(),
	}

	daemon.reserveName(c1.ID, c1.Name)
	daemon.reserveName(c2.ID, c2.Name)
	daemon.reserveName(c3.ID, c3.Name)
	daemon.reserveName(c4.ID, c4.Name)
	daemon.reserveName(c5.ID, c5.Name)

	if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
		t.Fatal("Should explicitly match full container IDs")
	}

	if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 {
		t.Fatal("Should match a partial ID")
	}

	if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 {
		t.Fatal("Should match a full name")
	}

	// c3.Name is a partial match for both c3.ID and c2.ID
	if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 {
		t.Fatal("Should match a full name even though it collides with another container's ID")
	}

	if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 {
		t.Fatal("Should match a container where the provided prefix is an exact match to its name, and is also a prefix for its ID")
	}

	if _, err := daemon.GetContainer("3cdbd1"); err == nil {
		t.Fatal("Should return an error when provided a prefix that partially matches multiple container IDs")
	}

	if _, err := daemon.GetContainer("nothing"); err == nil {
		t.Fatal("Should return an error when provided a prefix that matches neither a name nor a partial ID")
	}
}
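// TestGetContainer above asserts a specific resolution order: an exact ID match wins,
// then an exact name match, and only then an unambiguous ID prefix, with ambiguous
// prefixes rejected. The sketch below illustrates that precedence with plain maps and
// slices; resolve is a hypothetical helper, not the daemon's GetContainer.
package main

import (
	"errors"
	"fmt"
	"strings"
)

// resolve returns the ID selected for prefixOrName, trying exact ID, then exact
// name (names maps a container name to its ID), then an unambiguous ID prefix.
func resolve(prefixOrName string, ids []string, names map[string]string) (string, error) {
	for _, id := range ids {
		if id == prefixOrName {
			return id, nil // exact ID match
		}
	}
	if id, ok := names[prefixOrName]; ok {
		return id, nil // exact name match beats any prefix collision
	}
	var matches []string
	for _, id := range ids {
		if strings.HasPrefix(id, prefixOrName) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return "", errors.New("no such container: " + prefixOrName)
	default:
		return "", errors.New("ambiguous prefix: " + prefixOrName)
	}
}

func main() {
	ids := []string{"3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363", "3cdbd1aa394fd68559fd1441d6eff2abfafdcba0"}
	names := map[string]string{"3cdbd1aa": "3cdbd1aa394fd68559fd1441d6eff2abfafdcba0"}
	id, err := resolve("3cdbd1aa", ids, names) // the name match wins even though both IDs share the prefix
	fmt.Println(id, err)
}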
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	setupDumpStackTrap()

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	installDefaultAppArmorProfile()

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}
	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}

	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if Devices cgroup is mounted; it is a hard requirement for container security
	// on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.RegistryService = registryService
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	if err := pluginInit(d, config, containerdRemote); err != nil {
		return nil, err
	}

	return d, nil
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure we have compatible configuration options
	if err := checkConfigOptions(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	setupDumpStackTrap()

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// Verify logging driver type
	if config.LogConfig.Type != "none" {
		if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil {
			return nil, fmt.Errorf("error finding the logging driver: %v", err)
		}
	}
	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}
	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency)
	d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := configureVolumes(config, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}

	if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
		return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
	}

	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		return nil, err
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	d.netController, err = d.initNetworkController(config)
	if err != nil {
		return nil, fmt.Errorf("Error initializing network controller: %v", err)
	}

	sysInfo := sysinfo.New(false)
	// Check if Devices cgroup is mounted; it is a hard requirement for container security
	// on Linux/FreeBSD.
	if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	ed, err := execdrivers.NewDriver(config.ExecOptions, config.ExecRoot, config.Root, sysInfo)
	if err != nil {
		return nil, err
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.configStore = config
	d.execDriver = ed
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.RegistryService = registryService
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()

	if err := d.cleanupMounts(); err != nil {
		return nil, err
	}

	go d.execCommandGC()

	if err := d.restore(); err != nil {
		return nil, err
	}

	return d, nil
}
func TestMigrateLegacySqliteLinks(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "legacy-sqlite-links-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	name1 := "test1"
	c1 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:         stringid.GenerateNonCryptoID(),
			Name:       name1,
			HostConfig: &containertypes.HostConfig{},
		},
	}
	c1.Root = tmpDir

	name2 := "test2"
	c2 := &container.Container{
		CommonContainer: container.CommonContainer{
			ID:   stringid.GenerateNonCryptoID(),
			Name: name2,
		},
	}

	store := container.NewMemoryStore()
	store.Add(c1.ID, c1)
	store.Add(c2.ID, c2)

	d := &Daemon{root: tmpDir, containers: store}

	db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db"))
	if err != nil {
		t.Fatal(err)
	}

	if _, err := db.Set("/"+name1, c1.ID); err != nil {
		t.Fatal(err)
	}

	if _, err := db.Set("/"+name2, c2.ID); err != nil {
		t.Fatal(err)
	}

	alias := "hello"
	if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil {
		t.Fatal(err)
	}

	if err := d.migrateLegacySqliteLinks(db, c1); err != nil {
		t.Fatal(err)
	}

	if len(c1.HostConfig.Links) != 1 {
		t.Fatal("expected links to be populated but is empty")
	}

	expected := name2 + ":" + alias
	actual := c1.HostConfig.Links[0]
	if actual != expected {
		t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual)
	}

	// ensure this is persisted
	b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json"))
	if err != nil {
		t.Fatal(err)
	}

	type hc struct {
		Links []string
	}
	var cfg hc
	if err := json.Unmarshal(b, &cfg); err != nil {
		t.Fatal(err)
	}

	if len(cfg.Links) != 1 {
		t.Fatalf("expected one entry in links, got: %d", len(cfg.Links))
	}
	if cfg.Links[0] != expected { // same expected as above
		t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0])
	}
}
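// The migration exercised by TestMigrateLegacySqliteLinks turns legacy graphdb entries of
// the form /parentName/alias -> childID into the childName:alias strings stored in
// HostConfig.Links. The helper below is a hypothetical illustration of that string
// conversion only; it is not the daemon's migrateLegacySqliteLinks.
package main

import (
	"fmt"
	"path"
	"strings"
)

// legacyLinkToHostConfig converts a legacy link key such as "/test1/hello", pointing at a
// child container named childName, into the "childName:alias" form used by HostConfig.Links.
func legacyLinkToHostConfig(linkKey, childName string) string {
	alias := path.Base(linkKey) // "hello" from "/test1/hello"
	return strings.TrimPrefix(childName, "/") + ":" + alias
}

func main() {
	// Mirrors the fixture in the test above: c1 ("test1") links to c2 ("test2") under the
	// alias "hello", which should persist as "test2:hello".
	fmt.Println(legacyLinkToHostConfig("/test1/hello", "test2"))
}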