func spawnGlobalDaemon() {
	if globalDaemon != nil {
		log.Debugf("Global daemon already exists. Skipping.")
		return
	}
	t := std_log.New(os.Stderr, "", 0)
	eng := NewTestEngine(t)
	globalEngine = eng
	globalDaemon = mkDaemonFromEngine(eng, t)

	// Spawn a Daemon
	go func() {
		log.Debugf("Spawning global daemon for integration tests")
		listenURL := &url.URL{
			Scheme: testDaemonProto,
			Host:   testDaemonAddr,
		}
		job := eng.Job("serveapi", listenURL.String())
		job.SetenvBool("Logging", true)
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()

	// Give some time to ListenAndServe to actually start
	// FIXME: use inmem transports instead of tcp
	time.Sleep(time.Second)
	if err := eng.Job("acceptconnections").Run(); err != nil {
		log.Fatalf("Unable to accept connections for test api: %s", err)
	}
}
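// A minimal sketch of a test that reuses the long-running global daemon
// spawned above instead of creating its own. The test name is hypothetical,
// but everything it calls is defined in this file.
func TestGlobalDaemonSmoke(t *testing.T) {
	if globalDaemon == nil {
		t.Skip("global daemon was not spawned")
	}
	// The base image installed by setupBaseImage should be visible through
	// the shared daemon; GetTestImage fatally exits if it is missing.
	if img := GetTestImage(globalDaemon); img.ID != unitTestImageID {
		t.Fatalf("unexpected test image %s", img.ID)
	}
}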
func GetTestImage(daemon *daemon.Daemon) *image.Image {
	imgs, err := daemon.Graph().Map()
	if err != nil {
		log.Fatalf("Unable to get the test image: %s", err)
	}
	// Use img rather than image as the loop variable, to avoid shadowing the
	// image package.
	for _, img := range imgs {
		if img.ID == unitTestImageID {
			return img
		}
	}
	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
	return nil
}
func init() {
	// Always use the same driver (vfs) for all integration tests.
	// To test other drivers, we need a dedicated driver validation suite.
	os.Setenv("DOCKER_DRIVER", "vfs")
	os.Setenv("TEST", "1")
	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)

	// Hack to run sys init during unit testing
	if reexec.Init() {
		return
	}

	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatalf("docker tests need to be run as root")
	}

	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
		src, err := os.Open(dockerinit)
		if err != nil {
			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
		}
		defer src.Close()
		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
		if err != nil {
			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
		}
		defer dst.Close()
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
		}
		// Close eagerly so the copied binary is flushed before the daemons
		// below start; the deferred closes then become harmless no-ops.
		dst.Close()
		src.Close()
	}

	// Setup the base daemon, which will be duplicated for each test.
	// (no tests are run directly in the base)
	setupBaseImage()

	// Create the "global daemon", a long-running daemon shared by the integration tests
	spawnGlobalDaemon()
	spawnLegitHttpsDaemon()
	spawnRogueHttpsDaemon()
	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
	t := std_log.New(os.Stderr, "", 0)
	root, err := newTestDirectory(unitTestStoreBase)
	if err != nil {
		t.Fatal(err)
	}
	// FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false,
	// and we want to set it to true.
	eng := newTestEngine(t, true, root)

	// Spawn a Daemon
	go func() {
		log.Debugf("Spawning https daemon for integration tests")
		listenURL := &url.URL{
			Scheme: testDaemonHttpsProto,
			Host:   addr,
		}
		job := eng.Job("serveapi", listenURL.String())
		job.SetenvBool("Logging", true)
		job.SetenvBool("Tls", true)
		job.SetenvBool("TlsVerify", true)
		job.Setenv("TlsCa", cacert)
		job.Setenv("TlsCert", cert)
		job.Setenv("TlsKey", key)
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()

	// Give some time to ListenAndServe to actually start
	time.Sleep(time.Second)
	if err := eng.Job("acceptconnections").Run(); err != nil {
		log.Fatalf("Unable to accept connections for test api: %s", err)
	}
	return eng
}
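// Hedged sketches of the two https spawners that init() calls above. The
// engine variables, listen-address constants, and fixture paths below are
// assumptions for illustration; only the calls into spawnHttpsDaemon follow
// code that actually appears in this file.
func spawnLegitHttpsDaemon() {
	if globalHttpsEngine != nil { // assumed package-level *engine.Engine
		return
	}
	// Serve with certificates signed by the CA that the test client trusts.
	globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, // assumed addr constant
		"fixtures/https/ca.pem", "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem")
}

func spawnRogueHttpsDaemon() {
	if globalRogueHttpsEngine != nil { // assumed package-level *engine.Engine
		return
	}
	// Serve with certificates from a different CA, so the tests can exercise
	// TlsVerify failures.
	globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, // assumed addr constant
		"fixtures/https/rogue-ca.pem", "fixtures/https/rogue-server-cert.pem", "fixtures/https/rogue-server-key.pem")
}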
func TestRandomContainerName(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	container := daemon.Get(createTestContainer(eng, config, t))
	containerID := container.ID

	if container.Name == "" {
		t.Fatalf("Expected non-empty container name")
	}

	// Use t.Fatalf rather than log.Fatalf here: log.Fatalf would kill the
	// whole test binary and skip the deferred cleanup.
	if c := daemon.Get(container.Name); c == nil {
		t.Fatalf("Could not lookup container %s by its name", container.Name)
	} else if c.ID != containerID {
		t.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
	}
}
func setupBaseImage() {
	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
	job := eng.Job("image_inspect", unitTestImageName)
	img, _ := job.Stdout.AddEnv()
	// If the test image is not found locally, try to download it.
	if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID {
		// Retrieve the image
		job = eng.Job("pull", unitTestImageName)
		job.Stdout.Add(utils.NopWriteCloser(os.Stdout))
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}
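// A hedged helper sketch built on the same image_inspect/AddEnv pattern used
// in setupBaseImage above; the inspectImageID name is hypothetical.
func inspectImageID(eng *engine.Engine, name string) (string, error) {
	job := eng.Job("image_inspect", name)
	// The env sink must be wired to the job's stdout before the job runs.
	env, err := job.Stdout.AddEnv()
	if err != nil {
		return "", err
	}
	if err := job.Run(); err != nil {
		return "", err
	}
	return env.Get("Id"), nil
}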
func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) {
	// Apply configuration defaults
	if config.Mtu == 0 {
		// FIXME: GetDefaultNetworkMtu doesn't need to be public anymore
		config.Mtu = GetDefaultNetworkMtu()
	}
	// Check for mutually incompatible config options
	if config.BridgeIface != "" && config.BridgeIP != "" {
		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}
	if !config.EnableIptables && !config.InterContainerCommunication {
		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}
	// FIXME: DisableNetworkBridge doesn't need to be public anymore
	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge

	// Claim the pidfile first, to avoid any and all unexpected race conditions.
	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
	if config.Pidfile != "" {
		if err := utils.CreatePidFile(config.Pidfile); err != nil {
			return nil, err
		}
		eng.OnShutdown(func() {
			// Always release the pidfile last, just in case
			utils.RemovePidFile(config.Pidfile)
		})
	}

	// Check that the system is supported and we have sufficient privileges
	// FIXME: return errors instead of calling Fatal
	if runtime.GOOS != "linux" {
		log.Fatalf("The Docker daemon is only supported on linux")
	}
	if os.Geteuid() != 0 {
		log.Fatalf("The Docker daemon needs to be run as root")
	}
	if err := checkKernelAndArch(); err != nil {
		log.Fatalf(err.Error())
	}

	// set up the TempDir to use a canonical path
	tmp, err := utils.TempDir(config.Root)
	if err != nil {
		log.Fatalf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := utils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)
	if !config.EnableSelinuxSupport {
		selinuxSetDisabled()
	}

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = utils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			log.Fatalf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}
	config.Root = realRoot
	// Create the root directory if it doesn't exist
	if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	// Set the default driver
	graphdriver.DefaultDriver = config.GraphDriver

	// Load storage driver
	driver, err := graphdriver.New(config.Root, config.GraphOptions)
	if err != nil {
		return nil, err
	}
	log.Debugf("Using graph driver %s", driver)

	// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
	if config.EnableSelinuxSupport && driver.String() == "btrfs" {
		return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!")
	}

	daemonRepo := path.Join(config.Root, "containers")
	if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	// Migrate the container if it is aufs and aufs is enabled
	if err = migrateIfAufs(driver, config.Root); err != nil {
		return nil, err
	}

	log.Debugf("Creating images graph")
	g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
	if err != nil {
		return nil, err
	}

	// We don't want to use a complex driver like aufs or devmapper
	// for volumes, just a plain filesystem
	volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
	if err != nil {
		return nil, err
	}

	log.Debugf("Creating volumes graph")
	volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
	if err != nil {
		return nil, err
	}
	log.Debugf("Creating repository list")
	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
	}

	if !config.DisableNetwork {
		job := eng.Job("init_networkdriver")
		job.SetenvBool("EnableIptables", config.EnableIptables)
		job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication)
		job.SetenvBool("EnableIpForward", config.EnableIpForward)
		job.Setenv("BridgeIface", config.BridgeIface)
		job.Setenv("BridgeIP", config.BridgeIP)
		job.Setenv("DefaultBindingIP", config.DefaultIp.String())
		if err := job.Run(); err != nil {
			return nil, err
		}
	}

	graphdbPath := path.Join(config.Root, "linkgraph.db")
	graph, err := graphdb.NewSqliteConn(graphdbPath)
	if err != nil {
		return nil, err
	}

	localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
	sysInitPath := utils.DockerInitPath(localCopy)
	if sysInitPath == "" {
		return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.")
	}

	if sysInitPath != localCopy {
		// When we find a suitable dockerinit binary (even if it's our local binary),
		// we copy it into config.Root at localCopy for future use (so that the original
		// can go away without that being a problem, for example during a package upgrade).
		if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) {
			return nil, err
		}
		if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil {
			return nil, err
		}
		if err := os.Chmod(localCopy, 0700); err != nil {
			return nil, err
		}
		sysInitPath = localCopy
	}

	sysInfo := sysinfo.New(false)
	ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo)
	if err != nil {
		return nil, err
	}

	daemon := &Daemon{
		repository:     daemonRepo,
		containers:     &contStore{s: make(map[string]*Container)},
		graph:          g,
		repositories:   repositories,
		idIndex:        truncindex.NewTruncIndex([]string{}),
		sysInfo:        sysInfo,
		volumes:        volumes,
		config:         config,
		containerGraph: graph,
		driver:         driver,
		sysInitPath:    sysInitPath,
		execDriver:     ed,
		eng:            eng,
	}

	if err := daemon.checkLocaldns(); err != nil {
		return nil, err
	}
	if err := daemon.restore(); err != nil {
		return nil, err
	}

	// Setup shutdown handlers
	// FIXME: can these shutdown handlers be registered closer to their source?
	eng.OnShutdown(func() {
		// FIXME: if these cleanup steps can be called concurrently, register
		// them as separate handlers to speed up total shutdown time
		// FIXME: use engine logging instead of log.Errorf
		if err := daemon.shutdown(); err != nil {
			log.Errorf("daemon.shutdown(): %s", err)
		}
		if err := portallocator.ReleaseAll(); err != nil {
			log.Errorf("portallocator.ReleaseAll(): %s", err)
		}
		if err := daemon.driver.Cleanup(); err != nil {
			log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
		}
		if err := daemon.containerGraph.Close(); err != nil {
			log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
		}
	})

	return daemon, nil
}