func init() {
	reexec.Register("namespaced", namespaced)
	if reexec.Init() {
		os.Exit(0)
	}
}
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()

	logrus.SetOutput(stderr)

	flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)

	flag.Usage = func() {
		fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ -h | --help | -v | --version ]\n\n")
		fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		flag.CommandLine.SetOutput(os.Stdout)
		flag.PrintDefaults()

		help := "\nCommands:\n"

		for _, cmd := range dockerCommands {
			help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description)
		}

		help += "\nRun 'docker COMMAND --help' for more information on a command."
		fmt.Fprintf(os.Stdout, "%s\n", help)
	}

	flag.Parse()

	if *flVersion {
		showVersion()
		return
	}

	clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags)
	// TODO: remove once `-d` is retired
	handleGlobalDaemonFlag()

	if *flHelp {
		// if global flag --help is present, regardless of what other options
		// and commands there are, just print the usage.
		flag.Usage()
		return
	}

	c := cli.New(clientCli, daemonCli)
	if err := c.Run(flag.Args()...); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(os.Stderr, sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
func main() {
	fmt.Println("test")
	if reexec.Init() {
		return
	}
}
func TestMain(m *testing.M) {
	reexec.Register("allocate", allocate)
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
func TestMain(m *testing.M) {
	reexec.Register("enqueue", reexecEnqueue)
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}

	if err := createController(); err != nil {
		os.Exit(1)
	}

	option := options.Generic{
		"EnableIPForwarding": true,
	}

	genericOption := make(map[string]interface{})
	genericOption[netlabel.GenericData] = option

	err := controller.ConfigureNetworkDriver(bridgeNetType, genericOption)
	if err != nil {
		//m.Fatal(err)
		os.Exit(1)
	}

	libnetwork.SetTestDataStore(controller, datastore.NewCustomDataStore(datastore.NewMockStore()))

	os.Exit(m.Run())
}
func init() {
	// Do not use chroot, to speed up run time and to allow archive
	// errors or hangs to be debugged directly from the test process.
	untar = archive.UntarUncompressed
	graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer

	reexec.Init()
}
func init() {
	reexec.Init()
	if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
		dockerBinary = dockerBin
	}
	var err error
	dockerBinary, err = exec.LookPath(dockerBinary)
	if err != nil {
		fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err)
		os.Exit(1)
	}
	if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
		registryImageName = registryImage
	}
	if registry := os.Getenv("REGISTRY_URL"); registry != "" {
		privateRegistryURL = registry
	}
	workingDirectory, _ = os.Getwd()

	// Deterministically working out the environment in which CI is running
	// to evaluate whether the daemon is local or remote is not possible through
	// a build tag.
	//
	// For example, Windows-to-Linux CI under Jenkins tests the 64-bit
	// Windows binary build with the daemon build tag, but calls a remote
	// Linux daemon.
	//
	// We can't just assume that on Windows the daemon is local, as at
	// some point we will be testing the Windows CLI against a Windows daemon.
	//
	// Similarly, it is perfectly valid to run CLI tests from
	// a Linux CLI (built with the daemon tag) against a Windows daemon.
	if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 {
		isLocalDaemon = false
	} else {
		isLocalDaemon = true
	}

	// TODO Windows CI. These are incorrect and need splitting into
	// platform-specific pieces.

	// This is only used for tests with a local daemon (Linux-only today).
	// The default is "/var/lib/docker", but we'll try and ask the
	// /info endpoint for the specific root dir.
	dockerBasePath = "/var/lib/docker"
	type Info struct {
		DockerRootDir string
	}
	var i Info
	status, b, err := sockRequest("GET", "/info", nil)
	if err == nil && status == 200 {
		if err = json.Unmarshal(b, &i); err == nil {
			dockerBasePath = i.DockerRootDir
		}
	}
	volumesConfigPath = dockerBasePath + "/volumes"
	containerStoragePath = dockerBasePath + "/containers"
}
func main() {
	for name, f := range entrypoints {
		reexec.Register(name, f)
	}

	if !reexec.Init() {
		control.Main()
	}
}
func Test(t *testing.T) {
	reexec.Init() // This is required for external graphdriver tests

	if !isLocalDaemon {
		fmt.Println("INFO: Testing against a remote daemon")
	} else {
		fmt.Println("INFO: Testing against a local daemon")
	}

	check.TestingT(t)
}
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	_, stdout, stderr := term.StdStreams()

	logrus.SetOutput(stderr)

	flag.Merge(flag.CommandLine, daemonCli.commonFlags.FlagSet)

	flag.Usage = func() {
		fmt.Fprint(stdout, "Usage: dockerd [ --help | -v | --version ]\n\n")
		fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		flag.CommandLine.SetOutput(stdout)
		flag.PrintDefaults()
	}
	flag.CommandLine.ShortUsage = func() {
		fmt.Fprint(stderr, "\nUsage:\tdockerd [OPTIONS]\n")
	}

	if err := flag.CommandLine.ParseFlags(os.Args[1:], false); err != nil {
		os.Exit(1)
	}

	if *flVersion {
		showVersion()
		return
	}

	if *flHelp {
		// if global flag --help is present, regardless of what other options
		// and commands there are, just print the usage.
		flag.Usage()
		return
	}

	// On Windows, this may be launching as a service or with an option to
	// register the service.
	stop, err := initService()
	if err != nil {
		logrus.Fatal(err)
	}

	if !stop {
		err = daemonCli.start()
		notifyShutdown(err)
		if err != nil {
			logrus.Fatal(err)
		}
	}
}
func main() {
	if reexec.Init() {
		return
	}

	_, stdout, stderr := term.StdStreams()
	logrus.SetOutput(stderr)

	err := dnetApp(stdout, stderr)
	if err != nil {
		os.Exit(1)
	}
}
func init() {
	var err error

	reexec.Init() // This is required for external graphdriver tests

	testEnv, err = environment.New()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	assignGlobalVariablesFromTestEnv(testEnv)
}
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}

	if err := createController(); err != nil {
		logrus.Errorf("Error creating controller: %v", err)
		os.Exit(1)
	}

	x := m.Run()
	controller.Stop()
	os.Exit(x)
}
func Test(t *testing.T) {
	reexec.Init() // This is required for external graphdriver tests

	if !isLocalDaemon {
		fmt.Println("INFO: Testing against a remote daemon")
	} else {
		fmt.Println("INFO: Testing against a local daemon")
	}

	if daemonPlatform == "linux" {
		ensureFrozenImagesLinux(t)
	}
	check.TestingT(t)
}
func main() {
	registerCmd("/init", osInit.MainInit)
	registerCmd(config.SYSINIT_BIN, sysinit.Main)
	registerCmd("/usr/bin/dockerlaunch", dockerlaunch.Main)
	registerCmd("/usr/bin/user-docker", userdocker.Main)
	registerCmd("/usr/bin/system-docker", systemdocker.Main)
	registerCmd("/sbin/poweroff", power.PowerOff)
	registerCmd("/sbin/reboot", power.Reboot)
	registerCmd("/sbin/halt", power.Halt)
	registerCmd("/sbin/shutdown", power.Main)
	registerCmd("/usr/bin/respawn", respawn.Main)
	registerCmd("/usr/bin/ros", control.Main)
	registerCmd("/usr/bin/cloud-init", cloudinit.Main)
	registerCmd("/usr/sbin/netconf", network.Main)
	registerCmd("/usr/sbin/wait-for-network", waitfornetwork.Main)
	registerCmd("/usr/sbin/wait-for-docker", wait.Main)

	if !reexec.Init() {
		reexec.Register(os.Args[0], control.Main)
		if !reexec.Init() {
			log.Fatalf("Failed to find an entry point for %s", os.Args[0])
		}
	}
}
func main() {
	if reexec.Init() {
		return
	}

	graphc := cli.NewApp()
	graphc.Name = "graphc"
	graphc.Usage = "manage graphc storage"
	graphc.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "home",
			Value:  "/var/lib/docker",
			Usage:  "home directory for graphdriver storage operations",
			EnvVar: "GRAPHDRIVER_HOME",
		},
		cli.StringFlag{
			Name:  "configdir",
			Value: "/etc/docker",
			Usage: "directory for docker configuration",
		},
		cli.StringFlag{
			Name:   "storage-driver, driver, s",
			Value:  "",
			Usage:  "storage driver to use",
			EnvVar: "GRAPHDRIVER_BACKEND",
		},
		cli.StringSliceFlag{
			Name:   "storage-opt",
			Value:  &cli.StringSlice{},
			Usage:  "set storage driver options",
			EnvVar: "GRAPHDRIVER_OPTIONS",
		},
		cli.StringFlag{
			Name:  "context, c",
			Value: "",
			Usage: "optional mountlabel (SELinux context)",
		},
		cli.BoolFlag{
			Name:  "debug, D",
			Usage: "print debugging information",
		},
	}
	graphc.EnableBashCompletion = true
	graphc.Commands = commands

	graphc.Run(os.Args)
	os.Exit(0)
}
func main() {
	if reexec.Init() {
		return
	}

	fnd := flag.Bool("nondaemon", false, "Not daemonize")
	flDisableIptables := flag.Bool("noniptables", false, "Don't enable iptables rules")
	flConfig := flag.String("config", "", "Config file for hyperd")
	flHost := flag.String("host", "", "Host for hyperd")
	flMirrors := flag.String("registry_mirror", "", "Preferred docker registry mirror")
	flInsecureRegistries := flag.String("insecure_registry", "", "Enable insecure registry communication")
	flHelp := flag.Bool("help", false, "Print help message for Hyperd daemon")
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "/var/log/hyper/")
	os.MkdirAll("/var/log/hyper/", 0755)
	flag.Usage = func() { printHelp() }
	flag.Parse()
	if *flHelp == true {
		printHelp()
		return
	}

	if !*fnd {
		path, err := osext.Executable()
		if err != nil {
			fmt.Printf("cannot find self executable path for %s: %v\n", os.Args[0], err)
			os.Exit(-1)
		}

		_, err = runvutils.ExecInDaemon(path, append([]string{os.Args[0], "--nondaemon"}, os.Args[1:]...))
		if err != nil {
			fmt.Println("failed to daemonize hyperd")
			os.Exit(-1)
		}

		return
	}

	var opts = &Options{
		DisableIptables:    *flDisableIptables,
		Config:             *flConfig,
		Hosts:              *flHost,
		Mirrors:            *flMirrors,
		InsecureRegistries: *flInsecureRegistries,
	}

	mainDaemon(opts)
}
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}

	if err := createController(); err != nil {
		log.Errorf("Error creating controller: %v", err)
		os.Exit(1)
	}

	//libnetwork.SetTestDataStore(controller, datastore.NewCustomDataStore(datastore.NewMockStore()))

	x := m.Run()
	controller.Stop()
	os.Exit(x)
}
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	_, stdout, stderr := term.StdStreams()
	logrus.SetOutput(stderr)

	cmd := newDaemonCommand()
	cmd.SetOutput(stdout)
	if err := cmd.Execute(); err != nil {
		fmt.Fprintf(stderr, "%s\n", err)
		os.Exit(1)
	}
}
func main() {
	if reexec.Init() {
		return
	}

	if os.Geteuid() != 0 {
		glog.Errorf("The Hyper daemon needs to be run as root")
		return
	}

	// hyper needs Linux kernel 3.8.0+
	if err := checkKernel(3, 8, 0); err != nil {
		glog.Errorf(err.Error())
		return
	}

	fnd := flag.Bool("nondaemon", false, "[deprecated flag]") // TODO: remove it when 0.8 is released
	flDisableIptables := flag.Bool("noniptables", false, "Don't enable iptables rules")
	flConfig := flag.String("config", "", "Config file for hyperd")
	flHost := flag.String("host", "", "Host for hyperd")
	flMirrors := flag.String("registry_mirror", "", "Preferred docker registry mirror")
	flInsecureRegistries := flag.String("insecure_registry", "", "Enable insecure registry communication")
	flHelp := flag.Bool("help", false, "Print help message for Hyperd daemon")
	flag.Set("alsologtostderr", "true")
	flag.Set("log_dir", "/var/log/hyper/")
	os.MkdirAll("/var/log/hyper/", 0755)
	flag.Usage = func() { printHelp() }
	flag.Parse()
	if *flHelp == true {
		printHelp()
		return
	}

	if *fnd {
		fmt.Printf("flag --nondaemon is deprecated\n")
	}

	var opt = &Options{
		DisableIptables:    *flDisableIptables,
		Config:             *flConfig,
		Hosts:              *flHost,
		Mirrors:            *flMirrors,
		InsecureRegistries: *flInsecureRegistries,
	}

	mainDaemon(opt)
}
func init() {
	// Always use the same driver (vfs) for all integration tests.
	// To test other drivers, we need a dedicated driver validation suite.
	os.Setenv("DOCKER_DRIVER", "vfs")
	os.Setenv("TEST", "1")
	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)

	// Hack to run sys init during unit testing
	if reexec.Init() {
		return
	}

	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatalf("docker tests need to be run as root")
	}

	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
		src, err := os.Open(dockerinit)
		if err != nil {
			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
		}
		defer src.Close()
		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
		if err != nil {
			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
		}
		defer dst.Close()
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
		}
		dst.Close()
		src.Close()
	}

	// Setup the base daemon, which will be duplicated for each test.
	// (no tests are run directly in the base)
	setupBaseImage()

	// Create the "global daemon", a long-running daemon for integration tests
	spawnGlobalDaemon()
	spawnLegitHttpsDaemon()
	spawnRogueHttpsDaemon()
	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
func main() {
	registerCmd("/init", osInit.MainInit)
	registerCmd(osInit.SYSINIT, sysinit.Main)
	registerCmd("/usr/bin/system-docker", systemdocker.Main)
	registerCmd("/sbin/poweroff", power.PowerOff)
	registerCmd("/sbin/reboot", power.Reboot)
	registerCmd("/sbin/halt", power.Halt)
	registerCmd("/sbin/shutdown", power.Main)
	registerCmd("/usr/bin/respawn", respawn.Main)
	registerCmd("/usr/sbin/rancherctl", control.Main) // deprecated, use `ros` instead
	registerCmd("/usr/sbin/ros", control.Main)
	registerCmd("/usr/bin/cloud-init", cloudinit.Main)
	registerCmd("/usr/sbin/netconf", network.Main)
	registerCmd("/usr/sbin/wait-for-docker", wait.Main)

	if !reexec.Init() {
		log.Fatalf("Failed to find an entry point for %s", os.Args[0])
	}
}
func init() {
	if reexec.Init() {
		os.Exit(0)
	}

	maxId := uint32(sysinfo.Min(sysinfo.MustGetMaxValidUID(), sysinfo.MustGetMaxValidGID()))
	idMappings = rootfs_provider.MappingList{
		{
			ContainerID: 0,
			HostID:      maxId,
			Size:        1,
		},
		{
			ContainerID: 1,
			HostID:      1,
			Size:        maxId - 1,
		},
	}
}
func init() {
	reexec.Init()
}
func main() {
	if reexec.Init() {
		return
	}
	flag.Parse()
	// FIXME: validate daemon flags here

	if *flVersion {
		showVersion()
		return
	}

	if *flLogLevel != "" {
		lvl, err := log.ParseLevel(*flLogLevel)
		if err != nil {
			log.Fatalf("Unable to parse logging level: %s", *flLogLevel)
		}
		initLogging(lvl)
	} else {
		initLogging(log.InfoLevel)
	}

	// -D, --debug, -l/--log-level=debug processing
	// When/if -D is removed this block can be deleted
	if *flDebug {
		os.Setenv("DEBUG", "1")
		initLogging(log.DebugLevel)
	}

	if len(flHosts) == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			// If we do not have a host, default to unix socket
			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
		}
		defaultHost, err := api.ValidateHost(defaultHost)
		if err != nil {
			log.Fatal(err)
		}
		flHosts = append(flHosts, defaultHost)
	}

	setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)

	if *flDaemon {
		mainDaemon()
		return
	}

	if len(flHosts) > 1 {
		log.Fatal("Please specify only one -H")
	}
	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)

	var (
		cli       *client.DockerCli
		tlsConfig tls.Config
	)
	tlsConfig.InsecureSkipVerify = true

	// Regardless of whether the user sets it to true or false, if they
	// specify --tlsverify at all then we need to turn on tls
	if flag.IsSet("-tlsverify") {
		*flTls = true
	}

	// If we should verify the server, we need to load a trusted ca
	if *flTlsVerify {
		certPool := x509.NewCertPool()
		file, err := ioutil.ReadFile(*flCa)
		if err != nil {
			log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
		}
		certPool.AppendCertsFromPEM(file)
		tlsConfig.RootCAs = certPool
		tlsConfig.InsecureSkipVerify = false
	}

	// If tls is enabled, try to load and send client certificates
	if *flTls || *flTlsVerify {
		_, errCert := os.Stat(*flCert)
		_, errKey := os.Stat(*flKey)
		if errCert == nil && errKey == nil {
			*flTls = true
			cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
			if err != nil {
				log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
			}
			tlsConfig.Certificates = []tls.Certificate{cert}
		}
		// Avoid fallback to SSL protocols < TLS1.0
		tlsConfig.MinVersion = tls.VersionTLS10
	}

	if *flTls || *flTlsVerify {
		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
	} else {
		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil)
	}

	if err := cli.Cmd(flag.Args()...); err != nil {
		if sterr, ok := err.(*utils.StatusError); ok {
			if sterr.Status != "" {
				log.Println(sterr.Status)
			}
			os.Exit(sterr.StatusCode)
		}
		log.Fatal(err)
	}
}
func main() {
	if reexec.Init() {
		return
	}

	cf_debug_server.AddFlags(flag.CommandLine)
	cf_lager.AddFlags(flag.CommandLine)
	flag.Parse()

	logger, _ := cf_lager.New("guardian")

	if *depotPath == "" {
		missing("-depot")
	}
	if *iodaemonBin == "" {
		missing("-iodaemonBin")
	}
	if *nstarBin == "" {
		missing("-nstarBin")
	}
	if *tarBin == "" {
		missing("-tarBin")
	}

	resolvedRootFSPath, err := filepath.EvalSymlinks(*rootFSPath)
	if err != nil {
		panic(err)
	}

	_, networkPoolCIDR, err := net.ParseCIDR(*networkPool)
	if err != nil {
		panic(err)
	}

	interfacePrefix := fmt.Sprintf("w%s", *tag)
	chainPrefix := fmt.Sprintf("w-%s-instance", *tag)
	iptablesMgr := wireIptables(logger, *tag, *allowHostAccess, interfacePrefix, chainPrefix)

	externalIPAddr, err := parseExternalIP(*externalIP)
	if err != nil {
		panic(err)
	}

	sysInfoProvider := sysinfo.NewProvider(*depotPath)

	propManager := properties.NewManager()

	backend := &gardener.Gardener{
		SysInfoProvider: sysInfoProvider,
		UidGenerator:    wireUidGenerator(),
		Starter:         wireStarter(logger, iptablesMgr),
		Networker:       wireNetworker(logger, *tag, networkPoolCIDR, externalIPAddr, iptablesMgr, interfacePrefix, chainPrefix, propManager, *networkModulePath),
		VolumeCreator:   wireVolumeCreator(logger, *graphRoot),
		Containerizer:   wireContainerizer(logger, *depotPath, *iodaemonBin, *nstarBin, *tarBin, resolvedRootFSPath),
		Logger:          logger,
		PropertyManager: propManager,
	}

	gardenServer := server.New(*listenNetwork, *listenAddr, *graceTime, backend, logger.Session("api"))

	err = gardenServer.Start()
	if err != nil {
		logger.Fatal("failed-to-start-server", err)
	}

	signals := make(chan os.Signal, 1)
	go func() {
		<-signals
		gardenServer.Stop()
		os.Exit(0)
	}()
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	logger.Info("started", lager.Data{
		"network": *listenNetwork,
		"addr":    *listenAddr,
	})

	select {}
}
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()

	initLogging(stderr)

	flag.Parse()
	// FIXME: validate daemon flags here

	if *flVersion {
		showVersion()
		return
	}

	if *flConfigDir != "" {
		cliconfig.SetConfigDir(*flConfigDir)
	}

	if *flLogLevel != "" {
		lvl, err := logrus.ParseLevel(*flLogLevel)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
			os.Exit(1)
		}
		setLogLevel(lvl)
	} else {
		setLogLevel(logrus.InfoLevel)
	}

	if *flDebug {
		os.Setenv("DEBUG", "1")
		setLogLevel(logrus.DebugLevel)
	}

	if len(flHosts) == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			if runtime.GOOS != "windows" {
				// If we do not have a host, default to unix socket
				defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
			} else {
				// If we do not have a host, default to TCP socket on Windows
				defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
			}
		}
		defaultHost, err := opts.ValidateHost(defaultHost)
		if err != nil {
			if *flDaemon {
				logrus.Fatal(err)
			} else {
				fmt.Fprint(os.Stderr, err)
			}
			os.Exit(1)
		}
		flHosts = append(flHosts, defaultHost)
	}

	setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)

	// Regardless of whether the user sets it to true or false, if they
	// specify --tlsverify at all then we need to turn on tls
	// *flTlsVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well
	if flag.IsSet("-tlsverify") || *flTlsVerify {
		*flTls = true
	}

	if *flDaemon {
		if *flHelp {
			flag.Usage()
			return
		}
		mainDaemon()
		return
	}

	// From here on, we assume we're a client, not a server.
	if len(flHosts) > 1 {
		fmt.Fprintf(os.Stderr, "Please specify only one -H")
		os.Exit(0)
	}
	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)

	var tlsConfig *tls.Config
	if *flTls {
		tlsOptions.InsecureSkipVerify = !*flTlsVerify
		if !flag.IsSet("-tlscert") {
			if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
				tlsOptions.CertFile = ""
			}
		}
		if !flag.IsSet("-tlskey") {
			if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
				tlsOptions.KeyFile = ""
			}
		}
		var err error
		tlsConfig, err = tlsconfig.Client(tlsOptions)
		if err != nil {
			fmt.Fprintln(stderr, err)
			os.Exit(1)
		}
	}

	cli := client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], tlsConfig)

	if err := cli.Cmd(flag.Args()...); err != nil {
		if sterr, ok := err.(client.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(cli.Err(), sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(cli.Err(), err)
		os.Exit(1)
	}
}
func init() {
	if reexec.Init() {
		os.Exit(0)
	}
}