// CmdHelp displays information on a Docker command.
//
// If more than one command is specified, information is only shown for the first command.
//
// Usage: docker help COMMAND or docker COMMAND --help
func (cli *Cli) CmdHelp(args ...string) error {
	// Try a two-word subcommand first (e.g. "help volume create").
	if len(args) > 1 {
		command, err := cli.command(args[:2]...)
		switch err := err.(type) {
		case nil:
			command("--help")
			return nil
		case initErr:
			// The command exists but its initializer failed; surface that error.
			return err.error
		}
		// Any other error: fall through and retry with a one-word command.
	}
	if len(args) > 0 {
		command, err := cli.command(args[0])
		switch err := err.(type) {
		case nil:
			command("--help")
			return nil
		case initErr:
			return err.error
		}
		// NOTE(review): noSuchCommand presumably prints and exits; confirm,
		// since otherwise execution would continue into the usage print below.
		cli.noSuchCommand(args[0])
	}
	// No arguments: print the top-level usage text.
	if cli.Usage == nil {
		flag.Usage()
	} else {
		cli.Usage()
	}
	return nil
}
func main() { var ( as []string hostMatch string ) mflagext.ListVar(&as, []string{"a", "-alias"}, []string{}, "Specify hostname aliases in the form alias:hostname. Can be repeated.") mflag.StringVar(&hostMatch, []string{"h", "-host-match"}, "*.weave.local", "Specify main host shExpMatch expression in pacfile") mflag.Parse() var aliases = map[string]string{} for _, a := range as { parts := strings.SplitN(a, ":", 2) if len(parts) != 2 { fmt.Printf("'%s' is not a valid alias.\n", a) mflag.Usage() os.Exit(1) } aliases[parts[0]] = parts[1] } go socksProxy(aliases) t := template.Must(template.New("pacfile").Parse(pacfile)) http.HandleFunc("/proxy.pac", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/x-ns-proxy-autoconfig") t.Execute(w, pacFileParameters{hostMatch, aliases}) }) if err := http.ListenAndServe(":8080", nil); err != nil { panic(err) } }
// main is the docker entry point: it merges the client and common flag
// sets, installs the top-level usage printer, parses the command line,
// and dispatches to --version/--help handling or the CLI command runner.
func main() {
	// reexec.Init returns true when the binary was re-executed for an
	// internal initializer; nothing else to do in that case.
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()

	logrus.SetOutput(stderr)

	flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)

	// Install the usage printer used for `docker --help` and parse errors.
	flag.Usage = func() {
		fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ -h | --help | -v | --version ]\n\n")
		fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		flag.CommandLine.SetOutput(os.Stdout)
		flag.PrintDefaults()

		help := "\nCommands:\n"

		// One line per known subcommand: name plus description.
		for _, cmd := range dockerCommands {
			help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description)
		}

		help += "\nRun 'docker COMMAND --help' for more information on a command."
		fmt.Fprintf(os.Stdout, "%s\n", help)
	}

	flag.Parse()

	// --version short-circuits everything else.
	if *flVersion {
		showVersion()
		return
	}

	clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags)
	// TODO: remove once `-d` is retired
	handleGlobalDaemonFlag()

	if *flHelp {
		// if global flag --help is present, regardless of what other options and commands there are,
		// just print the usage.
		flag.Usage()
		return
	}

	// Run the requested command; translate cli.StatusError into the
	// matching process exit code.
	c := cli.New(clientCli, daemonCli)
	if err := c.Run(flag.Args()...); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(os.Stderr, sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// readConfig parses the command-line flags, handles the --help/--version
// short-circuits, loads the TOML configuration file, and starts logging.
func readConfig() {
	var (
		configFile            string
		showHelp, showVersion bool
	)
	// Route the standard logger through a level filter writing to stderr.
	logFilter = &logutils.LevelFilter{
		Levels:   logLevels,
		MinLevel: logMinLevel,
		Writer:   os.Stderr,
	}
	log.SetOutput(logFilter)
	// NOTE: this is the docker-style mflag API; each flag takes a list of
	// short/long aliases.
	flag.StringVar(&configFile, []string{"c", "-config"}, "/etc/logear/logear.conf", "config file")
	flag.StringVar(&logFile, []string{"l", "-log"}, "", "log file")
	flag.BoolVar(&showHelp, []string{"h", "-help"}, false, "display the help")
	flag.BoolVar(&showVersion, []string{"v", "-version"}, false, "display version info")
	flag.Parse()
	if showHelp {
		flag.Usage()
		os.Exit(0)
	}
	if showVersion {
		println(versionstring)
		println("OS: " + runtime.GOOS)
		println("Architecture: " + runtime.GOARCH)
		os.Exit(0)
	}
	// Load the config, start the logging pipeline, and announce startup.
	parseTomlFile(configFile)
	startLogging()
	log.Printf("%s started with pid %d", versionstring, os.Getpid())
}
// CmdHelp displays information on a Docker command. // // If more than one command is specified, information is only shown for the first command. // // Usage: docker help COMMAND or docker COMMAND --help func (cli *Cli) CmdHelp(args ...string) error { if len(args) > 1 { command, err := cli.command(args[:2]...) if err == nil { command("--help") return nil } if err != errCommandNotFound { return err } } if len(args) > 0 { command, err := cli.command(args[0]) if err != nil { if err == errCommandNotFound { cli.noSuchCommand(args[0]) return nil } return err } command("--help") return nil } if cli.Usage == nil { flag.Usage() } else { cli.Usage() } return nil }
// mainDaemon starts the Docker daemon: it wires up the engine, registers
// builtins, boots the daemon in the background, and then serves the API.
// It returns only if the API server job exits.
func mainDaemon() {
	// The daemon takes no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	eng := engine.New()
	signal.Trap(eng.Shutdown)
	// Load builtins
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}

	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	go func() {
		d, err := daemon.NewDaemon(daemonCfg, eng)
		if err != nil {
			log.Fatal(err)
		}
		if err := d.Install(eng); err != nil {
			log.Fatal(err)
		}

		b := &builder.BuilderJob{eng, d}
		b.Install()

		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			log.Fatal(err)
		}
	}()

	// TODO actually have a resolved graphdriver to show?
	log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
		dockerversion.VERSION,
		dockerversion.GITCOMMIT,
		daemonCfg.ExecDriver,
		daemonCfg.GraphDriver,
	)

	// Serve api — this job blocks for the lifetime of the daemon.
	job := eng.Job("serveapi", flHosts...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", *flSocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.Setenv("TrustKey", *flTrustKey)
	job.SetenvBool("BufferRequests", true)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}
// doFlags defines the cmdline Usage string and parses flag options.
// It then validates required settings (COLLECTOR_DIR, at least one
// positional argument, a supported docker protocol), creates every
// directory the collector needs, and records the registry spec.
func doFlags() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, " Usage: %s [OPTIONS] REGISTRY REPO [REPO...]\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\n REGISTRY:\n")
		fmt.Fprintf(os.Stderr, "\tURL of your Docker registry; use index.docker.io for Docker Hub, use local.host to collect images from local Docker host\n")
		fmt.Fprintf(os.Stderr, "\n REPO:\n")
		fmt.Fprintf(os.Stderr, "\tOne or more repos to gather info about; if no repo is specified Collector will gather info on *all* repos in the Registry\n")
		fmt.Fprintf(os.Stderr, "\n Environment variables:\n")
		fmt.Fprintf(os.Stderr, "\tCOLLECTOR_DIR: (Required) Directory that contains the \"data\" folder with Collector default scripts, e.g., $GOPATH/src/github.com/banyanops/collector\n")
		fmt.Fprintf(os.Stderr, "\tCOLLECTOR_ID: ID provided by Banyan web interface to register Collector with the Banyan service\n")
		fmt.Fprintf(os.Stderr, "\tBANYAN_HOST_DIR: Host directory mounted into Collector/Target containers where results are stored (default: $HOME/.banyan)\n")
		fmt.Fprintf(os.Stderr, "\tBANYAN_DIR: (Specify only in Dockerfile) Directory in the Collector container where host directory BANYAN_HOST_DIR is mounted\n")
		fmt.Fprintf(os.Stderr, "\tDOCKER_{HOST,CERT_PATH,TLS_VERIFY}: If set, e.g., by docker-machine, then they take precedence over --dockerProto and --dockerAddr\n")
		printExampleUsage()
		fmt.Fprintf(os.Stderr, " Options:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	// COLLECTOR_DIR is mandatory; bail out with usage if unset.
	if config.COLLECTORDIR() == "" {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	// The REGISTRY positional argument is required.
	if len(flag.Args()) < 1 {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	// Only unix and tcp docker transports are supported.
	if *dockerProto != "unix" && *dockerProto != "tcp" {
		flag.Usage()
		os.Exit(except.ErrorExitStatus)
	}
	// Make sure every directory the collector writes into exists.
	requiredDirs := []string{config.BANYANDIR(), filepath.Dir(*imageList), filepath.Dir(*repoList), *config.BanyanOutDir, collector.DefaultScriptsDir, collector.UserScriptsDir, collector.BinDir}
	for _, dir := range requiredDirs {
		blog.Debug("Creating directory: " + dir)
		err := fsutil.CreateDirIfNotExist(dir)
		if err != nil {
			except.Fail(err, ": Error in creating a required directory: ", dir)
		}
	}
	collector.RegistrySpec = flag.Arg(0)
	// EqualFold: case insensitive comparison
	if strings.EqualFold(flag.Arg(0), "local.host") {
		collector.LocalHost = true
	}
	//nextMaxImages = *maxImages
}
// dnetCommand is the shared entry point for the dnet binary: it parses
// flags, configures logging, resolves the daemon address, and then either
// runs the dnet daemon or dispatches a client command.
func dnetCommand(stdout, stderr io.Writer) error {
	flag.Parse()

	if *flHelp {
		flag.Usage()
		return nil
	}

	// Logging level: explicit --log-level wins, otherwise Info;
	// --debug overrides both.
	if *flLogLevel != "" {
		lvl, err := logrus.ParseLevel(*flLogLevel)
		if err != nil {
			fmt.Fprintf(stderr, "Unable to parse logging level: %s\n", *flLogLevel)
			return err
		}
		logrus.SetLevel(lvl)
	} else {
		logrus.SetLevel(logrus.InfoLevel)
	}

	if *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}

	// Host resolution: flag, then DNET_HOST env var, then built-in default.
	if *flHost == "" {
		defaultHost := os.Getenv("DNET_HOST")
		if defaultHost == "" {
			// TODO : Add UDS support
			defaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
		}
		*flHost = defaultHost
	}

	dc, err := newDnetConnection(*flHost)
	if err != nil {
		if *flDaemon {
			logrus.Error(err)
		} else {
			fmt.Fprint(stderr, err)
		}
		return err
	}

	// Daemon mode: run the dnet daemon loop and report its exit error.
	if *flDaemon {
		err := dc.dnetDaemon()
		if err != nil {
			logrus.Errorf("dnet Daemon exited with an error : %v", err)
		}
		return err
	}

	// Client mode: forward the remaining arguments to the network CLI.
	cli := client.NewNetworkCli(stdout, stderr, dc.httpCall)
	if err := cli.Cmd("dnet", flag.Args()...); err != nil {
		fmt.Fprintln(stderr, err)
		return err
	}
	return nil
}
// main is the dockerd entry point: it sets up terminal streams and usage
// output, parses flags, handles --version/--help, and then starts the
// daemon (possibly as a Windows service).
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	_, stdout, stderr := term.StdStreams()
	logrus.SetOutput(stderr)
	flag.Merge(flag.CommandLine, daemonCli.commonFlags.FlagSet)

	// Long usage (for --help) goes to stdout; short usage (for parse
	// errors) goes to stderr.
	flag.Usage = func() {
		fmt.Fprint(stdout, "Usage: dockerd [ --help | -v | --version ]\n\n")
		fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		flag.CommandLine.SetOutput(stdout)
		flag.PrintDefaults()
	}
	flag.CommandLine.ShortUsage = func() {
		fmt.Fprint(stderr, "\nUsage:\tdockerd [OPTIONS]\n")
	}

	if err := flag.CommandLine.ParseFlags(os.Args[1:], false); err != nil {
		os.Exit(1)
	}

	if *flVersion {
		showVersion()
		return
	}

	if *flHelp {
		// if global flag --help is present, regardless of what other options and commands there are,
		// just print the usage.
		flag.Usage()
		return
	}

	// On Windows, this may be launching as a service or with an option to
	// register the service.
	stop, err := initService()
	if err != nil {
		logrus.Fatal(err)
	}

	// Unless the service handler asked us to stop, run the daemon and
	// notify the service manager on shutdown.
	if !stop {
		err = daemonCli.start()
		notifyShutdown(err)
		if err != nil {
			logrus.Fatal(err)
		}
	}
}
// main is the entry point for the docking binary: it installs the usage
// printer, parses flags, handles --version/--help, and otherwise runs the
// requested command through the docking CLI.
func main() {
	fmt.Print("Hello, world\n")

	// Define global flag here
	// end global flag definition

	flag.Usage = func() {
		fmt.Fprint(os.Stdout, "Usage: docking [OPTIONS] COMMAND [arg...]\n docking [ --help | -v | --version ]\n\n")
		fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		flag.CommandLine.SetOutput(os.Stdout)
		flag.PrintDefaults()

		// Append one line per registered command: name plus description.
		help := "\nCommands:\n"
		for _, cmd := range dockingCommands {
			help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description)
		}
		help += "\nRun 'docking COMMAND --help' for more information on a command."
		fmt.Fprintf(os.Stdout, "%s\n", help)
	}

	flag.Parse()

	if *flVersion {
		showVersion()
		return
	}

	if *flHelp {
		// if global flag --help is present, regardless of what other options and commands there are,
		// just print the usage.
		flag.Usage()
		return
	}

	// Client mode: build the CLI and dispatch the remaining arguments,
	// translating cli.StatusError into the matching exit code.
	clientCli := client.NewDockingCli(os.Stdin, os.Stdout, os.Stderr, clientFlags)

	c := cli.New(clientCli)
	if err := c.Run(flag.Args()...); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(os.Stderr, sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// Cmd is borrowed from Docker UI and acts as the entry point for network UI commands. // network UI commands are designed to be invoked from multiple parent chains func (cli *NetworkCli) Cmd(chain string, args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) if exists { return method(chain+" "+args[0], args[2:]...) } } if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { return fmt.Errorf("%s: '%s' is not a %s command. See '%s --help'.\n", chain, args[0], chain, chain) } return method(chain, args[1:]...) } flag.Usage() return nil }
// CmdContainer dispatches "dockerf container" subcommands. It parses the
// container flag set, rejects unsupported modes (--version exits, --daemon
// is not supported), requires --machine, optionally targets a swarm
// cluster, and forwards the remaining arguments to a docker client.
func (dcli *DockerfCli) CmdContainer(args ...string) error {
	flag.CommandLine.Parse(args)

	if *flVersion {
		showVersion()
		os.Exit(1)
	}

	if *flDaemon {
		if *flHelp {
			flag.Usage()
			os.Exit(1)
		}
		fmt.Printf("dockerf container does not support daemon.\n")
		os.Exit(1)
	}

	// A target machine is mandatory for any container operation.
	if *flMachine == "" {
		app := path.Base(os.Args[0])
		cmd := "container"
		fmt.Printf("%s: \"%s\" requires --machine flag for manage any container. See '%s %s --help'. \n", app, cmd, app, cmd)
		os.Exit(1)
	}

	cluster := ""
	if *flSwarm {
		cluster = "swarm"
	}

	// Point the TLS flags at the selected machine (and cluster) before
	// building the docker client.
	rewriteTLSFlags(dcli, *flMachine, cluster)

	cli := newDockerClient()
	if err := cli.Cmd(flag.Args()...); err != nil {
		if sterr, ok := err.(client.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(cli.Err(), sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(cli.Err(), err)
		os.Exit(1)
	}
	return nil
}
func main() { flag.Usage = func() { fmt.Fprint(os.Stdout, "Usage: gofind [OPTIONS] COMMAND [arg...]\ngofind [ --help | -v | --version ]\n") flag.CommandLine.SetOutput(os.Stdout) flag.PrintDefaults() } if h { flag.Usage() } else { fmt.Printf("Path: %s\n", path) fmt.Printf("Name: %s\n", name) fmt.Printf("ARGS: %v\n", flag.Args()) err := finder(path, name) if err != nil { fmt.Println(err) return } } }
// CmdHelp displays information on a Docker command. // // If more than one command is specified, information is only shown for the first command. // // Usage: docker help COMMAND or docker COMMAND --help func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) if exists { method("--help") return nil } } if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0]) } method("--help") return nil } flag.Usage() return nil }
// CmdHelp displays information on a Docker command. // // If more than one command is specified, information is only shown for the first command. // // Usage: docker help COMMAND or docker COMMAND --help func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) if exists { method("--help") return nil } } if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0]) os.Exit(1) } else { method("--help") return nil } } flag.Usage() return nil }
// mainDaemon starts the Docker daemon: it migrates the trust key,
// registers engine builtins and the registry service, boots the daemon in
// the background while the HTTP API starts accepting connections, and then
// waits on both the daemon init and the API server, shutting down cleanly
// on error.
func mainDaemon() {
	// The daemon takes no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	eng := engine.New()
	signal.Trap(eng.Shutdown)

	if err := migrateKey(); err != nil {
		logrus.Fatal(err)
	}
	daemonCfg.TrustKeyPath = *flTrustKey

	// Load builtins
	if err := builtins.Register(eng); err != nil {
		logrus.Fatal(err)
	}

	// load registry service
	if err := registry.NewService(registryCfg).Install(eng); err != nil {
		logrus.Fatal(err)
	}

	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	daemonInitWait := make(chan error)
	go func() {
		d, err := daemon.NewDaemon(daemonCfg, eng)
		if err != nil {
			daemonInitWait <- err
			return
		}

		logrus.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
			dockerversion.VERSION,
			dockerversion.GITCOMMIT,
			d.ExecutionDriver().Name(),
			d.GraphDriver().String(),
		)

		if err := d.Install(eng); err != nil {
			daemonInitWait <- err
			return
		}

		b := &builder.BuilderJob{eng, d}
		b.Install()

		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			daemonInitWait <- err
			return
		}
		daemonInitWait <- nil
	}()

	// Serve api
	job := eng.Job("serveapi", flHosts...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", daemonCfg.EnableCors)
	job.Setenv("CorsHeaders", daemonCfg.CorsHeaders)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", daemonCfg.SocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.SetenvBool("BufferRequests", true)

	// The serve API job never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			logrus.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	// Wait for the daemon startup goroutine to finish
	// This makes sure we can actually cleanly shutdown the daemon
	logrus.Debug("waiting for daemon to initialize")
	errDaemon := <-daemonInitWait
	if errDaemon != nil {
		eng.Shutdown()
		outStr := fmt.Sprintf("Shutting down daemon due to errors: %v", errDaemon)
		if strings.Contains(errDaemon.Error(), "engine is shutdown") {
			// if the error is "engine is shutdown", we've already reported (or
			// will report below in API server errors) the error
			outStr = "Shutting down daemon due to reported errors"
		}
		// we must "fatal" exit here as the API server may be happy to
		// continue listening forever if the error had no impact to API
		logrus.Fatal(outStr)
	} else {
		logrus.Info("Daemon has completed initialization")
	}

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	errAPI := <-serveAPIWait
	// If we have an error here it is unique to API (as daemonErr would have
	// exited the daemon process above)
	eng.Shutdown()
	if errAPI != nil {
		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
	}
}
// main is the docker entry point (annotated build): it merges the flag
// sets, installs the usage printer, parses flags, and then dispatches to
// version/help handling or the combined client/daemon CLI.
func main() {
	// Pre-start check: reexec.Init returns true when the binary was
	// re-executed for an internal initializer, in which case we are done.
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()

	logrus.SetOutput(stderr)

	// Merge the flag sets. Every flag set is an mflag.FlagSet, defined as:
	//type FlagSet struct {
	//	// Usage is the function called when an error occurs while parsing flags.
	//	// The field is a function (not a method) that may be changed to point to
	//	// a custom error handler.
	/*	Usage func()
		ShortUsage func()
		name string
		parsed bool
		actual map[string]*Flag
		formal map[string]*Flag
		args []string // arguments after flags
		errorHandling ErrorHandling
		output io.Writer // nil means stderr; use Out() accessor
		nArgRequirements []nArgRequirement
	}*/
	// Merge the flags into flag.CommandLine; these become the Options.
	flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)

	// Define how docker usage is printed. This only installs the Usage
	// function; it runs later, whenever usage actually needs printing.
	flag.Usage = func() {
		// First line: client usage; then daemon usage; third line is fixed.
		fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n")
		fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

		// Print the Options from flag.CommandLine; output goes to stdout.
		flag.CommandLine.SetOutput(stdout)
		// The actual option dump.
		flag.PrintDefaults()

		// Now print the available Commands.
		help := "\nCommands:\n"

		// One line per command: name and description. dockerCommands
		// ultimately comes from cli.DockerCommands.
		for _, cmd := range dockerCommands {
			help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description)
		}

		help += "\nRun 'docker COMMAND --help' for more information on a command."
		// Emit the assembled help text.
		fmt.Fprintf(stdout, "%s\n", help)
	}

	// Parse the command line.
	flag.Parse()

	// --version is handled on its own, keyed off the flVersion flag.
	if *flVersion {
		showVersion()
		return
	}

	// --help is handled on its own as well.
	if *flHelp {
		// if global flag --help is present, regardless of what other options and commands there are,
		// just print the usage.
		flag.Usage()
		return
	}

	// Create the client-mode docker CLI; see NewDockerCli in the
	// api/client/cli.go package.
	clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags)

	// Combine the client CLI and the daemon CLI; only one of them actually
	// handles a given invocation. daemonCli is created in docker/daemon.go
	// via: cli.Handler = NewDaemonCli()
	// cli.New takes the docker CLI and daemon CLI handlers — two objects of
	// different structure — and returns a single Cli:
	/*
		type Cli struct {
			Stderr   io.Writer
			handlers []Handler
			Usage    func()
		}
	*/
	// clientCli and daemonCli are stored in the handlers slice.
	c := cli.New(clientCli, daemonCli)
	// See func (cli *Cli) Run(args ...string) error in cli/cli.go.
	if err := c.Run(flag.Args()...); err != nil {
		if sterr, ok := err.(cli.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(stderr, sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(stderr, err)
		os.Exit(1)
	}
}
// showHelp prints the command-line usage text via the mflag package's
// Usage function.
func showHelp() {
	mflag.Usage()
}
// main is the docker entry point for this build: it parses flags,
// configures logging and TLS, resolves the daemon host, and then runs
// either the daemon or a client command.
func main() {
	if reexec.Init() {
		return
	}

	// Set terminal emulation based on platform as required.
	stdin, stdout, stderr := term.StdStreams()

	initLogging(stderr)

	flag.Parse()
	// FIXME: validate daemon flags here

	if *flVersion {
		showVersion()
		return
	}

	if *flConfigDir != "" {
		cliconfig.SetConfigDir(*flConfigDir)
	}

	// Logging level: explicit --log-level wins, otherwise Info;
	// --debug overrides both and also sets DEBUG=1 in the environment.
	if *flLogLevel != "" {
		lvl, err := logrus.ParseLevel(*flLogLevel)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
			os.Exit(1)
		}
		setLogLevel(lvl)
	} else {
		setLogLevel(logrus.InfoLevel)
	}

	if *flDebug {
		os.Setenv("DEBUG", "1")
		setLogLevel(logrus.DebugLevel)
	}

	// No -H given: fall back to DOCKER_HOST or the platform default.
	if len(flHosts) == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			if runtime.GOOS != "windows" {
				// If we do not have a host, default to unix socket
				defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
			} else {
				// If we do not have a host, default to TCP socket on Windows
				defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
			}
		}
		defaultHost, err := opts.ValidateHost(defaultHost)
		if err != nil {
			if *flDaemon {
				logrus.Fatal(err)
			} else {
				fmt.Fprint(os.Stderr, err)
			}
			os.Exit(1)
		}
		flHosts = append(flHosts, defaultHost)
	}

	setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)

	// Regardless of whether the user sets it to true or false, if they
	// specify --tlsverify at all then we need to turn on tls
	// *flTlsVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well
	if flag.IsSet("-tlsverify") || *flTlsVerify {
		*flTls = true
	}

	if *flDaemon {
		if *flHelp {
			flag.Usage()
			return
		}
		mainDaemon()
		return
	}

	// From here on, we assume we're a client, not a server.
	if len(flHosts) > 1 {
		fmt.Fprintf(os.Stderr, "Please specify only one -H")
		os.Exit(0)
	}
	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)

	var tlsConfig *tls.Config
	if *flTls {
		tlsOptions.InsecureSkipVerify = !*flTlsVerify
		// Drop defaulted cert/key paths that do not exist on disk, so the
		// TLS client config does not fail on missing files.
		if !flag.IsSet("-tlscert") {
			if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
				tlsOptions.CertFile = ""
			}
		}
		if !flag.IsSet("-tlskey") {
			if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
				tlsOptions.KeyFile = ""
			}
		}
		var err error
		tlsConfig, err = tlsconfig.Client(tlsOptions)
		if err != nil {
			fmt.Fprintln(stderr, err)
			os.Exit(1)
		}
	}

	cli := client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], tlsConfig)

	// Run the client command, mapping client.StatusError to exit codes.
	if err := cli.Cmd(flag.Args()...); err != nil {
		if sterr, ok := err.(client.StatusError); ok {
			if sterr.Status != "" {
				fmt.Fprintln(cli.Err(), sterr.Status)
				os.Exit(1)
			}
			os.Exit(sterr.StatusCode)
		}
		fmt.Fprintln(cli.Err(), err)
		os.Exit(1)
	}
}
// main is the entry point for this (older) docker build: it handles
// .dockerinit re-exec, defines every command-line flag, and then runs
// either the daemon (in-process) or the client.
func main() {
	if len(dockerConfDir) == 0 {
		dockerConfDir = filepath.Join(os.Getenv("HOME"), ".docker")
	}
	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
		// Running in init mode
		sysinit.SysInit()
		return
	}

	// All command-line flags, using the mflag alias-list convention
	// ("#x" marks a deprecated short form).
	var (
		flVersion            = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
		flDaemon             = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
		flGraphOpts          opts.ListOpts
		flDebug              = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
		flAutoRestart        = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
		bridgeName           = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
		bridgeIp             = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile              = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot               = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
		flSocketGroup        = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
		flEnableCors         = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
		flDns                = opts.NewListOpts(opts.ValidateIPAddress)
		flDnsSearch          = opts.NewListOpts(opts.ValidateDnsSearch)
		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
		flDefaultIp          = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
		flGraphDriver        = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
		flExecDriver         = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
		flHosts              = opts.NewListOpts(api.ValidateHost)
		flMtu                = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
		flTls                = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
		flTlsVerify          = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
		flCa                 = flag.String([]string{"-tlscacert"}, filepath.Join(dockerConfDir, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here")
		flCert               = flag.String([]string{"-tlscert"}, filepath.Join(dockerConfDir, defaultCertFile), "Path to TLS certificate file")
		flKey                = flag.String([]string{"-tlskey"}, filepath.Join(dockerConfDir, defaultKeyFile), "Path to TLS key file")
		flSelinuxEnabled     = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
	)
	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
	flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
	flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")

	flag.Parse()

	if *flVersion {
		showVersion()
		return
	}
	// No -H given: fall back to DOCKER_HOST or the default unix socket.
	if flHosts.Len() == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			// If we do not have a host, default to unix socket
			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
		}
		if _, err := api.ValidateHost(defaultHost); err != nil {
			log.Fatal(err)
		}
		flHosts.Set(defaultHost)
	}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}

	if !*flEnableIptables && !*flInterContainerComm {
		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}

	if net.ParseIP(*flDefaultIp) == nil {
		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
	}

	if *flDebug {
		os.Setenv("DEBUG", "1")
	}

	if *flDaemon {
		// Daemon mode: only supported as root on linux, with no extra args.
		if runtime.GOOS != "linux" {
			log.Fatalf("The Docker daemon is only supported on linux")
		}
		if os.Geteuid() != 0 {
			log.Fatalf("The Docker daemon needs to be run as root")
		}

		if flag.NArg() != 0 {
			flag.Usage()
			return
		}

		// set up the TempDir to use a canonical path
		tmp := os.TempDir()
		realTmp, err := utils.ReadSymlinkedDirectory(tmp)
		if err != nil {
			log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
		}
		os.Setenv("TMPDIR", realTmp)

		// get the canonical path to the Docker root directory
		root := *flRoot
		var realRoot string
		if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
			realRoot = root
		} else {
			realRoot, err = utils.ReadSymlinkedDirectory(root)
			if err != nil {
				log.Fatalf("Unable to get the full path to root (%s): %s", root, err)
			}
		}
		if err := checkKernelAndArch(); err != nil {
			log.Fatal(err)
		}

		eng := engine.New()
		// Load builtins
		if err := builtins.Register(eng); err != nil {
			log.Fatal(err)
		}

		// handle the pidfile early. https://github.com/docker/docker/issues/6973
		if len(*pidfile) > 0 {
			job := eng.Job("initserverpidfile", *pidfile)
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
		}

		// load the daemon in the background so we can immediately start
		// the http api so that connections don't fail while the daemon
		// is booting
		go func() {
			// Load plugin: httpapi
			job := eng.Job("initserver")
			// include the variable here too, for the server config
			job.Setenv("Pidfile", *pidfile)
			job.Setenv("Root", realRoot)
			job.SetenvBool("AutoRestart", *flAutoRestart)
			job.SetenvList("Dns", flDns.GetAll())
			job.SetenvList("DnsSearch", flDnsSearch.GetAll())
			job.SetenvBool("EnableIptables", *flEnableIptables)
			job.SetenvBool("EnableIpForward", *flEnableIpForward)
			job.Setenv("BridgeIface", *bridgeName)
			job.Setenv("BridgeIP", *bridgeIp)
			job.Setenv("DefaultIp", *flDefaultIp)
			job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
			job.Setenv("GraphDriver", *flGraphDriver)
			job.SetenvList("GraphOptions", flGraphOpts.GetAll())
			job.Setenv("ExecDriver", *flExecDriver)
			job.SetenvInt("Mtu", *flMtu)
			job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
			job.SetenvList("Sockets", flHosts.GetAll())
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
			// after the daemon is done setting up we can tell the api to start
			// accepting connections
			if err := eng.Job("acceptconnections").Run(); err != nil {
				log.Fatal(err)
			}
		}()

		// TODO actually have a resolved graphdriver to show?
		log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
			dockerversion.VERSION,
			dockerversion.GITCOMMIT,
			*flExecDriver,
			*flGraphDriver)

		// Serve api — blocks for the lifetime of the daemon.
		job := eng.Job("serveapi", flHosts.GetAll()...)
		job.SetenvBool("Logging", true)
		job.SetenvBool("EnableCors", *flEnableCors)
		job.Setenv("Version", dockerversion.VERSION)
		job.Setenv("SocketGroup", *flSocketGroup)
		job.SetenvBool("Tls", *flTls)
		job.SetenvBool("TlsVerify", *flTlsVerify)
		job.Setenv("TlsCa", *flCa)
		job.Setenv("TlsCert", *flCert)
		job.Setenv("TlsKey", *flKey)
		job.SetenvBool("BufferRequests", true)
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
	} else {
		// Client mode.
		if flHosts.Len() > 1 {
			log.Fatal("Please specify only one -H")
		}
		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)

		var (
			cli       *client.DockerCli
			tlsConfig tls.Config
		)
		tlsConfig.InsecureSkipVerify = true

		// If we should verify the server, we need to load a trusted ca
		if *flTlsVerify {
			*flTls = true
			certPool := x509.NewCertPool()
			file, err := ioutil.ReadFile(*flCa)
			if err != nil {
				log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
			}
			certPool.AppendCertsFromPEM(file)
			tlsConfig.RootCAs = certPool
			tlsConfig.InsecureSkipVerify = false
		}

		// If tls is enabled, try to load and send client certificates
		if *flTls || *flTlsVerify {
			_, errCert := os.Stat(*flCert)
			_, errKey := os.Stat(*flKey)
			if errCert == nil && errKey == nil {
				*flTls = true
				cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
				if err != nil {
					log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
				}
				tlsConfig.Certificates = []tls.Certificate{cert}
			}
		}

		if *flTls || *flTlsVerify {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
		} else {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
		}

		// Run the client command, mapping StatusError to an exit code.
		if err := cli.ParseCommands(flag.Args()...); err != nil {
			if sterr, ok := err.(*utils.StatusError); ok {
				if sterr.Status != "" {
					log.Println(sterr.Status)
				}
				os.Exit(sterr.StatusCode)
			}
			log.Fatal(err)
		}
	}
}
// mainDaemon runs the Docker daemon: it validates daemon-only flag
// combinations, creates the engine, kicks off server initialization in a
// background goroutine so the API sockets come up immediately, and then
// blocks serving the HTTP API.
//
// All failures are fatal (log.Fatal exits the process); the only normal
// return is the usage short-circuit when positional args are present.
func mainDaemon() {
	// The daemon accepts no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	// -b and --bip both configure the bridge; they cannot be combined.
	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}
	// Inter-container communication is implemented via iptables rules, so
	// disabling iptables while relying on ICC is contradictory.
	if !*flEnableIptables && !*flInterContainerComm {
		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}
	// --ip must be a parseable IP address.
	if net.ParseIP(*flDefaultIp) == nil {
		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
	}
	eng := engine.New()
	// Ensure the engine shuts down cleanly on termination signals.
	signal.Trap(eng.Shutdown)
	// Load builtins
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}
	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	go func() {
		// Load plugin: httpapi
		job := eng.Job("initserver")
		// include the variable here too, for the server config
		job.Setenv("Pidfile", *pidfile)
		job.Setenv("Root", *flRoot)
		job.SetenvBool("AutoRestart", *flAutoRestart)
		job.SetenvList("Dns", flDns.GetAll())
		job.SetenvList("DnsSearch", flDnsSearch.GetAll())
		job.SetenvBool("EnableIptables", *flEnableIptables)
		job.SetenvBool("EnableIpForward", *flEnableIpForward)
		job.Setenv("BridgeIface", *bridgeName)
		job.Setenv("BridgeIP", *bridgeIp)
		job.Setenv("DefaultIp", *flDefaultIp)
		job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
		job.Setenv("GraphDriver", *flGraphDriver)
		job.SetenvList("GraphOptions", flGraphOpts.GetAll())
		job.Setenv("ExecDriver", *flExecDriver)
		job.SetenvInt("Mtu", *flMtu)
		job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
		job.SetenvList("Sockets", flHosts.GetAll())
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			log.Fatal(err)
		}
	}()
	// TODO actually have a resolved graphdriver to show?
	log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
		dockerversion.VERSION,
		dockerversion.GITCOMMIT,
		*flExecDriver,
		*flGraphDriver)

	// Serve api — this blocks the main goroutine for the lifetime of the
	// daemon while the goroutine above finishes initialization.
	job := eng.Job("serveapi", flHosts.GetAll()...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", *flSocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.SetenvBool("BufferRequests", true)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}
func main() { // Set terminal emulation based on platform as required. stdin, stdout, stderr := term.StdStreams() logrus.SetOutput(stderr) flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) cobraAdaptor := cobraadaptor.NewCobraAdaptor(clientFlags) flag.Usage = func() { fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n docker [ --help | -v | --version ]\n\n") fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") flag.CommandLine.SetOutput(stdout) flag.PrintDefaults() help := "\nCommands:\n" dockerCommands := append(cli.DockerCommandUsage, cobraAdaptor.Usage()...) for _, cmd := range sortCommands(dockerCommands) { help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) } help += "\nRun 'docker COMMAND --help' for more information on a command." fmt.Fprintf(stdout, "%s\n", help) } flag.Parse() if *flVersion { showVersion() return } if *flHelp { // if global flag --help is present, regardless of what other options and commands there are, // just print the usage. flag.Usage() return } clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) c := cli.New(clientCli, NewDaemonProxy(), cobraAdaptor) if err := c.Run(flag.Args()...); err != nil { if sterr, ok := err.(cli.StatusError); ok { if sterr.Status != "" { fmt.Fprintln(stderr, sterr.Status) } // StatusError should only be used for errors, and all errors should // have a non-zero exit status, so never exit with 0 if sterr.StatusCode == 0 { os.Exit(1) } os.Exit(sterr.StatusCode) } fmt.Fprintln(stderr, err) os.Exit(1) } }
func main() { if reexec.Init() { return } // Set terminal emulation based on platform as required. stdin, stdout, stderr := term.StdStreams() initLogging(stderr) flag.Parse() // FIXME: validate daemon flags here if *flVersion { showVersion() return } if *flLogLevel != "" { lvl, err := logrus.ParseLevel(*flLogLevel) if err != nil { logrus.Fatalf("Unable to parse logging level: %s", *flLogLevel) } setLogLevel(lvl) } else { setLogLevel(logrus.InfoLevel) } // -D, --debug, -l/--log-level=debug processing // When/if -D is removed this block can be deleted if *flDebug { os.Setenv("DEBUG", "1") setLogLevel(logrus.DebugLevel) } if len(flHosts) == 0 { defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } defaultHost, err := api.ValidateHost(defaultHost) if err != nil { logrus.Fatal(err) } flHosts = append(flHosts, defaultHost) } setDefaultConfFlag(flTrustKey, defaultTrustKeyFile) if *flDaemon { if *flHelp { flag.Usage() return } mainDaemon() return } if len(flHosts) > 1 { logrus.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts[0], "://", 2) var ( cli *client.DockerCli tlsConfig tls.Config ) tlsConfig.InsecureSkipVerify = true // Regardless of whether the user sets it to true or false, if they // specify --tlsverify at all then we need to turn on tls if flag.IsSet("-tlsverify") { *flTls = true } // If we should verify the server, we need to load a trusted ca if *flTlsVerify { certPool := x509.NewCertPool() file, err := ioutil.ReadFile(*flCa) if err != nil { logrus.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) } certPool.AppendCertsFromPEM(file) tlsConfig.RootCAs = certPool tlsConfig.InsecureSkipVerify = false } // If tls is enabled, try to load and send client certificates if *flTls || *flTlsVerify { _, errCert := os.Stat(*flCert) _, errKey := os.Stat(*flKey) if errCert == nil && errKey == nil { *flTls = 
true cert, err := tls.LoadX509KeyPair(*flCert, *flKey) if err != nil { logrus.Fatalf("Couldn't load X509 key pair: %q. Make sure the key is encrypted", err) } tlsConfig.Certificates = []tls.Certificate{cert} } // Avoid fallback to SSL protocols < TLS1.0 tlsConfig.MinVersion = tls.VersionTLS10 } if *flTls || *flTlsVerify { cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig) } else { cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil) } if err := cli.Cmd(flag.Args()...); err != nil { if sterr, ok := err.(client.StatusError); ok { if sterr.Status != "" { logrus.Println(sterr.Status) } os.Exit(sterr.StatusCode) } logrus.Fatal(err) } }
// TODO rewrite this whole PoC
//
// main pulls one or more images (v1 registry protocol) into a temporary
// directory laid out like a `docker save` archive (per-image json +
// layer.tar plus a top-level "repositories" file), then tars that
// directory to outputStream ("-" for stdout).
//
// NOTE(review): every error path calls os.Exit(1), which bypasses the
// deferred os.RemoveAll(tempDir) and output.Close() — the temp dir leaks
// on failure. Confirm whether that is acceptable for this PoC.
func main() {
	flag.Usage = func() {
		flag.PrintDefaults()
	}
	flag.Parse()

	if debug {
		os.Setenv("DEBUG", "1")
		log.SetLevel(log.DebugLevel)
	}
	// At least one image name is required.
	if flag.NArg() == 0 {
		fmt.Println("ERROR: no image names provided")
		flag.Usage()
		os.Exit(1)
	}

	// make tempDir
	tempDir, err := ioutil.TempDir("", "docker-fetch-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer os.RemoveAll(tempDir)

	fetcher := NewFetcher(tempDir)
	sc := registry.NewServiceConfig(rOptions)

	// Fetch each requested image in turn.
	for _, arg := range flag.Args() {
		// Split "name:tag"; an omitted tag means "latest".
		remote, tagName := parsers.ParseRepositoryTag(arg)
		if tagName == "" {
			tagName = "latest"
		}

		repInfo, err := sc.NewRepositoryInfo(remote)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		log.Debugf("%#v %q\n", repInfo, tagName)

		idx, err := repInfo.GetEndpoint()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Fprintf(os.Stderr, "Pulling %s:%s from %s\n", repInfo.RemoteName, tagName, idx)

		// Reuse an existing session for this endpoint if one was cached.
		var session *registry.Session
		if s, ok := fetcher.sessions[idx.String()]; ok {
			session = s
		} else {
			// TODO(vbatts) obviously the auth and http factory shouldn't be nil here
			session, err = registry.NewSession(nil, nil, idx, timeout)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
		}

		rd, err := session.GetRepositoryData(repInfo.RemoteName)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		log.Debugf("rd: %#v", rd)

		// produce the "repositories" file for the archive
		if _, ok := fetcher.repositories[repInfo.RemoteName]; !ok {
			fetcher.repositories[repInfo.RemoteName] = graph.Repository{}
		}
		log.Debugf("repositories: %#v", fetcher.repositories)

		if len(rd.Endpoints) == 0 {
			log.Fatalf("expected registry endpoints, but received none from the index")
		}

		tags, err := session.GetRemoteTags(rd.Endpoints, repInfo.RemoteName, rd.Tokens)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		// Record the image hash for the requested tag.
		if hash, ok := tags[tagName]; ok {
			fetcher.repositories[repInfo.RemoteName][tagName] = hash
		}
		log.Debugf("repositories: %#v", fetcher.repositories)

		// Full ancestry (layer chain) for the tagged image.
		imgList, err := session.GetRemoteHistory(fetcher.repositories[repInfo.RemoteName][tagName], rd.Endpoints[0], rd.Tokens)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		log.Debugf("imgList: %#v", imgList)

		for _, imgID := range imgList {
			// pull layers and jsons
			buf, _, err := session.GetRemoteImageJSON(imgID, rd.Endpoints[0], rd.Tokens)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			if err = os.MkdirAll(filepath.Join(fetcher.Root, imgID), 0755); err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			// Write the image config as <root>/<imgID>/json.
			fh, err := os.Create(filepath.Join(fetcher.Root, imgID, "json"))
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			if _, err = fh.Write(buf); err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			fh.Close()
			log.Debugf("%s", fh.Name())

			// Stream the layer into <root>/<imgID>/layer.tar.
			tarRdr, err := session.GetRemoteImageLayer(imgID, rd.Endpoints[0], rd.Tokens, 0)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			fh, err = os.Create(filepath.Join(fetcher.Root, imgID, "layer.tar"))
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			// the body is usually compressed
			gzRdr, err := gzip.NewReader(tarRdr)
			if err != nil {
				log.Debugf("image layer for %q is not gzipped", imgID)
				// the archive may not be gzipped, so just copy the stream
				if _, err = io.Copy(fh, tarRdr); err != nil {
					fmt.Fprintln(os.Stderr, err)
					os.Exit(1)
				}
			} else {
				// no error, so gzip decompress the stream
				if _, err = io.Copy(fh, gzRdr); err != nil {
					fmt.Fprintln(os.Stderr, err)
					os.Exit(1)
				}
				if err = gzRdr.Close(); err != nil {
					fmt.Fprintln(os.Stderr, err)
					os.Exit(1)
				}
			}
			if err = tarRdr.Close(); err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			if err = fh.Close(); err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
			log.Debugf("%s", fh.Name())
		}
	}

	// marshal the "repositories" file for writing out
	log.Debugf("repositories: %q", fetcher.repositories)
	buf, err := json.Marshal(fetcher.repositories)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fh, err := os.Create(filepath.Join(fetcher.Root, "repositories"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if _, err = fh.Write(buf); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fh.Close()
	log.Debugf("%s", fh.Name())

	// Select the archive destination: stdout for "-", otherwise a file.
	var output io.WriteCloser
	if outputStream == "-" {
		output = os.Stdout
	} else {
		output, err = os.Create(outputStream)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
	defer output.Close()

	// Tar the fetched tree (relative to the fetch root) to the output.
	if err = os.Chdir(fetcher.Root); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	tarStream, err := archive.Tar(".", archive.Uncompressed)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if _, err = io.Copy(output, tarStream); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// mainDaemon starts the Docker daemon: it sets up logging, umask and the
// optional pidfile, builds the API server configuration (including TLS),
// starts the API listener in a goroutine, initializes the daemon proper,
// and then blocks until the API serve loop exits.
//
// The serve goroutine reports its terminal error (or nil) exactly once on
// serveAPIWait, which the tail of this function and the signal handler
// both rely on.
func mainDaemon() {
	if utils.ExperimentalBuild() {
		logrus.Warn("Running experimental build")
	}
	// The daemon accepts no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: timeutils.RFC3339NanoFixed})

	if err := setDefaultUmask(); err != nil {
		logrus.Fatalf("Failed to set umask: %v", err)
	}

	// Create the pidfile if configured; it is removed again on normal
	// return (defer) and explicitly on the fatal paths below, because
	// logrus.Fatalf exits without running defers.
	var pfile *pidfile.PidFile
	if daemonCfg.Pidfile != "" {
		pf, err := pidfile.New(daemonCfg.Pidfile)
		if err != nil {
			logrus.Fatalf("Error starting daemon: %v", err)
		}
		pfile = pf
		defer func() {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}()
	}

	serverConfig := &apiserver.ServerConfig{
		Logging:     true,
		EnableCors:  daemonCfg.EnableCors,
		CorsHeaders: daemonCfg.CorsHeaders,
		Version:     dockerversion.VERSION,
	}
	serverConfig = setPlatformServerConfig(serverConfig, daemonCfg)

	// Build the server-side TLS config when --tls is set; --tlsverify
	// additionally requires client certificates.
	if *flTls {
		if *flTlsVerify {
			tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
		}
		tlsConfig, err := tlsconfig.Server(tlsOptions)
		if err != nil {
			logrus.Fatal(err)
		}
		serverConfig.TLSConfig = tlsConfig
	}

	api := apiserver.New(serverConfig)

	// The serve API routine never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := api.ServeApi(flHosts); err != nil {
			logrus.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	if err := migrateKey(); err != nil {
		logrus.Fatal(err)
	}
	daemonCfg.TrustKeyPath = *flTrustKey

	registryService := registry.NewService(registryCfg)
	d, err := daemon.NewDaemon(daemonCfg, registryService)
	if err != nil {
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
		logrus.Fatalf("Error starting daemon: %v", err)
	}

	logrus.Info("Daemon has completed initialization")

	logrus.WithFields(logrus.Fields{
		"version":     dockerversion.VERSION,
		"commit":      dockerversion.GITCOMMIT,
		"execdriver":  d.ExecutionDriver().Name(),
		"graphdriver": d.GraphDriver().String(),
	}).Info("Docker daemon")

	// On a termination signal: stop accepting connections, wait for the
	// serve loop to drain, then shut the daemon down with a 15s timeout.
	signal.Trap(func() {
		api.Close()
		<-serveAPIWait
		shutdownDaemon(d, 15)
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
	})

	// after the daemon is done setting up we can tell the api to start
	// accepting connections with specified daemon
	api.AcceptConnections(d)

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API to complete
	errAPI := <-serveAPIWait
	shutdownDaemon(d, 15)
	if errAPI != nil {
		if pfile != nil {
			if err := pfile.Remove(); err != nil {
				logrus.Error(err)
			}
		}
		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
	}
}
func main() { flag.Usage = func() { flag.PrintDefaults() } flag.Parse() if debug { os.Setenv("DEBUG", "1") logrus.SetLevel(logrus.DebugLevel) } if flag.NArg() == 0 { flag.Usage() logrus.Fatal("no image names provided") } // make temporary working directory tempFetchRoot, err := ioutil.TempDir("", "docker-fetch-") if err != nil { logrus.Fatal(err) } refs := []*fetch.ImageRef{} for _, arg := range flag.Args() { ref := fetch.NewImageRef(arg) fmt.Fprintf(os.Stderr, "Pulling %s\n", ref) r := fetch.NewRegistry(ref.Host()) layersFetched, err := r.FetchLayers(ref, tempFetchRoot) if err != nil { logrus.Errorf("failed pulling %s, skipping: %s", ref, err) continue } logrus.Debugf("fetched %d layers for %s", len(layersFetched), ref) refs = append(refs, ref) } // marshal the "repositories" file for writing out buf, err := fetch.FormatRepositories(refs...) if err != nil { logrus.Fatal(err) } fh, err := os.Create(filepath.Join(tempFetchRoot, "repositories")) if err != nil { logrus.Fatal(err) } if _, err = fh.Write(buf); err != nil { logrus.Fatal(err) } fh.Close() logrus.Debugf("%s", fh.Name()) var output io.WriteCloser if outputStream == "-" { output = os.Stdout } else { output, err = os.Create(outputStream) if err != nil { logrus.Fatal(err) } } defer output.Close() if err = os.Chdir(tempFetchRoot); err != nil { logrus.Fatal(err) } tarStream, err := archive.Tar(".", archive.Uncompressed) if err != nil { logrus.Fatal(err) } if _, err = io.Copy(output, tarStream); err != nil { logrus.Fatal(err) } }