func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine {
	if root == "" {
		if dir, err := newTestDirectory(unitTestStoreBase); err != nil {
			t.Fatal(err)
		} else {
			root = dir
		}
	}
	if err := os.MkdirAll(root, 0700); err != nil {
		t.Fatal(err)
	}
	eng := engine.New()
	eng.Logging = false
	// Load default plugins
	builtins.Register(eng)
	// (This is manually copied and modified from main() until we have a more generic plugin system)
	cfg := &daemon.Config{
		Root:        root,
		AutoRestart: autorestart,
		ExecDriver:  "native",
		// Either InterContainerCommunication or EnableIptables must be set,
		// otherwise NewDaemon will fail because of conflicting settings.
		InterContainerCommunication: true,
	}
	d, err := daemon.NewDaemon(cfg, eng)
	if err != nil {
		t.Fatal(err)
	}
	if err := d.Install(eng); err != nil {
		t.Fatal(err)
	}
	return eng
}
func TestGetInfo(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("info", func(job *engine.Job) error {
		called = true
		v := &engine.Env{}
		v.SetInt("Containers", 1)
		v.SetInt("Images", 42000)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/info", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.GetInt("Images") != 42000 {
		t.Fatalf("%#v\n", v)
	}
	if v.GetInt("Containers") != 1 {
		t.Fatalf("%#v\n", v)
	}
	assertContentType(r, "application/json", t)
}
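// serveRequest, readEnv and assertContentType are small test helpers defined
// elsewhere in this package. A minimal sketch of plausible implementations,
// assuming the package exposes a ServeRequest(eng, version, w, req) entry
// point and a version.Version string type (names and bodies are illustrative
// assumptions, not the package's exact originals):
func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
}

func serveRequestUsingVersion(method, target string, v version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	r := httptest.NewRecorder()
	req, err := http.NewRequest(method, target, body)
	if err != nil {
		t.Fatal(err)
	}
	// Dispatch the request against the engine's registered jobs (assumed entry point).
	ServeRequest(eng, v, r, req)
	return r
}

func readEnv(src io.Reader, t *testing.T) *engine.Env {
	// Collect the handler's output stream into a single Env.
	out := engine.NewOutput()
	v, err := out.AddEnv()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(out, src); err != nil {
		t.Fatal(err)
	}
	out.Close()
	return v
}

func assertContentType(recorder *httptest.ResponseRecorder, contentType string, t *testing.T) {
	if actual := recorder.HeaderMap.Get("Content-Type"); actual != contentType {
		t.Fatalf("Content-Type %s, expected %s", actual, contentType)
	}
}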
func TestAllocatePortDetection(t *testing.T) {
	eng := engine.New()
	eng.Logging = false

	freePort := findFreePort(t)

	// Init driver
	job := eng.Job("initdriver")
	if res := InitDriver(job); res != engine.StatusOK {
		t.Fatal("Failed to initialize network driver")
	}

	// Allocate interface
	job = eng.Job("allocate_interface", "container_id")
	if res := Allocate(job); res != engine.StatusOK {
		t.Fatal("Failed to allocate network interface")
	}

	// Allocate same port twice, expect failure on second call
	job = newPortAllocationJob(eng, freePort)
	if res := AllocatePort(job); res != engine.StatusOK {
		t.Fatal("Failed to find a free port to allocate")
	}
	if res := AllocatePort(job); res == engine.StatusOK {
		t.Fatal("Duplicate port allocation granted by AllocatePort")
	}
}
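// findFreePort and newPortAllocationJob are shared fixtures for the port
// allocation tests above and below. A plausible sketch, assuming
// "allocate_port" is the job name consumed by AllocatePort and that HostIP,
// HostPort, Proto and ContainerPort are the env keys it reads (the helper
// bodies are illustrative assumptions):
func findFreePort(t *testing.T) int {
	// Bind to port 0 so the kernel picks an unused port, then release it.
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatal("Failed to find a free port")
	}
	defer l.Close()

	result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
	if err != nil {
		t.Fatal("Failed to resolve address to identify free port")
	}
	return result.Port
}

func newPortAllocationJob(eng *engine.Engine, port int) *engine.Job {
	strPort := strconv.Itoa(port)

	job := eng.Job("allocate_port", "container_id")
	job.Setenv("HostIP", "127.0.0.1")
	job.Setenv("HostPort", strPort)
	job.Setenv("Proto", "tcp")
	job.Setenv("ContainerPort", strPort)
	return job
}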
func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine {
	if root == "" {
		if dir, err := newTestDirectory(unitTestStoreBase); err != nil {
			t.Fatal(err)
		} else {
			root = dir
		}
	}
	if err := os.MkdirAll(root, 0700); err != nil {
		t.Fatal(err)
	}
	eng := engine.New()
	// Load default plugins
	builtins.Register(eng)
	// (This is manually copied and modified from main() until we have a more generic plugin system)
	job := eng.Job("initserver")
	job.Setenv("Root", root)
	job.SetenvBool("AutoRestart", autorestart)
	job.Setenv("ExecDriver", "native")
	// TestGetEnabledCors and TestOptionsRoute require EnableCors=true
	job.SetenvBool("EnableCors", true)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return eng
}
func TestGetContainersByName(t *testing.T) {
	eng := engine.New()
	name := "container_name"
	var called bool
	eng.Register("container_inspect", func(job *engine.Job) error {
		called = true
		if job.Args[0] != name {
			t.Errorf("name != '%s': %#v", name, job.Args[0])
		}
		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
			t.Errorf("dirty env variable not set")
		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
			t.Errorf("dirty env variable set when it shouldn't be")
		}
		v := &engine.Env{}
		v.SetBool("dirty", true)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)
	var stdoutJson interface{}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatalf("%#v", err)
	}
	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
		t.Fatalf("%#v", stdoutJson)
	}
}
func TestGetImagesJSON(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) error {
		called = true
		v := createEnvFromGetImagesJSONStruct(sampleImage)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/images/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	var observed getImagesJSONStruct
	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(observed, sampleImage) {
		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
	}
}
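// sampleImage and its helpers are shared fixtures for the images tests.
// A plausible sketch inferred from the fields exercised above and in the
// legacy-format test below; the concrete field values are illustrative
// assumptions, not the package's exact fixture:
type getImagesJSONStruct struct {
	RepoTags    []string
	Id          string
	Created     int64
	Size        int64
	VirtualSize int64
}

var sampleImage = getImagesJSONStruct{
	// "test-name:test-tag" is what the legacy (< 1.7) API splits into
	// the Repository and Tag fields checked in the legacy-format test.
	RepoTags:    []string{"test-name:test-tag"},
	Id:          "ID",
	Created:     999,
	Size:        777,
	VirtualSize: 666,
}

func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
	v := &engine.Env{}
	v.SetList("RepoTags", data.RepoTags)
	v.Set("Id", data.Id)
	v.SetInt64("Created", data.Created)
	v.SetInt64("Size", data.Size)
	v.SetInt64("VirtualSize", data.VirtualSize)
	return v
}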
func TestGetImagesJSONLegacyFormat(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) error {
		called = true
		outsLegacy := engine.NewTable("Created", 0)
		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	images := engine.NewTable("Created", 0)
	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
		t.Fatal(err)
	}
	if images.Len() != 1 {
		t.Fatalf("Expected 1 image, %d found", images.Len())
	}
	image := images.Data[0]
	if image.Get("Tag") != "test-tag" {
		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
	}
	if image.Get("Repository") != "test-name" {
		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
	}
}
func TestLogsNoStreams(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) error {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return nil
	})
	eng.Register("logs", func(job *engine.Job) error {
		logs = true
		return nil
	})
	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
	if r.Code != http.StatusBadRequest {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
	}
	if inspect {
		t.Fatal("container_inspect job was called, but it shouldn't have been")
	}
	if logs {
		t.Fatal("logs job was called, but it shouldn't have been")
	}
	res := strings.TrimSpace(r.Body.String())
	expected := "Bad parameters: you must choose at least one stream"
	if !strings.Contains(res, expected) {
		t.Fatalf("Output %s, expected %s in it", res, expected)
	}
}
func TestGetImagesHistory(t *testing.T) {
	eng := engine.New()
	imageName := "docker-test-image"
	var called bool
	eng.Register("history", func(job *engine.Job) error {
		called = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != imageName {
			t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
		}
		v := &engine.Env{}
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}
func TestGetVersion(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("version", func(job *engine.Job) error {
		called = true
		v := &engine.Env{}
		v.SetJson("Version", "42.1")
		v.Set("ApiVersion", "1.1.1.1.1")
		v.Set("GoVersion", "2.42")
		v.Set("Os", "Linux")
		v.Set("Arch", "x86_64")
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/version", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.Get("Version") != "42.1" {
		t.Fatalf("%#v\n", v)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}
func mainDaemon() {
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	eng := engine.New()
	signal.Trap(eng.Shutdown)
	// Load builtins
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}

	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	go func() {
		d, err := daemon.NewDaemon(daemonCfg, eng)
		if err != nil {
			log.Fatal(err)
		}
		if err := d.Install(eng); err != nil {
			log.Fatal(err)
		}

		b := &builder.BuilderJob{eng, d}
		b.Install()

		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			log.Fatal(err)
		}
	}()

	// TODO actually have a resolved graphdriver to show?
	log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
		dockerversion.VERSION,
		dockerversion.GITCOMMIT,
		daemonCfg.ExecDriver,
		daemonCfg.GraphDriver,
	)

	// Serve api
	job := eng.Job("serveapi", flHosts...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", *flSocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.Setenv("TrustKey", *flTrustKey)
	job.SetenvBool("BufferRequests", true)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}
func TestLogs(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) error {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return nil
	})
	expected := "logs"
	eng.Register("logs", func(job *engine.Job) error {
		logs = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		follow := job.Getenv("follow")
		if follow != "1" {
			t.Fatalf("follow: %s, must be 1", follow)
		}
		stdout := job.Getenv("stdout")
		if stdout != "1" {
			t.Fatalf("stdout %s, must be 1", stdout)
		}
		stderr := job.Getenv("stderr")
		if stderr != "" {
			t.Fatalf("stderr %s, must be empty", stderr)
		}
		timestamps := job.Getenv("timestamps")
		if timestamps != "1" {
			t.Fatalf("timestamps %s, must be 1", timestamps)
		}
		job.Stdout.Write([]byte(expected))
		return nil
	})
	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if !inspect {
		t.Fatal("container_inspect job was not called")
	}
	if !logs {
		t.Fatal("logs job was not called")
	}
	res := r.Body.String()
	if res != expected {
		t.Fatalf("Output %s, expected %s", res, expected)
	}
}
func TestLogEvents(t *testing.T) {
	e := New()
	eng := engine.New()
	if err := e.Install(eng); err != nil {
		t.Fatal(err)
	}

	// Log eventsLimit+16 events so the buffer drops the oldest 16.
	for i := 0; i < eventsLimit+16; i++ {
		action := fmt.Sprintf("action_%d", i)
		id := fmt.Sprintf("cont_%d", i)
		from := fmt.Sprintf("image_%d", i)
		job := eng.Job("log", action, id, from)
		if err := job.Run(); err != nil {
			t.Fatal(err)
		}
	}
	time.Sleep(50 * time.Millisecond)
	if len(e.events) != eventsLimit {
		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
	}

	job := eng.Job("events")
	job.SetenvInt64("since", 1)
	job.SetenvInt64("until", time.Now().Unix())
	buf := bytes.NewBuffer(nil)
	job.Stdout.Add(buf)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	buf = bytes.NewBuffer(buf.Bytes())
	dec := json.NewDecoder(buf)
	var msgs []utils.JSONMessage
	for {
		var jm utils.JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		msgs = append(msgs, jm)
	}
	if len(msgs) != eventsLimit {
		t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
	}
	first := msgs[0]
	if first.Status != "action_16" {
		t.Fatalf("First action is %s, must be action_16", first.Status)
	}
	last := msgs[len(msgs)-1]
	if last.Status != "action_79" {
		t.Fatalf("Last action is %s, must be action_79", last.Status)
	}
}
func TestGetImagesJSONAll(t *testing.T) {
	eng := engine.New()
	allFilter := "-1"
	eng.Register("images", func(job *engine.Job) error {
		allFilter = job.Getenv("all")
		return nil
	})
	serveRequest("GET", "/images/json?all=1", nil, eng, t)
	if allFilter != "1" {
		t.Errorf("%#v", allFilter)
	}
}
func TestGetImagesJSONFilters(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) error {
		filter = job.Getenv("filters")
		return nil
	})
	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
	if filter != "nnnn" {
		t.Errorf("%#v", filter)
	}
}
func TestGetImagesJSONFilter(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) engine.Status {
		filter = job.Getenv("filter")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
	if filter != "aaaa" {
		t.Errorf("%#v", filter)
	}
}
func InitializeDockerEngine(configuration types.KraneConfiguration) (eng *dockerEngine.Engine) {
	eng = dockerEngine.New()
	eng.Hack_SetGlobalVar("configuration", configuration)

	// Load default plugins
	builtins.Register(eng)
	eng.Register("server_krane_api", server.ServeApi)
	eng.Register("ssh_tunnel", ssh.Tunnel)

	listenURL := &url.URL{
		Scheme: configuration.Production.Server.Host.Schema,
		Host:   configuration.Production.Server.Host.Fqdn + ":" + strconv.Itoa(configuration.Production.Server.Host.Port),
	}
	job := eng.Job("server_krane_api", listenURL.String())

	parameters := url.Values{}
	fleet, err := configuration.Driver.List(parameters)
	if err != nil {
		log.Fatalf("unable to get list of ships from %s", configuration.Driver.Name())
	}

	// Refresh the global configuration with the ships discovered by the driver.
	configuration.Production.Fleet.Append(fleet.Ships())
	eng.Hack_SetGlobalVar("configuration", configuration)

	for _, ship := range configuration.Production.Fleet.Ships() {
		fmt.Printf("We are going to queue %s\n", ship.Fqdn)
		sshJob := eng.Job("ssh_tunnel", ship.Fqdn, "false")
		sshJob.Run()
	}

	job.SetenvBool("Logging", true)
	job.SetenvBool("AutoRestart", true)
	job.Setenv("ExecDriver", "native")
	job.SetenvBool("EnableCors", true)
	if err := job.Run(); err != nil {
		log.Fatalf("Unable to spawn the test daemon: %s", err)
	}

	return eng
}
func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env) {
	eng := engine.New()
	eng.Logging = false

	done := make(chan bool)

	// set IPv6 global if given
	if input.Exists("globalIPv6Network") {
		_, globalIPv6Network, _ = net.ParseCIDR(input.Get("globalIPv6Network"))
	}

	job := eng.Job("allocate_interface", "container_id")
	job.Env().Init(&input)
	reader, _ := job.Stdout.AddPipe()
	go func() {
		output.Decode(reader)
		done <- true
	}()

	res := Allocate(job)
	job.Stdout.Close()
	<-done

	if input.Exists("expectFail") && input.GetBool("expectFail") {
		if res == engine.StatusOK {
			t.Fatal("Interface allocation succeeded, but was expected to fail")
		}
	} else {
		if res != engine.StatusOK {
			t.Fatal("Failed to allocate network interface")
		}
	}

	if input.Exists("globalIPv6Network") {
		// check for bug #11427
		_, subnet, _ := net.ParseCIDR(input.Get("globalIPv6Network"))
		if globalIPv6Network.IP.String() != subnet.IP.String() {
			t.Fatal("globalIPv6Network was modified during allocation")
		}
		// clean up IPv6 global
		globalIPv6Network = nil
	}
	return
}
func TestAllocatePortReclaim(t *testing.T) {
	eng := engine.New()
	eng.Logging = false

	freePort := findFreePort(t)

	// Init driver
	job := eng.Job("initdriver")
	if res := InitDriver(job); res != engine.StatusOK {
		t.Fatal("Failed to initialize network driver")
	}

	// Allocate interface
	job = eng.Job("allocate_interface", "container_id")
	if res := Allocate(job); res != engine.StatusOK {
		t.Fatal("Failed to allocate network interface")
	}

	// Occupy port
	listenAddr := fmt.Sprintf(":%d", freePort)
	tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
	if err != nil {
		t.Fatalf("Failed to resolve TCP address '%s'", listenAddr)
	}
	l, err := net.ListenTCP("tcp", tcpListenAddr)
	if err != nil {
		t.Fatalf("Failed to listen on port %d", freePort)
	}

	// Allocate port, expect failure
	job = newPortAllocationJob(eng, freePort)
	if res := AllocatePort(job); res == engine.StatusOK {
		t.Fatal("Successfully allocated currently used port")
	}

	// Reclaim port, retry allocation
	l.Close()
	if res := AllocatePort(job); res != engine.StatusOK {
		t.Fatal("Failed to allocate previously reclaimed port")
	}
}
func TestEventsCountJob(t *testing.T) {
	e := New()
	eng := engine.New()
	if err := e.Install(eng); err != nil {
		t.Fatal(err)
	}
	l1 := make(chan *utils.JSONMessage)
	l2 := make(chan *utils.JSONMessage)
	e.subscribe(l1)
	e.subscribe(l2)
	job := eng.Job("subscribers_count")
	env, _ := job.Stdout.AddEnv()
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	count := env.GetInt("count")
	if count != 2 {
		t.Fatalf("There must be 2 subscribers, got %d", count)
	}
}
func TestGetEvents(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("events", func(job *engine.Job) error {
		called = true
		since := job.Getenv("since")
		if since != "1" {
			t.Fatalf("'since' should be 1, found %#v instead", since)
		}
		until := job.Getenv("until")
		if until != "0" {
			t.Fatalf("'until' should be 0, found %#v instead", until)
		}
		v := &engine.Env{}
		v.Set("since", since)
		v.Set("until", until)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return err
		}
		return nil
	})
	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)

	var stdoutJson struct {
		Since int
		Until int
	}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatal(err)
	}
	if stdoutJson.Since != 1 {
		t.Errorf("since != 1: %#v", stdoutJson.Since)
	}
	if stdoutJson.Until != 0 {
		t.Errorf("until != 0: %#v", stdoutJson.Until)
	}
}
func TestLinkContainers(t *testing.T) {
	eng := engine.New()
	eng.Logging = false

	// Init driver
	job := eng.Job("initdriver")
	if res := InitDriver(job); res != engine.StatusOK {
		t.Fatal("Failed to initialize network driver")
	}

	// Allocate interface
	job = eng.Job("allocate_interface", "container_id")
	if res := Allocate(job); res != engine.StatusOK {
		t.Fatal("Failed to allocate network interface")
	}

	// Reuse the job for the link call; "-I" is the iptables action (insert)
	job.Args[0] = "-I"
	job.Setenv("ChildIP", "172.17.0.2")
	job.Setenv("ParentIP", "172.17.0.1")
	job.SetenvBool("IgnoreErrors", false)
	job.SetenvList("Ports", []string{"1234"})

	bridgeIface = "lo"
	_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
	if err != nil {
		t.Fatal(err)
	}

	if res := LinkContainers(job); res != engine.StatusOK {
		t.Fatalf("LinkContainers failed")
	}

	// flush rules
	if _, err = iptables.Raw([]string{"-F", "DOCKER"}...); err != nil {
		t.Fatal(err)
	}
}
func TestDeleteContainers(t *testing.T) {
	eng := engine.New()
	name := "foo"
	var called bool
	eng.Register("rm", func(job *engine.Job) error {
		called = true
		if len(job.Args) == 0 {
			t.Fatalf("Job arguments are empty")
		}
		if job.Args[0] != name {
			t.Fatalf("name != '%s': %#v", name, job.Args[0])
		}
		return nil
	})
	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusNoContent {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
	}
}
func TestDeleteContainersWithStopAndKill(t *testing.T) {
	if api.APIVERSION.LessThan("1.14") {
		return
	}
	eng := engine.New()
	var called bool
	eng.Register("container_delete", func(job *engine.Job) engine.Status {
		called = true
		return engine.StatusOK
	})
	r := serveRequest("DELETE", "/containers/foo?stop=1&kill=1", nil, eng, t)
	if r.Code != http.StatusBadRequest {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
	}
	if called {
		t.Fatalf("container_delete job was called, but it shouldn't have been")
	}
	res := strings.TrimSpace(r.Body.String())
	expected := "Bad parameters: can't use stop and kill simultaneously"
	if !strings.Contains(res, expected) {
		t.Fatalf("Output %s, expected %s in it", res, expected)
	}
}
func TestHostnameFormatChecking(t *testing.T) {
	eng := engine.New()
	eng.Logging = false

	freePort := findFreePort(t)

	// Init driver
	job := eng.Job("initdriver")
	if res := InitDriver(job); res != engine.StatusOK {
		t.Fatal("Failed to initialize network driver")
	}

	// Allocate interface
	job = eng.Job("allocate_interface", "container_id")
	if res := Allocate(job); res != engine.StatusOK {
		t.Fatal("Failed to allocate network interface")
	}

	// Allocate port with invalid HostIP, expect failure with Bad Request http status
	job = newPortAllocationJobWithInvalidHostIP(eng, freePort)
	if res := AllocatePort(job); res == engine.StatusOK {
		t.Fatal("Failed to check invalid HostIP")
	}
}
func main() {
	if len(dockerConfDir) == 0 {
		dockerConfDir = filepath.Join(os.Getenv("HOME"), ".docker")
	}
	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
		// Running in init mode
		sysinit.SysInit()
		return
	}

	var (
		flVersion            = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
		flDaemon             = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
		flGraphOpts          opts.ListOpts
		flDebug              = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
		flAutoRestart        = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
		bridgeName           = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
		bridgeIp             = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile              = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot               = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
		flSocketGroup        = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
		flEnableCors         = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
		flDns                = opts.NewListOpts(opts.ValidateIPAddress)
		flDnsSearch          = opts.NewListOpts(opts.ValidateDnsSearch)
		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
		flDefaultIp          = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
		flGraphDriver        = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
		flExecDriver         = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
		flHosts              = opts.NewListOpts(api.ValidateHost)
		flMtu                = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
		flTls                = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
		flTlsVerify          = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
		flCa                 = flag.String([]string{"-tlscacert"}, filepath.Join(dockerConfDir, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here")
		flCert               = flag.String([]string{"-tlscert"}, filepath.Join(dockerConfDir, defaultCertFile), "Path to TLS certificate file")
		flKey                = flag.String([]string{"-tlskey"}, filepath.Join(dockerConfDir, defaultKeyFile), "Path to TLS key file")
		flSelinuxEnabled     = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
	)
	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
	flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
	flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")

	flag.Parse()

	if *flVersion {
		showVersion()
		return
	}
	if flHosts.Len() == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			// If we do not have a host, default to unix socket
			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
		}
		if _, err := api.ValidateHost(defaultHost); err != nil {
			log.Fatal(err)
		}
		flHosts.Set(defaultHost)
	}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}

	if !*flEnableIptables && !*flInterContainerComm {
		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}

	if net.ParseIP(*flDefaultIp) == nil {
		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
	}

	if *flDebug {
		os.Setenv("DEBUG", "1")
	}
	if *flDaemon {
		if runtime.GOOS != "linux" {
			log.Fatalf("The Docker daemon is only supported on linux")
		}
		if os.Geteuid() != 0 {
			log.Fatalf("The Docker daemon needs to be run as root")
		}
		if flag.NArg() != 0 {
			flag.Usage()
			return
		}

		// set up the TempDir to use a canonical path
		tmp := os.TempDir()
		realTmp, err := utils.ReadSymlinkedDirectory(tmp)
		if err != nil {
			log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
		}
		os.Setenv("TMPDIR", realTmp)

		// get the canonical path to the Docker root directory
		root := *flRoot
		var realRoot string
		if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
			realRoot = root
		} else {
			realRoot, err = utils.ReadSymlinkedDirectory(root)
			if err != nil {
				log.Fatalf("Unable to get the full path to root (%s): %s", root, err)
			}
		}
		if err := checkKernelAndArch(); err != nil {
			log.Fatal(err)
		}

		eng := engine.New()
		// Load builtins
		if err := builtins.Register(eng); err != nil {
			log.Fatal(err)
		}
		// handle the pidfile early. https://github.com/docker/docker/issues/6973
		if len(*pidfile) > 0 {
			job := eng.Job("initserverpidfile", *pidfile)
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
		}
		// load the daemon in the background so we can immediately start
		// the http api so that connections don't fail while the daemon
		// is booting
		go func() {
			// Load plugin: httpapi
			job := eng.Job("initserver")
			// include the variable here too, for the server config
			job.Setenv("Pidfile", *pidfile)
			job.Setenv("Root", realRoot)
			job.SetenvBool("AutoRestart", *flAutoRestart)
			job.SetenvList("Dns", flDns.GetAll())
			job.SetenvList("DnsSearch", flDnsSearch.GetAll())
			job.SetenvBool("EnableIptables", *flEnableIptables)
			job.SetenvBool("EnableIpForward", *flEnableIpForward)
			job.Setenv("BridgeIface", *bridgeName)
			job.Setenv("BridgeIP", *bridgeIp)
			job.Setenv("DefaultIp", *flDefaultIp)
			job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
			job.Setenv("GraphDriver", *flGraphDriver)
			job.SetenvList("GraphOptions", flGraphOpts.GetAll())
			job.Setenv("ExecDriver", *flExecDriver)
			job.SetenvInt("Mtu", *flMtu)
			job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
			job.SetenvList("Sockets", flHosts.GetAll())
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
			// after the daemon is done setting up we can tell the api to start
			// accepting connections
			if err := eng.Job("acceptconnections").Run(); err != nil {
				log.Fatal(err)
			}
		}()

		// TODO actually have a resolved graphdriver to show?
		log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
			dockerversion.VERSION,
			dockerversion.GITCOMMIT,
			*flExecDriver,
			*flGraphDriver)

		// Serve api
		job := eng.Job("serveapi", flHosts.GetAll()...)
		job.SetenvBool("Logging", true)
		job.SetenvBool("EnableCors", *flEnableCors)
		job.Setenv("Version", dockerversion.VERSION)
		job.Setenv("SocketGroup", *flSocketGroup)
		job.SetenvBool("Tls", *flTls)
		job.SetenvBool("TlsVerify", *flTlsVerify)
		job.Setenv("TlsCa", *flCa)
		job.Setenv("TlsCert", *flCert)
		job.Setenv("TlsKey", *flKey)
		job.SetenvBool("BufferRequests", true)
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
	} else {
		if flHosts.Len() > 1 {
			log.Fatal("Please specify only one -H")
		}
		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)

		var (
			cli       *client.DockerCli
			tlsConfig tls.Config
		)
		tlsConfig.InsecureSkipVerify = true

		// If we should verify the server, we need to load a trusted ca
		if *flTlsVerify {
			*flTls = true
			certPool := x509.NewCertPool()
			file, err := ioutil.ReadFile(*flCa)
			if err != nil {
				log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
			}
			certPool.AppendCertsFromPEM(file)
			tlsConfig.RootCAs = certPool
			tlsConfig.InsecureSkipVerify = false
		}

		// If tls is enabled, try to load and send client certificates
		if *flTls || *flTlsVerify {
			_, errCert := os.Stat(*flCert)
			_, errKey := os.Stat(*flKey)
			if errCert == nil && errKey == nil {
				*flTls = true
				cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
				if err != nil {
					log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
				}
				tlsConfig.Certificates = []tls.Certificate{cert}
			}
		}

		if *flTls || *flTlsVerify {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
		} else {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
		}

		if err := cli.ParseCommands(flag.Args()...); err != nil {
			if sterr, ok := err.(*utils.StatusError); ok {
				if sterr.Status != "" {
					log.Println(sterr.Status)
				}
				os.Exit(sterr.StatusCode)
			}
			log.Fatal(err)
		}
	}
}
func mainDaemon() {
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}

	if !*flEnableIptables && !*flInterContainerComm {
		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}

	if net.ParseIP(*flDefaultIp) == nil {
		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
	}

	eng := engine.New()
	signal.Trap(eng.Shutdown)
	// Load builtins
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}

	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	go func() {
		// Load plugin: httpapi
		job := eng.Job("initserver")
		// include the variable here too, for the server config
		job.Setenv("Pidfile", *pidfile)
		job.Setenv("Root", *flRoot)
		job.SetenvBool("AutoRestart", *flAutoRestart)
		job.SetenvList("Dns", flDns.GetAll())
		job.SetenvList("DnsSearch", flDnsSearch.GetAll())
		job.SetenvBool("EnableIptables", *flEnableIptables)
		job.SetenvBool("EnableIpForward", *flEnableIpForward)
		job.Setenv("BridgeIface", *bridgeName)
		job.Setenv("BridgeIP", *bridgeIp)
		job.Setenv("DefaultIp", *flDefaultIp)
		job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
		job.Setenv("GraphDriver", *flGraphDriver)
		job.SetenvList("GraphOptions", flGraphOpts.GetAll())
		job.Setenv("ExecDriver", *flExecDriver)
		job.SetenvInt("Mtu", *flMtu)
		job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
		job.SetenvList("Sockets", flHosts.GetAll())
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			log.Fatal(err)
		}
	}()

	// TODO actually have a resolved graphdriver to show?
	log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
		dockerversion.VERSION,
		dockerversion.GITCOMMIT,
		*flExecDriver,
		*flGraphDriver)

	// Serve api
	job := eng.Job("serveapi", flHosts.GetAll()...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", *flSocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.SetenvBool("BufferRequests", true)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}
func mainDaemon() {
	if flag.NArg() != 0 {
		flag.Usage()
		return
	}
	eng := engine.New()
	signal.Trap(eng.Shutdown)

	if err := migrateKey(); err != nil {
		logrus.Fatal(err)
	}
	daemonCfg.TrustKeyPath = *flTrustKey

	// Load builtins
	if err := builtins.Register(eng); err != nil {
		logrus.Fatal(err)
	}

	// load registry service
	if err := registry.NewService(registryCfg).Install(eng); err != nil {
		logrus.Fatal(err)
	}

	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting
	daemonInitWait := make(chan error)
	go func() {
		d, err := daemon.NewDaemon(daemonCfg, eng)
		if err != nil {
			daemonInitWait <- err
			return
		}

		logrus.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
			dockerversion.VERSION,
			dockerversion.GITCOMMIT,
			d.ExecutionDriver().Name(),
			d.GraphDriver().String(),
		)

		if err := d.Install(eng); err != nil {
			daemonInitWait <- err
			return
		}

		b := &builder.BuilderJob{eng, d}
		b.Install()

		// after the daemon is done setting up we can tell the api to start
		// accepting connections
		if err := eng.Job("acceptconnections").Run(); err != nil {
			daemonInitWait <- err
			return
		}
		daemonInitWait <- nil
	}()

	// Serve api
	job := eng.Job("serveapi", flHosts...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", daemonCfg.EnableCors)
	job.Setenv("CorsHeaders", daemonCfg.CorsHeaders)
	job.Setenv("Version", dockerversion.VERSION)
	job.Setenv("SocketGroup", daemonCfg.SocketGroup)
	job.SetenvBool("Tls", *flTls)
	job.SetenvBool("TlsVerify", *flTlsVerify)
	job.Setenv("TlsCa", *flCa)
	job.Setenv("TlsCert", *flCert)
	job.Setenv("TlsKey", *flKey)
	job.SetenvBool("BufferRequests", true)

	// The serve API job never exits unless an error occurs.
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit.
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			logrus.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	// Wait for the daemon startup goroutine to finish.
	// This makes sure we can actually cleanly shutdown the daemon.
	logrus.Debug("waiting for daemon to initialize")
	errDaemon := <-daemonInitWait
	if errDaemon != nil {
		eng.Shutdown()
		outStr := fmt.Sprintf("Shutting down daemon due to errors: %v", errDaemon)
		if strings.Contains(errDaemon.Error(), "engine is shutdown") {
			// if the error is "engine is shutdown", we've already reported (or
			// will report below in API server errors) the error
			outStr = "Shutting down daemon due to reported errors"
		}
		// we must "fatal" exit here as the API server may be happy to
		// continue listening forever if the error had no impact to API
		logrus.Fatal(outStr)
	} else {
		logrus.Info("Daemon has completed initialization")
	}

	// Daemon is fully initialized and handling API traffic.
	// Wait for the serve API job to complete.
	errAPI := <-serveAPIWait
	// If we have an error here it is unique to API (as daemonErr would have
	// exited the daemon process above)
	eng.Shutdown()
	if errAPI != nil {
		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
	}
}