// Make sure the config is compatible with the current kernel
func (container *Container) verifyDaemonSettings() {
	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
		log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
		log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
		container.Config.MemorySwap = -1
	}
	if container.daemon.sysInfo.IPv4ForwardingDisabled {
		log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
	}
}
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a go routine to setup a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func() {
			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
		}()
	}

	for i := 0; i < len(protoAddrs); i++ {
		if err := <-chErrors; err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
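// A minimal, self-contained sketch of the fan-out/fan-in pattern ServeApi uses:
// one goroutine per address, an error channel buffered to the number of
// goroutines so no sender ever blocks, and a collection loop that fails fast on
// the first error. serve and serveAll are illustrative stand-ins, not part of
// the Docker API.
package main

import (
	"fmt"
	"strings"
)

func serve(proto, addr string) error {
	if proto != "tcp" && proto != "unix" {
		return fmt.Errorf("unsupported protocol: %s", proto)
	}
	// A real server would block in Serve here and only send on a listener error.
	fmt.Printf("would serve on %s (%s)\n", proto, addr)
	return nil
}

func serveAll(protoAddrs []string) error {
	chErrors := make(chan error, len(protoAddrs)) // buffered: no goroutine blocks on send
	for _, protoAddr := range protoAddrs {
		parts := strings.SplitN(protoAddr, "://", 2)
		if len(parts) != 2 {
			return fmt.Errorf("usage: PROTO://ADDR [PROTO://ADDR ...]")
		}
		go func(proto, addr string) {
			chErrors <- serve(proto, addr)
		}(parts[0], parts[1])
	}
	// Collect one result per goroutine; the first failure aborts the job.
	for range protoAddrs {
		if err := <-chErrors; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := serveAll([]string{"tcp://127.0.0.1:2375", "unix:///var/run/sketch.sock"}); err != nil {
		fmt.Println("error:", err)
	}
}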
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		log.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		// shadows the version package from here on; the right-hand side still
		// refers to the package, since the new variable is not yet in scope
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = api.APIVERSION
		}
		if enableCors {
			writeCorsHeaders(w, r)
		}

		if version.GreaterThan(api.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client: %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
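// A brief sketch of the dotted-version comparison behind the GreaterThan gate
// above, written against the standard library only (Docker's version package is
// not reproduced here): compare each numeric segment in turn; a missing or
// malformed segment counts as zero.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func versionGreaterThan(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) || i < len(bs); i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i]) // parse error leaves the segment at zero
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		if ai != bi {
			return ai > bi
		}
	}
	return false
}

func main() {
	fmt.Println(versionGreaterThan("1.15", "1.14")) // true: a newer client would be rejected
}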
func (ts *TarSum) Sum(extra []byte) string {
	var sums []string

	for _, sum := range ts.sums {
		sums = append(sums, sum)
	}
	sort.Strings(sums)
	h := sha256.New()
	if extra != nil {
		h.Write(extra)
	}
	for _, sum := range sums {
		log.Infof("-->%s<--", sum)
		h.Write([]byte(sum))
	}
	checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil))
	log.Infof("checksum processed: %s", checksum)
	return checksum
}
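// A minimal sketch of how Sum composes the final checksum, assuming the
// per-entry sums are plain strings: sort them so the result is independent of
// archive order, hash the concatenation (plus any optional extra bytes), and
// prefix the hex digest with the tarsum version label.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
)

func tarSum(entrySums []string, extra []byte) string {
	sums := append([]string(nil), entrySums...)
	sort.Strings(sums) // order-independent result
	h := sha256.New()
	if extra != nil {
		h.Write(extra)
	}
	for _, sum := range sums {
		h.Write([]byte(sum))
	}
	return "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	// Same output regardless of the order the entries arrived in.
	fmt.Println(tarSum([]string{"b:1", "a:2"}, nil))
	fmt.Println(tarSum([]string{"a:2", "b:1"}, nil))
}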
func (daemon *Daemon) checkLocaldns() error {
	resolvConf, err := resolvconf.Get()
	if err != nil {
		return err
	}
	if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
		log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers: %v", DefaultDns)
		daemon.config.Dns = DefaultDns
	}
	return nil
}
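// A rough sketch of the localhost-resolver check, parsing resolv.conf by hand
// rather than through Docker's resolvconf/utils helpers: containers run in
// their own network namespace, so a nameserver on the 127.0.0.0/8 loopback
// range is unreachable from inside them.
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func usesLocalDns(resolvConf string) bool {
	scanner := bufio.NewScanner(strings.NewReader(resolvConf))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) == 2 && fields[0] == "nameserver" && strings.HasPrefix(fields[1], "127.") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(usesLocalDns("nameserver 127.0.1.1\n")) // true
	fmt.Println(usesLocalDns("nameserver 8.8.8.8\n"))   // false
}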
// release an interface for a selected IP
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
		log.Infof("Unable to release IP: %s", err)
	}
	return engine.StatusOK
}
func checkKernelAndArch() error {
	// Check for unsupported architectures
	if runtime.GOARCH != "amd64" {
		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
	}
	// Check for unsupported kernel versions
	// FIXME: it would be cleaner to not test for specific versions, but rather
	// test for specific functionalities.
	// Unfortunately we can't test for the feature "does not cause a kernel panic"
	// without actually causing a kernel panic, so we need this workaround until
	// the circumstances of pre-3.8 crashes are clearer.
	// For details see http://github.com/dockercn/docker/issues/407
	if k, err := kernel.GetKernelVersion(); err != nil {
		log.Infof("WARNING: %s", err)
	} else {
		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
				log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
			}
		}
	}
	return nil
}
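// A small sketch of the triple comparison that kernel.CompareKernelVersion
// implies: compare (kernel, major, minor) lexicographically and return a value
// that is negative, zero, or positive. The kver type and compare function are
// illustrative, not Docker's.
package main

import "fmt"

type kver struct{ kernel, major, minor int }

func compare(a, b kver) int {
	if a.kernel != b.kernel {
		return a.kernel - b.kernel
	}
	if a.major != b.major {
		return a.major - b.major
	}
	return a.minor - b.minor
}

func main() {
	running := kver{3, 2, 0}
	if compare(running, kver{3, 8, 0}) < 0 {
		fmt.Println("WARNING: kernel older than 3.8.0")
	}
}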
func (container *Container) Stop(seconds int) error {
	if !container.State.IsRunning() {
		return nil
	}

	// 1. Send a SIGTERM
	if err := container.KillSig(15); err != nil {
		log.Infof("Failed to send SIGTERM to the process, force killing")
		if err := container.KillSig(9); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
		log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
		// 3. If it doesn't, then send SIGKILL
		if err := container.Kill(); err != nil {
			container.State.WaitStop(-1 * time.Second)
			return err
		}
	}
	return nil
}
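// A self-contained sketch of the same escalation Stop performs, applied to a
// plain child process started with os/exec rather than a Docker container:
// send SIGTERM, wait up to a grace period, then fall back to SIGKILL.
// Unix-only; the stop helper is illustrative.
package main

import (
	"log"
	"os/exec"
	"syscall"
	"time"
)

func stop(cmd *exec.Cmd, grace time.Duration) error {
	// 1. Ask nicely with SIGTERM; if it can't even be delivered, force kill.
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return cmd.Process.Kill()
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	// 2. Wait for the process to exit on its own.
	select {
	case err := <-done:
		return err
	case <-time.After(grace):
		// 3. It didn't; use the force.
		log.Printf("process %d did not exit within %s, sending SIGKILL", cmd.Process.Pid, grace)
		if err := cmd.Process.Kill(); err != nil {
			return err
		}
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	if err := stop(cmd, 2*time.Second); err != nil {
		log.Printf("stopped with: %v", err) // expected: "signal: killed"
	}
}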
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
	if r.client == nil || r.request == nil {
		return 0, fmt.Errorf("client and request can't be nil")
	}
	isFreshRequest := false
	if r.lastRange != 0 && r.currentResponse == nil {
		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
		r.request.Header.Set("Range", readRange)
		time.Sleep(5 * time.Second)
	}
	if r.currentResponse == nil {
		r.currentResponse, err = r.client.Do(r.request)
		isFreshRequest = true
	}
	if err != nil && r.failures+1 != r.maxFailures {
		r.cleanUpResponse()
		r.failures++
		time.Sleep(5 * time.Duration(r.failures) * time.Second)
		return 0, nil
	} else if err != nil {
		r.cleanUpResponse()
		return 0, err
	}
	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
		r.cleanUpResponse()
		return 0, io.EOF
	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
		r.cleanUpResponse()
		return 0, fmt.Errorf("the server doesn't support byte ranges")
	}
	if r.totalSize == 0 {
		r.totalSize = r.currentResponse.ContentLength
	} else if r.totalSize <= 0 {
		r.cleanUpResponse()
		return 0, fmt.Errorf("failed to auto detect content length")
	}
	n, err = r.currentResponse.Body.Read(p)
	r.lastRange += int64(n)
	if err != nil {
		r.cleanUpResponse()
	}
	if err != nil && err != io.EOF {
		log.Infof("encountered error during pull and clearing it before resume: %s", err)
		err = nil
	}
	return n, err
}
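// A minimal sketch of the Range-header handshake the resumable reader relies
// on: request the remainder of the body from a given offset and verify the
// server honored it with 206 Partial Content. resumeFrom and the URL are
// placeholders, not Docker code.
package main

import (
	"fmt"
	"net/http"
)

func resumeFrom(url string, offset, total int64) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if offset != 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, total))
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// A server that ignores the Range header answers 200 and restarts the body
	// from byte zero, so a resume is only valid on 206.
	if offset != 0 && resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return nil, fmt.Errorf("the server doesn't support byte ranges")
	}
	return resp, nil
}

func main() {
	resp, err := resumeFrom("http://example.com/layer.tar", 0, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}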
func (container *Container) Kill() error {
	if !container.State.IsRunning() {
		return nil
	}

	// 1. Send SIGKILL
	if err := container.KillSig(9); err != nil {
		return err
	}

	// 2. Wait for the process to die; as a last resort, try to kill the process directly
	if _, err := container.State.WaitStop(10 * time.Second); err != nil {
		// Ensure that we don't kill ourselves
		if pid := container.State.GetPid(); pid != 0 {
			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
			if err := syscall.Kill(pid, 9); err != nil {
				return err
			}
		}
	}

	container.State.WaitStop(-1 * time.Second)
	return nil
}
// FIXME: rename to ContainerRemove for consistency with the CLI command.
func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]
	removeVolume := job.GetenvBool("removeVolume")
	removeLink := job.GetenvBool("removeLink")
	forceRemove := job.GetenvBool("forceRemove")
	container := daemon.Get(name)

	if removeLink {
		if container == nil {
			return job.Errorf("No such link: %s", name)
		}
		name, err := GetFullContainerName(name)
		if err != nil {
			return job.Error(err)
		}
		parent, n := path.Split(name)
		if parent == "/" {
			return job.Errorf("Conflict, cannot remove the default name of the container")
		}
		pe := daemon.ContainerGraph().Get(parent)
		if pe == nil {
			return job.Errorf("Cannot get parent %s for name %s", parent, name)
		}
		parentContainer := daemon.Get(pe.ID())

		if parentContainer != nil {
			parentContainer.DisableLink(n)
		}

		if err := daemon.ContainerGraph().Delete(name); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}

	if container != nil {
		if container.State.IsRunning() {
			if forceRemove {
				if err := container.Kill(); err != nil {
					return job.Errorf("Could not kill running container, cannot remove - %v", err)
				}
			} else {
				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")
			}
		}
		if err := daemon.Destroy(container); err != nil {
			return job.Errorf("Cannot destroy container %s: %s", name, err)
		}
		container.LogEvent("destroy")

		if removeVolume {
			var (
				volumes     = make(map[string]struct{})
				binds       = make(map[string]struct{})
				usedVolumes = make(map[string]*Container)
			)

			// the volume id is always the base of the path
			getVolumeId := func(p string) string {
				return filepath.Base(strings.TrimSuffix(p, "/layer"))
			}

			// populate bind map so that they can be skipped and not removed
			for _, bind := range container.HostConfig().Binds {
				source := strings.Split(bind, ":")[0]
				// TODO: refactor all volume stuff, all of it
				// it is very important that we eval the link or comparing the keys to container.Volumes will not work
				//
				// eval symlink can fail, ref #5244; if we receive an "is not exist" error we can ignore it
				p, err := filepath.EvalSymlinks(source)
				if err != nil && !os.IsNotExist(err) {
					return job.Error(err)
				}
				if p != "" {
					source = p
				}
				binds[source] = struct{}{}
			}

			// Store all the deleted containers volumes
			for _, volumeId := range container.Volumes {
				// Skip the volumes mounted from external bind mounts;
				// those were evaluated for symlinks above
				if _, exists := binds[volumeId]; exists {
					continue
				}
				volumeId = getVolumeId(volumeId)
				volumes[volumeId] = struct{}{}
			}

			// Retrieve all volumes from all remaining containers
			for _, container := range daemon.List() {
				for _, containerVolumeId := range container.Volumes {
					containerVolumeId = getVolumeId(containerVolumeId)
					usedVolumes[containerVolumeId] = container
				}
			}

			for volumeId := range volumes {
				// If the requested volume is still in use by another container, skip it
				if c, exists := usedVolumes[volumeId]; exists {
					log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.", volumeId, c.ID)
					continue
				}
				if err := daemon.Volumes().Delete(volumeId); err != nil {
					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
				}
			}
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
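// A toy sketch of the reference-counting walk ContainerDestroy performs before
// deleting volumes: collect the dying container's volume IDs, mark every volume
// still referenced by a surviving container, and delete only the unreferenced
// ones. All names here are illustrative.
package main

import "fmt"

func removableVolumes(dying []string, survivors map[string][]string) []string {
	used := make(map[string]bool)
	for _, vols := range survivors {
		for _, v := range vols {
			used[v] = true
		}
	}
	var removable []string
	for _, v := range dying {
		if used[v] {
			fmt.Printf("volume %s still in use, skipping\n", v)
			continue
		}
		removable = append(removable, v)
	}
	return removable
}

func main() {
	fmt.Println(removableVolumes(
		[]string{"vol-a", "vol-b"},
		map[string][]string{"other-container": {"vol-b"}},
	)) // [vol-a]
}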
func (daemon *Daemon) restore() error {
	var (
		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
		containers    = make(map[string]*Container)
		currentDriver = daemon.driver.String()
	)

	if !debug {
		log.Infof("Loading containers: ")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if !debug {
			fmt.Print(".")
		}
		if err != nil {
			log.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			log.Debugf("Loaded container %v", container.ID)
			containers[container.ID] = container
		} else {
			log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	registeredContainers := []*Container{}

	if entities := daemon.containerGraph.List("/", -1); entities != nil {
		for _, p := range entities.Paths() {
			if !debug {
				fmt.Print(".")
			}
			e := entities[p]
			if container, ok := containers[e.ID()]; ok {
				if err := daemon.register(container, false); err != nil {
					log.Debugf("Failed to register container %s: %s", container.ID, err)
				}
				registeredContainers = append(registeredContainers, container)
				// delete from the map so that a new name is not automatically generated
				delete(containers, e.ID())
			}
		}
	}

	// Any containers that are left over do not exist in the graph
	for _, container := range containers {
		// Try to set the default name for a container if it exists prior to links
		container.Name, err = daemon.generateNewName(container.ID)
		if err != nil {
			log.Debugf("Setting default id - %s", err)
		}
		if err := daemon.register(container, false); err != nil {
			log.Debugf("Failed to register container %s: %s", container.ID, err)
		}
		registeredContainers = append(registeredContainers, container)
	}

	// Check the restart policy on the containers and restart any whose policy is
	// "always", or "on-failure" when the container exited with a non-zero code
	if daemon.config.AutoRestart {
		log.Debugf("Restarting containers...")

		for _, container := range registeredContainers {
			if container.hostConfig.RestartPolicy.Name == "always" ||
				(container.hostConfig.RestartPolicy.Name == "on-failure" && container.State.ExitCode != 0) {
				log.Debugf("Starting container %s", container.ID)

				if err := container.Start(); err != nil {
					log.Debugf("Failed to start container %s: %s", container.ID, err)
				}
			}
		}
	}

	if !debug {
		log.Infof(": done.")
	}

	return nil
}
// ListenAndServe sets up the required http.Server and gets it listening for
// each addr passed in and does protocol specific checking.
func ListenAndServe(proto, addr string, job *engine.Job) error {
	var l net.Listener
	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
	if err != nil {
		return err
	}

	if proto == "fd" {
		return ServeFd(addr, r)
	}

	if proto == "unix" {
		if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	var oldmask int
	if proto == "unix" {
		oldmask = syscall.Umask(0777)
	}

	if job.GetenvBool("BufferRequests") {
		l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock)
	} else {
		l, err = net.Listen(proto, addr)
	}

	if proto == "unix" {
		syscall.Umask(oldmask)
	}
	if err != nil {
		return err
	}

	if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
		tlsCert := job.Getenv("TlsCert")
		tlsKey := job.Getenv("TlsKey")
		cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
		if err != nil {
			return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", tlsCert, tlsKey, err)
		}

		tlsConfig := &tls.Config{
			NextProtos:   []string{"http/1.1"},
			Certificates: []tls.Certificate{cert},
		}
		if job.GetenvBool("TlsVerify") {
			certPool := x509.NewCertPool()
			file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
			if err != nil {
				return fmt.Errorf("Couldn't read CA certificate: %s", err)
			}
			certPool.AppendCertsFromPEM(file)

			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
			tlsConfig.ClientCAs = certPool
		}
		l = tls.NewListener(l, tlsConfig)
	}

	// Basic error and sanity checking
	switch proto {
	case "tcp":
		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
			log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
		}
	case "unix":
		socketGroup := job.Getenv("SocketGroup")
		if socketGroup != "" {
			if err := changeGroup(addr, socketGroup); err != nil {
				if socketGroup == "docker" {
					// if the user hasn't explicitly specified the group ownership, don't fail on errors.
					log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
				} else {
					return err
				}
			}
		}
		if err := os.Chmod(addr, 0660); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Invalid protocol format.")
	}

	httpSrv := http.Server{Addr: addr, Handler: r}
	return httpSrv.Serve(l)
}
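// A condensed, standalone sketch of the TLS setup above: load the server key
// pair, optionally require client certificates signed by a given CA, and wrap
// an existing net.Listener. The file paths and the tlsListener helper are
// placeholders; the main function will fail at runtime without real PEM files.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net"
	"net/http"
)

func tlsListener(l net.Listener, certFile, keyFile, caFile string, verifyClient bool) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	config := &tls.Config{
		NextProtos:   []string{"http/1.1"},
		Certificates: []tls.Certificate{cert},
	}
	if verifyClient {
		pem, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		// Reject any client that does not present a certificate signed by the CA.
		config.ClientAuth = tls.RequireAndVerifyClientCert
		config.ClientCAs = pool
	}
	return tls.NewListener(l, config), nil
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:2376")
	if err != nil {
		log.Fatal(err)
	}
	tl, err := tlsListener(l, "server-cert.pem", "server-key.pem", "ca.pem", true)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(tl, http.NotFoundHandler()))
}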