func (container *Container) cleanup() {
	container.releaseNetwork()

	// Disable all active links
	if container.activeLinks != nil {
		for _, link := range container.activeLinks {
			link.Disable()
		}
	}
	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			utils.Errorf("%s: Error closing stdin: %s", container.ID, err)
		}
	}
	if err := container.stdout.CloseWriters(); err != nil {
		utils.Errorf("%s: Error closing stdout: %s", container.ID, err)
	}
	if err := container.stderr.CloseWriters(); err != nil {
		utils.Errorf("%s: Error closing stderr: %s", container.ID, err)
	}
	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			utils.Errorf("%s: Error closing pty master: %s", container.ID, err)
		}
	}

	var (
		root   = container.RootfsPath()
		mounts = []string{
			root,
			path.Join(root, "/.dockerinit"),
			path.Join(root, "/.dockerenv"),
			path.Join(root, "/etc/resolv.conf"),
		}
	)

	if container.HostnamePath != "" && container.HostsPath != "" {
		mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts"))
	}

	for r := range container.Volumes {
		mounts = append(mounts, path.Join(root, r))
	}

	// Unmount in reverse order so nested mounts are released before their parents.
	for i := len(mounts) - 1; i >= 0; i-- {
		if lastError := mount.Unmount(mounts[i]); lastError != nil {
			log.Printf("Failed to umount %v: %v", mounts[i], lastError)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
	}
}
func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
	// Start looking for a free /dev/loop
	for {
		target := fmt.Sprintf("/dev/loop%d", index)
		index++

		fi, err := osStat(target)
		if err != nil {
			if osIsNotExist(err) {
				utils.Errorf("There are no more loopback devices available.")
			}
			return nil, ErrAttachLoopbackDevice
		}

		if fi.Mode()&osModeDevice != osModeDevice {
			utils.Errorf("Loopback device %s is not a block device.", target)
			continue
		}

		// OpenFile adds O_CLOEXEC
		loopFile, err = osOpenFile(target, osORdWr, 0644)
		if err != nil {
			utils.Errorf("Error opening loopback device: %s", err)
			return nil, ErrAttachLoopbackDevice
		}

		// Try to attach to the loop file
		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
			loopFile.Close()

			// If the error is EBUSY, then try the next loopback
			if err != sysEBusy {
				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
				return nil, ErrAttachLoopbackDevice
			}

			// Otherwise, we keep going with the loop
			continue
		}

		// In case of success, we finished. Break the loop.
		break
	}

	// This can't happen, but let's be sure
	if loopFile == nil {
		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
func LoopbackSetCapacity(file *osFile) error {
	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
		utils.Errorf("Error loopbackSetCapacity: %s", err)
		return ErrLoopbackSetCapacity
	}
	return nil
}
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		utils.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Println(r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && userAgent[1] != dockerVersion {
				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version, err := strconv.ParseFloat(mux.Vars(r)["version"], 64)
		if err != nil {
			version = APIVERSION
		}
		if enableCors {
			writeCorsHeaders(w, r)
		}

		if version == 0 || version > APIVERSION {
			http.Error(w, fmt.Errorf("client and server don't have same version (client: %g, server: %g)", version, APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			utils.Errorf("Error: %s", err)
			httpError(w, err)
		}
	}
}
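// For context, a hedged sketch of how a handler built by makeHttpHandler might be
// registered on a gorilla/mux router. The route path, the version pattern, and the
// registerCopyRoute wrapper are assumptions for illustration, not taken from the
// source; mux.Vars(r)["version"] is what makeHttpHandler parses above.
func registerCopyRoute(r *mux.Router, eng *engine.Engine, dockerVersion string) {
	handler := makeHttpHandler(eng, true, "POST", "/containers/{name}/copy", postContainersCopy, false, dockerVersion)
	// Versioned form, e.g. POST /v1.9/containers/abc/copy
	r.Path("/v{version:[0-9.]+}/containers/{name}/copy").Methods("POST").HandlerFunc(handler)
	// Unversioned form: ParseFloat fails on the empty var and APIVERSION is used.
	r.Path("/containers/{name}/copy").Methods("POST").HandlerFunc(handler)
}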
func postContainersCopy(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var copyData engine.Env

	if contentType := r.Header.Get("Content-Type"); contentType == "application/json" {
		if err := copyData.Decode(r.Body); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("Content-Type not supported: %s", contentType)
	}

	if copyData.Get("Resource") == "" {
		return fmt.Errorf("Path cannot be empty")
	}
	if copyData.Get("Resource")[0] == '/' {
		copyData.Set("Resource", copyData.Get("Resource")[1:])
	}

	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
	job.Stdout.Add(w)
	if err := job.Run(); err != nil {
		utils.Errorf("%s", err.Error())
	}
	return nil
}
func wsContainersAttach(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil {
		return err
	}

	h := websocket.Handler(func(ws *websocket.Conn) {
		defer ws.Close()

		job := eng.Job("attach", vars["name"])
		job.Setenv("logs", r.Form.Get("logs"))
		job.Setenv("stream", r.Form.Get("stream"))
		job.Setenv("stdin", r.Form.Get("stdin"))
		job.Setenv("stdout", r.Form.Get("stdout"))
		job.Setenv("stderr", r.Form.Get("stderr"))
		job.Stdin.Add(ws)
		job.Stdout.Add(ws)
		job.Stderr.Set(ws)
		if err := job.Run(); err != nil {
			utils.Errorf("Error: %s", err)
		}
	})
	h.ServeHTTP(w, r)

	return nil
}
func postCommit(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	var (
		config engine.Env
		env    engine.Env
		job    = eng.Job("commit", r.Form.Get("container"))
	)
	if err := config.Import(r.Body); err != nil {
		utils.Errorf("%s", err)
	}

	job.Setenv("repo", r.Form.Get("repo"))
	job.Setenv("tag", r.Form.Get("tag"))
	job.Setenv("author", r.Form.Get("author"))
	job.Setenv("comment", r.Form.Get("comment"))
	job.SetenvSubEnv("config", &config)

	var id string
	job.Stdout.AddString(&id)
	if err := job.Run(); err != nil {
		return err
	}
	env.Set("Id", id)
	return writeJSON(w, http.StatusCreated, env)
}
func MatchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
	}
	return err == nil && mimetype == expectedType
}
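// Because MatchesContentType parses the header with mime.ParseMediaType, parameters
// such as "; charset=utf-8" are stripped before the comparison. A minimal sketch of
// using it to gate a request body; requireJSON and the handler shape are hypothetical
// helpers for illustration, not part of the source.
func requireJSON(w http.ResponseWriter, r *http.Request) bool {
	// "application/json; charset=utf-8" still matches "application/json".
	if !MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
		http.Error(w, "Content-Type must be application/json", http.StatusBadRequest)
		return false
	}
	return true
}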
func (devices *DeviceSet) Shutdown() error {
	devices.Lock()
	defer devices.Unlock()

	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)

	for path, count := range devices.activeMounts {
		for i := count; i > 0; i-- {
			if err := sysUnmount(path, 0); err != nil {
				utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err)
			}
		}
		delete(devices.activeMounts, path)
	}

	for _, d := range devices.Devices {
		if err := devices.waitClose(d.Hash); err != nil {
			utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
		}
		if err := devices.deactivateDevice(d.Hash); err != nil {
			utils.Debugf("Shutdown deactivate %s, error: %s\n", d.Hash, err)
		}
	}

	pool := devices.getPoolDevName()
	if devinfo, err := getInfo(pool); err == nil && devinfo.Exists != 0 {
		if err := devices.deactivateDevice("pool"); err != nil {
			utils.Debugf("Shutdown deactivate %s, error: %s\n", pool, err)
		}
	}

	return nil
}
// Check if an image exists in the Registry
// TODO: This method should return the errors instead of masking them and returning false
func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	setTokenAuth(req, token)
	res, err := doWithCookies(r.client, req)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	res.Body.Close()
	return res.StatusCode == 200
}
func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
	if err != nil {
		utils.Errorf("Error getting loopback backing file: %s\n", err)
		return 0, 0, ErrGetLoopbackBackingFile
	}
	return loopInfo.loDevice, loopInfo.loInode, nil
}
func GetBlockDeviceSize(file *osFile) (uint64, error) {
	size, err := ioctlBlkGetSize64(file.Fd())
	if err != nil {
		utils.Errorf("Error getblockdevicesize: %s", err)
		return 0, ErrGetBlockSize
	}
	return uint64(size), nil
}
func Unmount(target string) error {
	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
		utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
	}
	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}
	return nil
}
func (runtime *Runtime) Close() error {
	errorsStrings := []string{}
	if err := portallocator.ReleaseAll(); err != nil {
		utils.Errorf("portallocator.ReleaseAll(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := runtime.driver.Cleanup(); err != nil {
		utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := runtime.containerGraph.Close(); err != nil {
		utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if len(errorsStrings) > 0 {
		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
	}
	return nil
}
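// Close collects every failure, keeps going, and only then joins the messages into a
// single error. A standalone sketch of that aggregate-and-continue pattern; cleanupAll
// and steps are illustrative names, not from the source.
func cleanupAll(steps []func() error) error {
	var msgs []string
	for _, step := range steps {
		if err := step(); err != nil {
			// Record the failure but keep running the remaining cleanup steps.
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) > 0 {
		return fmt.Errorf("%s", strings.Join(msgs, ", "))
	}
	return nil
}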
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *osFile.
func attachLoopDevice(sparseName string) (loop *osFile, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard the error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		utils.Debugf("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
	if err != nil {
		utils.Errorf("Error opening sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		loFlags:    LoFlagsAutoClear,
	}

	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		utils.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			utils.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
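// A minimal sketch of how the loopback helpers above compose, assuming it lives in the
// same package; growAndRefreshLoopback and the "grown elsewhere" precondition are
// illustrative, not part of the source. It attaches a sparse file, reads the resulting
// block device size, then asks the kernel to re-read the capacity.
func growAndRefreshLoopback(sparseName string) error {
	loopFile, err := attachLoopDevice(sparseName)
	if err != nil {
		return err
	}
	defer loopFile.Close()

	size, err := GetBlockDeviceSize(loopFile)
	if err != nil {
		return err
	}
	utils.Debugf("loopback %s is %d bytes", loopFile.Name(), size)

	// After the backing file has been truncated to a larger size elsewhere,
	// LoopbackSetCapacity tells the kernel to pick up the new size.
	return LoopbackSetCapacity(loopFile)
}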
// During cleanup aufs needs to unmount all mountpoints
func (a *Driver) Cleanup() error {
	ids, err := loadIds(path.Join(a.rootPath(), "layers"))
	if err != nil {
		return err
	}
	for _, id := range ids {
		if err := a.unmount(id); err != nil {
			utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
		}
	}
	return nil
}
// GetSize returns the real size and the virtual size of the container's filesystem.
func (container *Container) GetSize() (int64, int64) {
	var (
		sizeRw, sizeRootfs int64
		err                error
		driver             = container.runtime.driver
	)

	if err := container.Mount(); err != nil {
		utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
		return sizeRw, sizeRootfs
	}
	defer container.Unmount()

	if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
		sizeRw, err = differ.DiffSize(container.ID)
		if err != nil {
			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
			// FIXME: GetSize should return an error. Not changing it now in case
			// there is a side-effect.
			sizeRw = -1
		}
	} else {
		changes, _ := container.Changes()
		if changes != nil {
			sizeRw = archive.ChangesSize(container.basefs, changes)
		} else {
			sizeRw = -1
		}
	}

	if _, err = os.Stat(container.basefs); err != nil {
		if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil {
			sizeRootfs = -1
		}
	}
	return sizeRw, sizeRootfs
}
func (d *Driver) Remove(id string) error {
	// Protect d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	if d.active[id] != 0 {
		utils.Errorf("Warning: removing active id %s\n", id)
	}

	mp := path.Join(d.home, "mnt", id)
	if err := d.unmount(id, mp); err != nil {
		return err
	}
	return d.DeviceSet.RemoveDevice(id)
}
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	if container.command == nil {
		// This happens when you have a GHOST container with lxc
		populateCommand(container)
		err = container.runtime.RestoreCommand(container)
	} else {
		exitCode, err = container.runtime.Run(container, callback)
	}
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	container.State.SetStopped(exitCode)

	if container.runtime != nil && container.runtime.srv != nil {
		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
	}

	close(container.waitLock)

	// FIXME: there is a race condition here which causes this to fail during the unit tests.
	// If another goroutine was waiting for Wait() to return before removing the container's root
	// from the filesystem... At this point it may already have done so.
	// This is because State.SetStopped() has already been called, and has caused Wait()
	// to return.
	// FIXME: why are we serializing running state to disk in the first place?
	//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
	container.ToDisk()

	return err
}
func (proxy *UDPProxy) Run() {
	readBuf := make([]byte, UDPBufSize)
	utils.Debugf("Starting proxy on udp/%v for udp/%v", proxy.frontendAddr, proxy.backendAddr)
	for {
		read, from, err := proxy.listener.ReadFromUDP(readBuf)
		if err != nil {
			// NOTE: Apparently ReadFrom doesn't return
			// ECONNREFUSED like Read does (see comment in
			// UDPProxy.replyLoop)
			if utils.IsClosedError(err) {
				utils.Debugf("Stopping proxy on udp/%v for udp/%v (socket was closed)", proxy.frontendAddr, proxy.backendAddr)
			} else {
				utils.Errorf("Stopping proxy on udp/%v for udp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error())
			}
			break
		}

		fromKey := newConnTrackKey(from)
		proxy.connTrackLock.Lock()
		proxyConn, hit := proxy.connTrackTable[*fromKey]
		if !hit {
			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
			if err != nil {
				log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
				// Release the lock before skipping this datagram, otherwise the
				// next iteration would deadlock on Lock().
				proxy.connTrackLock.Unlock()
				continue
			}
			proxy.connTrackTable[*fromKey] = proxyConn
			go proxy.replyLoop(proxyConn, from, fromKey)
		}
		proxy.connTrackLock.Unlock()

		for i := 0; i != read; {
			written, err := proxyConn.Write(readBuf[i:read])
			if err != nil {
				log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
				break
			}
			i += written
			utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, proxy.backendAddr.String())
		}
	}
}
// Unmount and remove the dir information
func (a *Driver) Remove(id string) error {
	// Protect a.active from concurrent access
	a.Lock()
	defer a.Unlock()

	if a.active[id] != 0 {
		utils.Errorf("Warning: removing active id %s\n", id)
	}

	// Make sure the dir is umounted first
	if err := a.unmount(id); err != nil {
		return err
	}
	tmpDirs := []string{
		"mnt",
		"diff",
	}

	// Atomically remove each directory in turn by first moving it out of the
	// way (so that docker doesn't find it anymore) before doing removal of
	// the whole tree.
	for _, p := range tmpDirs {
		realPath := path.Join(a.rootPath(), p, id)
		tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
		if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
			return err
		}
		defer os.RemoveAll(tmpPath)
	}

	// Remove the layers file for the id
	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
func httpError(w http.ResponseWriter, err error) {
	statusCode := http.StatusInternalServerError
	// FIXME: this is brittle and should not be necessary.
	// If we need to differentiate between different possible error types, we should
	// create appropriate error types with clearly defined meaning.
	if strings.Contains(err.Error(), "No such") {
		statusCode = http.StatusNotFound
	} else if strings.Contains(err.Error(), "Bad parameter") {
		statusCode = http.StatusBadRequest
	} else if strings.Contains(err.Error(), "Conflict") {
		statusCode = http.StatusConflict
	} else if strings.Contains(err.Error(), "Impossible") {
		statusCode = http.StatusNotAcceptable
	} else if strings.Contains(err.Error(), "Wrong login/password") {
		statusCode = http.StatusUnauthorized
	} else if strings.Contains(err.Error(), "hasn't been activated") {
		statusCode = http.StatusForbidden
	}

	if err != nil {
		utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
		http.Error(w, err.Error(), statusCode)
	}
}
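// Because httpError maps purely on substrings of err.Error(), its behavior can be
// pinned down with a response recorder. The test below is a hypothetical sketch
// (name and placement assumed) that would sit in the same package as httpError,
// with "fmt", "net/http", "net/http/httptest", and "testing" imported.
func TestHttpErrorMapsNoSuchToNotFound(t *testing.T) {
	rec := httptest.NewRecorder()
	httpError(rec, fmt.Errorf("No such container: deadbeef"))
	if rec.Code != http.StatusNotFound {
		t.Fatalf("expected %d, got %d", http.StatusNotFound, rec.Code)
	}
}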
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
	var cStdout, cStderr io.ReadCloser

	var nJobs int
	errors := make(chan error, 3)
	if stdin != nil && container.Config.OpenStdin {
		nJobs += 1
		if cStdin, err := container.StdinPipe(); err != nil {
			errors <- err
		} else {
			go func() {
				utils.Debugf("attach: stdin: begin")
				defer utils.Debugf("attach: stdin: end")
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if container.Config.StdinOnce && !container.Config.Tty {
					defer cStdin.Close()
				} else {
					defer func() {
						if cStdout != nil {
							cStdout.Close()
						}
						if cStderr != nil {
							cStderr.Close()
						}
					}()
				}
				if container.Config.Tty {
					_, err = utils.CopyEscapable(cStdin, stdin)
				} else {
					_, err = io.Copy(cStdin, stdin)
				}
				if err == io.ErrClosedPipe {
					err = nil
				}
				if err != nil {
					utils.Errorf("attach: stdin: %s", err)
				}
				errors <- err
			}()
		}
	}
	if stdout != nil {
		nJobs += 1
		if p, err := container.StdoutPipe(); err != nil {
			errors <- err
		} else {
			cStdout = p
			go func() {
				utils.Debugf("attach: stdout: begin")
				defer utils.Debugf("attach: stdout: end")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce && stdin != nil {
					defer stdin.Close()
				}
				if stdinCloser != nil {
					defer stdinCloser.Close()
				}
				_, err := io.Copy(stdout, cStdout)
				if err == io.ErrClosedPipe {
					err = nil
				}
				if err != nil {
					utils.Errorf("attach: stdout: %s", err)
				}
				errors <- err
			}()
		}
	} else {
		go func() {
			if stdinCloser != nil {
				defer stdinCloser.Close()
			}
			if cStdout, err := container.StdoutPipe(); err != nil {
				utils.Errorf("attach: stdout pipe: %s", err)
			} else {
				io.Copy(&utils.NopWriter{}, cStdout)
			}
		}()
	}
	if stderr != nil {
		nJobs += 1
		if p, err := container.StderrPipe(); err != nil {
			errors <- err
		} else {
			cStderr = p
			go func() {
				utils.Debugf("attach: stderr: begin")
				defer utils.Debugf("attach: stderr: end")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce && stdin != nil {
					defer stdin.Close()
				}
				if stdinCloser != nil {
					defer stdinCloser.Close()
				}
				_, err := io.Copy(stderr, cStderr)
				if err == io.ErrClosedPipe {
					err = nil
				}
				if err != nil {
					utils.Errorf("attach: stderr: %s", err)
				}
				errors <- err
			}()
		}
	} else {
		go func() {
			if stdinCloser != nil {
				defer stdinCloser.Close()
			}
			if cStderr, err := container.StderrPipe(); err != nil {
				utils.Errorf("attach: stderr pipe: %s", err)
			} else {
				io.Copy(&utils.NopWriter{}, cStderr)
			}
		}()
	}

	return utils.Go(func() error {
		defer func() {
			if cStdout != nil {
				cStdout.Close()
			}
			if cStderr != nil {
				cStderr.Close()
			}
		}()

		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
		// of closing the passed stdin? Add an intermediary io.Pipe?
		for i := 0; i < nJobs; i += 1 {
			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
			if err := <-errors; err != nil {
				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
				return err
			}
			utils.Debugf("attach: job %d completed successfully", i+1)
		}
		utils.Debugf("attach: all jobs completed successfully")
		return nil
	})
}
func (runtime *Runtime) restore() error {
	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf("Loading containers: ")
	}
	dir, err := ioutil.ReadDir(runtime.repository)
	if err != nil {
		return err
	}
	containers := make(map[string]*Container)
	currentDriver := runtime.driver.String()

	for _, v := range dir {
		id := v.Name()
		container, err := runtime.load(id)
		if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
			fmt.Print(".")
		}
		if err != nil {
			utils.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			utils.Debugf("Loaded container %v", container.ID)
			containers[container.ID] = container
		} else {
			utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	register := func(container *Container) {
		if err := runtime.Register(container); err != nil {
			utils.Debugf("Failed to register container %s: %s", container.ID, err)
		}
	}

	if entities := runtime.containerGraph.List("/", -1); entities != nil {
		for _, p := range entities.Paths() {
			if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
				fmt.Print(".")
			}
			e := entities[p]
			if container, ok := containers[e.ID()]; ok {
				register(container)
				delete(containers, e.ID())
			}
		}
	}

	// Any containers that are left over do not exist in the graph
	for _, container := range containers {
		// Try to set the default name for a container if it exists prior to links
		container.Name, err = generateRandomName(runtime)
		if err != nil {
			container.Name = utils.TruncateID(container.ID)
		}
		if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
			utils.Debugf("Setting default id - %s", err)
		}
		register(container)
	}

	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf(": done.\n")
	}

	return nil
}