func (container *Container) cleanup() {
	container.releaseNetwork()

	// Disable all active links
	if container.activeLinks != nil {
		for _, link := range container.activeLinks {
			link.Disable()
		}
	}

	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			utils.Errorf("%s: Error close stdin: %s", container.ID, err)
		}
	}

	if err := container.stdout.CloseWriters(); err != nil {
		utils.Errorf("%s: Error close stdout: %s", container.ID, err)
	}

	if err := container.stderr.CloseWriters(); err != nil {
		utils.Errorf("%s: Error close stderr: %s", container.ID, err)
	}

	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			utils.Errorf("%s: Error closing Pty master: %s", container.ID, err)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
	}
}
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}

	container.State.SetStopped(exitCode)

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	if container.daemon != nil && container.daemon.srv != nil {
		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
	}

	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
		// FIXME: there is a race condition here between two RUN instructions in a Dockerfile,
		// because they share the same runconfig and change the image. This must be fixed
		// in server/buildfile.go
		if err := container.toDisk(); err != nil {
			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
		}
	}

	return err
}
func (daemon *Daemon) Close() error {
	errorsStrings := []string{}
	if err := daemon.shutdown(); err != nil {
		utils.Errorf("daemon.shutdown(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := portallocator.ReleaseAll(); err != nil {
		utils.Errorf("portallocator.ReleaseAll(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := daemon.driver.Cleanup(); err != nil {
		utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := daemon.containerGraph.Close(); err != nil {
		utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := mount.Unmount(daemon.config.Root); err != nil {
		utils.Errorf("daemon.Umount(%s): %s", daemon.config.Root, err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if len(errorsStrings) > 0 {
		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
	}
	return nil
}
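// Hedged sketch (not from the original sources): the error-aggregation
// pattern used by Close() above, extracted into a hypothetical closeAll
// helper so it compiles and runs on its own. The step functions and their
// messages are invented for illustration.
package main

import (
	"fmt"
	"strings"
)

// closeAll runs every cleanup step, records each failure, and returns a
// single error joining all failure messages (nil if every step succeeded).
func closeAll(steps ...func() error) error {
	errorStrings := []string{}
	for _, step := range steps {
		if err := step(); err != nil {
			errorStrings = append(errorStrings, err.Error())
		}
	}
	if len(errorStrings) > 0 {
		return fmt.Errorf("%s", strings.Join(errorStrings, ", "))
	}
	return nil
}

func main() {
	err := closeAll(
		func() error { return nil },
		func() error { return fmt.Errorf("release ports: device busy") },
		func() error { return fmt.Errorf("unmount root: not mounted") },
	)
	fmt.Println(err) // release ports: device busy, unmount root: not mounted
}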
func (container *Container) cleanup() {
	container.releaseNetwork()

	// Disable all active links
	if container.activeLinks != nil {
		for _, link := range container.activeLinks {
			link.Disable()
		}
	}

	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			utils.Errorf("%s: Error close stdin: %s", container.ID, err)
		}
	}

	if err := container.stdout.CloseWriters(); err != nil {
		utils.Errorf("%s: Error close stdout: %s", container.ID, err)
	}

	if err := container.stderr.CloseWriters(); err != nil {
		utils.Errorf("%s: Error close stderr: %s", container.ID, err)
	}

	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			utils.Errorf("%s: Error closing Pty master: %s", container.ID, err)
		}
	}

	var (
		root   = container.RootfsPath()
		mounts = []string{
			root,
			path.Join(root, "/.dockerinit"),
			path.Join(root, "/.dockerenv"),
			path.Join(root, "/etc/resolv.conf"),
		}
	)

	if container.HostnamePath != "" && container.HostsPath != "" {
		mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts"))
	}

	for r := range container.Volumes {
		mounts = append(mounts, path.Join(root, r))
	}

	for i := len(mounts) - 1; i >= 0; i-- {
		if lastError := mount.Unmount(mounts[i]); lastError != nil {
			log.Printf("Failed to umount %v: %v", mounts[i], lastError)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
	}
}
func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
	// Start looking for a free /dev/loop
	for {
		target := fmt.Sprintf("/dev/loop%d", index)
		index++

		fi, err := osStat(target)
		if err != nil {
			if osIsNotExist(err) {
				utils.Errorf("There are no more loopback devices available.")
			}
			return nil, ErrAttachLoopbackDevice
		}

		if fi.Mode()&osModeDevice != osModeDevice {
			utils.Errorf("Loopback device %s is not a block device.", target)
			continue
		}

		// OpenFile adds O_CLOEXEC
		loopFile, err = osOpenFile(target, osORdWr, 0644)
		if err != nil {
			utils.Errorf("Error opening loopback device: %s", err)
			return nil, ErrAttachLoopbackDevice
		}

		// Try to attach to the loop file
		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
			loopFile.Close()

			// If the error is EBUSY, then try the next loopback
			if err != sysEBusy {
				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
				return nil, ErrAttachLoopbackDevice
			}

			// Otherwise, we keep going with the loop
			continue
		}

		// In case of success, we finished. Break the loop.
		break
	}

	// This can't happen, but let's be sure
	if loopFile == nil {
		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
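// Hedged sketch (illustration only): the probe-and-retry control flow from
// openNextAvailableLoopback above, with the ioctl replaced by a hypothetical
// tryAttach callback so the loop can run without real /dev/loop devices.
// Only the EBUSY-means-try-the-next-index logic is taken from the original.
package main

import (
	"fmt"
	"syscall"
)

// nextFree probes indices starting at start and returns the first one that
// tryAttach accepts. EBUSY means "taken, try the next index"; any other
// error aborts the scan.
func nextFree(start int, tryAttach func(index int) error) (int, error) {
	for index := start; index < start+64; index++ {
		err := tryAttach(index)
		if err == nil {
			return index, nil
		}
		if err != syscall.EBUSY {
			return -1, err
		}
		// Busy: keep scanning.
	}
	return -1, fmt.Errorf("no free device found")
}

func main() {
	// Pretend indices 0..2 are already attached.
	idx, err := nextFree(0, func(i int) error {
		if i < 3 {
			return syscall.EBUSY
		}
		return nil
	})
	fmt.Println(idx, err) // 3 <nil>
}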
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	if container.command == nil {
		// This happens when you have a GHOST container with lxc
		populateCommand(container)
		err = container.runtime.RestoreCommand(container)
	} else {
		pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
		exitCode, err = container.runtime.Run(container, pipes, callback)
	}

	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}

	container.State.SetStopped(exitCode)

	// FIXME: there is a race condition here which causes this to fail during the unit tests.
	// If another goroutine was waiting for Wait() to return before removing the container's root
	// from the filesystem... At this point it may already have done so.
	// This is because State.setStopped() has already been called, and has caused Wait()
	// to return.
	// FIXME: why are we serializing running state to disk in the first place?
	//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
	if err := container.ToDisk(); err != nil {
		utils.Errorf("Error dumping container state to disk: %s\n", err)
	}

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	if container.runtime != nil && container.runtime.srv != nil {
		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
	}

	close(container.waitLock)

	return err
}
func (devices *DeviceSet) Shutdown() error {
	devices.Lock()
	defer devices.Unlock()

	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)

	for path, count := range devices.activeMounts {
		for i := count; i > 0; i-- {
			if err := sysUnmount(path, 0); err != nil {
				utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err)
			}
		}
		delete(devices.activeMounts, path)
	}

	for _, d := range devices.Devices {
		if err := devices.waitClose(d.Hash); err != nil {
			utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
		}
		if err := devices.deactivateDevice(d.Hash); err != nil {
			utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err)
		}
	}

	pool := devices.getPoolDevName()
	if devinfo, err := getInfo(pool); err == nil && devinfo.Exists != 0 {
		if err := devices.deactivateDevice("pool"); err != nil {
			utils.Debugf("Shutdown deactivate %s , error: %s\n", pool, err)
		}
	}

	return nil
}
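// Hedged sketch (illustration only): the per-path mount-count teardown used
// by Shutdown() above. The real sysUnmount call is replaced by a print so
// the loop can run anywhere; the paths are invented examples.
package main

import "fmt"

func main() {
	activeMounts := map[string]int{
		"/var/lib/docker/devicemapper/mnt/aaa": 2,
		"/var/lib/docker/devicemapper/mnt/bbb": 1,
	}
	for path, count := range activeMounts {
		// Unmount once per outstanding reference, then drop the entry.
		for i := count; i > 0; i-- {
			fmt.Println("umount", path) // would be sysUnmount(path, 0)
		}
		delete(activeMounts, path)
	}
}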
func (devices *DeviceSet) deactivateDevice(hash string) error {
	utils.Debugf("[devmapper] deactivateDevice(%s)", hash)
	defer utils.Debugf("[devmapper] deactivateDevice END")

	// Wait for the unmount to be effective,
	// by watching the value of Info.OpenCount for the device
	if err := devices.waitClose(hash); err != nil {
		utils.Errorf("Warning: error waiting for device %s to close: %s\n", hash, err)
	}

	info := devices.Devices[hash]
	if info == nil {
		return fmt.Errorf("Unknown device %s", hash)
	}

	devinfo, err := getInfo(info.Name())
	if err != nil {
		utils.Debugf("\n--->Err: %s\n", err)
		return err
	}
	if devinfo.Exists != 0 {
		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
			utils.Debugf("\n--->Err: %s\n", err)
			return err
		}
	}

	return nil
}
func (b *buildFile) addContext(container *Container, orig, dest string) error {
	origPath := path.Join(b.context, orig)
	destPath := path.Join(container.RootfsPath(), dest)

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") {
		destPath = destPath + "/"
	}

	if !strings.HasPrefix(origPath, b.context) {
		return fmt.Errorf("Forbidden path: %s", origPath)
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		return fmt.Errorf("%s: no such file or directory", orig)
	}

	if fi.IsDir() {
		if err := CopyWithTar(origPath, destPath); err != nil {
			return err
		}
		// First try to unpack the source as an archive
	} else if err := UntarPath(origPath, destPath); err != nil {
		utils.Errorf("Couldn't untar %s to %s: %s", origPath, destPath, err)
		// If that fails, just copy it as a regular file
		if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
			return err
		}
		if err := CopyWithTar(origPath, destPath); err != nil {
			return err
		}
	}

	return nil
}
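// Hedged sketch (illustration only): the fallback order used by addContext
// above for regular files — first try to unpack the source as an archive,
// then copy it verbatim if that fails. untar and copyFile are hypothetical
// stand-ins for UntarPath and CopyWithTar.
package main

import "fmt"

func addFile(orig, dest string, untar, copyFile func(src, dst string) error) error {
	if err := untar(orig, dest); err == nil {
		return nil
	}
	// Not a tar archive (or unpacking failed): fall back to a plain copy.
	return copyFile(orig, dest)
}

func main() {
	untar := func(src, dst string) error { return fmt.Errorf("%s: not a tar archive", src) }
	copyFile := func(src, dst string) error { fmt.Printf("copied %s -> %s\n", src, dst); return nil }
	fmt.Println(addFile("app.conf", "/etc/app.conf", untar, copyFile))
}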
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		utils.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Println(r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = api.APIVERSION
		}
		if enableCors {
			writeCorsHeaders(w, r)
		}

		if version.GreaterThan(api.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			utils.Errorf("Error: %s", err)
			httpError(w, err)
		}
	}
}
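// Hedged sketch (illustration only): the closure-based handler factory used
// above, reduced to the standard library. Per-route metadata is captured in
// the closure, each call is logged, and handler errors are converted into
// HTTP error responses. makeHandler and apiFunc are invented names; the
// mux-based API-version parsing from the original is omitted here.
package main

import (
	"log"
	"net/http"
)

type apiFunc func(w http.ResponseWriter, r *http.Request) error

func makeHandler(localMethod, localRoute string, logging bool, h apiFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if logging {
			log.Println(r.Method, r.RequestURI)
		}
		if err := h(w, r); err != nil {
			log.Printf("Error in %s %s: %s", localMethod, localRoute, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func main() {
	http.Handle("/_ping", makeHandler("GET", "/_ping", true, func(w http.ResponseWriter, r *http.Request) error {
		_, err := w.Write([]byte("OK"))
		return err
	}))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}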
func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	name := vars["name"]

	copyData := &APICopy{}
	contentType := r.Header.Get("Content-Type")
	if contentType == "application/json" {
		if err := json.NewDecoder(r.Body).Decode(copyData); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("Content-Type not supported: %s", contentType)
	}

	if copyData.Resource == "" {
		return fmt.Errorf("Resource cannot be empty")
	}
	if copyData.Resource[0] == '/' {
		copyData.Resource = copyData.Resource[1:]
	}

	if err := srv.ContainerCopy(name, copyData.Resource, w); err != nil {
		utils.Errorf("%s", err.Error())
		return err
	}
	return nil
}
func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		utils.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Println(r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && userAgent[1] != VERSION {
				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], VERSION)
			}
		}
		version, err := strconv.ParseFloat(mux.Vars(r)["version"], 64)
		if err != nil {
			version = APIVERSION
		}
		if srv.runtime.config.EnableCors {
			writeCorsHeaders(w, r)
		}

		if version == 0 || version > APIVERSION {
			w.WriteHeader(http.StatusNotFound)
			return
		}

		if err := handlerFunc(srv, version, w, r, mux.Vars(r)); err != nil {
			utils.Errorf("Error: %s", err)
			httpError(w, err)
		}
	}
}
func LoopbackSetCapacity(file *osFile) error {
	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
		utils.Errorf("Error loopbackSetCapacity: %s", err)
		return ErrLoopbackSetCapacity
	}
	return nil
}
func matchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
	}
	return err == nil && mimetype == expectedType
}
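// Hedged sketch (illustration only): a standalone run of the
// mime.ParseMediaType check above, showing that media-type parameters such
// as charset are stripped before the comparison.
package main

import (
	"fmt"
	"mime"
)

func matches(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mimetype == expectedType
}

func main() {
	fmt.Println(matches("application/json; charset=utf-8", "application/json")) // true
	fmt.Println(matches("text/plain", "application/json"))                      // false
}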
func postContainersCopy(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var copyData engine.Env

	if contentType := r.Header.Get("Content-Type"); contentType == "application/json" {
		if err := copyData.Decode(r.Body); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("Content-Type not supported: %s", contentType)
	}

	if copyData.Get("Resource") == "" {
		return fmt.Errorf("Path cannot be empty")
	}
	if copyData.Get("Resource")[0] == '/' {
		copyData.Set("Resource", copyData.Get("Resource")[1:])
	}

	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
	job.Stdout.Add(w)
	if err := job.Run(); err != nil {
		utils.Errorf("%s", err.Error())
	}
	return nil
}
// Check if an image exists in the Registry
// TODO: This method should return the errors instead of masking them and returning false
func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	setTokenAuth(req, token)
	res, err := r.client.Do(req)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	res.Body.Close()
	return res.StatusCode == 200
}
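// Hedged sketch (illustration only): the exists-if-200 idiom used by
// LookupRemoteImage above, written against net/http directly. The URL and
// helper name are invented; like the original, every failure is collapsed
// into "does not exist".
package main

import (
	"fmt"
	"net/http"
)

func remoteExists(client *http.Client, url string) bool {
	res, err := client.Get(url)
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.StatusCode == http.StatusOK
}

func main() {
	fmt.Println(remoteExists(http.DefaultClient, "https://example.com/v1/images/deadbeef/json"))
}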
func postCommit(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	var (
		config engine.Env
		env    engine.Env
		job    = eng.Job("commit", r.Form.Get("container"))
	)
	if err := config.Import(r.Body); err != nil {
		utils.Errorf("%s", err)
	}

	job.Setenv("repo", r.Form.Get("repo"))
	job.Setenv("tag", r.Form.Get("tag"))
	job.Setenv("author", r.Form.Get("author"))
	job.Setenv("comment", r.Form.Get("comment"))
	job.SetenvSubEnv("config", &config)

	var id string
	job.Stdout.AddString(&id)
	if err := job.Run(); err != nil {
		return err
	}
	env.Set("Id", id)
	return writeJSON(w, http.StatusCreated, env)
}
func wsContainersAttach(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil {
		return err
	}

	h := websocket.Handler(func(ws *websocket.Conn) {
		defer ws.Close()
		job := eng.Job("attach", vars["name"])
		job.Setenv("logs", r.Form.Get("logs"))
		job.Setenv("stream", r.Form.Get("stream"))
		job.Setenv("stdin", r.Form.Get("stdin"))
		job.Setenv("stdout", r.Form.Get("stdout"))
		job.Setenv("stderr", r.Form.Get("stderr"))
		job.Stdin.Add(ws)
		job.Stdout.Add(ws)
		job.Stderr.Set(ws)
		if err := job.Run(); err != nil {
			utils.Errorf("Error: %s", err)
		}
	})
	h.ServeHTTP(w, r)

	return nil
}
func (devices *DeviceSet) Shutdown() error {
	devices.Lock()
	defer devices.Unlock()

	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)

	for _, info := range devices.Devices {
		if info.mountCount > 0 {
			if err := sysUnmount(info.mountPath, 0); err != nil {
				utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
			}
		}
	}

	for _, d := range devices.Devices {
		if err := devices.waitClose(d.Hash); err != nil {
			utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
		}
		if err := devices.deactivateDevice(d.Hash); err != nil {
			utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err)
		}
	}

	if err := devices.deactivatePool(); err != nil {
		utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
	}

	return nil
}
func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	var (
		config       engine.Env
		env          engine.Env
		job          = eng.Job("commit", r.Form.Get("container"))
		stdoutBuffer = bytes.NewBuffer(nil)
	)
	if err := config.Decode(r.Body); err != nil {
		utils.Errorf("%s", err)
	}

	job.Setenv("repo", r.Form.Get("repo"))
	job.Setenv("tag", r.Form.Get("tag"))
	job.Setenv("author", r.Form.Get("author"))
	job.Setenv("comment", r.Form.Get("comment"))
	job.SetenvSubEnv("config", &config)

	job.Stdout.Add(stdoutBuffer)
	if err := job.Run(); err != nil {
		return err
	}
	env.Set("Id", engine.Tail(stdoutBuffer, 1))
	return writeJSON(w, http.StatusCreated, env)
}
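// Hedged sketch (illustration only): the buffer-tail idiom above — the job
// writes its output into a bytes.Buffer and the caller takes the last
// non-empty line as the committed image ID. tail is a simplified stand-in
// for engine.Tail, not its actual implementation.
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// tail returns the last non-empty line of the buffer, or "" if there is none.
func tail(buf *bytes.Buffer) string {
	lines := strings.Split(strings.TrimSpace(buf.String()), "\n")
	for i := len(lines) - 1; i >= 0; i-- {
		if line := strings.TrimSpace(lines[i]); line != "" {
			return line
		}
	}
	return ""
}

func main() {
	stdoutBuffer := bytes.NewBufferString("Step 1 done\nStep 2 done\nd6e5f0a2b3c4\n")
	fmt.Println(tail(stdoutBuffer)) // d6e5f0a2b3c4
}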
func (runtime *Runtime) restore() error {
	wheel := "-\\|/"
	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf("Loading containers: ")
	}
	dir, err := ioutil.ReadDir(runtime.repository)
	if err != nil {
		return err
	}

	for i, v := range dir {
		id := v.Name()
		container, err := runtime.Load(id)
		if i%21 == 0 && os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
			fmt.Printf("\b%c", wheel[i%4])
		}
		if err != nil {
			utils.Errorf("Failed to load container %v: %v", id, err)
			continue
		}
		utils.Debugf("Loaded container %v", container.ID)
	}

	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf("\bdone.\n")
	}

	return nil
}
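// Hedged sketch (illustration only): the progress "wheel" printed while
// loading containers above. Each update erases the previous glyph with a
// backspace and prints the next one; the sleep stands in for loading a
// container from disk.
package main

import (
	"fmt"
	"time"
)

func main() {
	wheel := "-\\|/"
	fmt.Printf("Loading: ")
	for i := 0; i < 40; i++ {
		fmt.Printf("\b%c", wheel[i%4])
		time.Sleep(25 * time.Millisecond) // stand-in for loading one item
	}
	fmt.Printf("\bdone.\n")
}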
func (runtime *Runtime) restore() error {
	wheel := "-\\|/"
	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf("Loading containers: ")
	}
	dir, err := ioutil.ReadDir(runtime.repository)
	if err != nil {
		return err
	}
	containers := make(map[string]*Container)

	for i, v := range dir {
		id := v.Name()
		container, err := runtime.load(id)
		if i%21 == 0 && os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
			fmt.Printf("\b%c", wheel[i%4])
		}
		if err != nil {
			utils.Errorf("Failed to load container %v: %v", id, err)
			continue
		}
		utils.Debugf("Loaded container %v", container.ID)
		containers[container.ID] = container
	}

	register := func(container *Container) {
		if err := runtime.Register(container); err != nil {
			utils.Debugf("Failed to register container %s: %s", container.ID, err)
		}
	}

	if entities := runtime.containerGraph.List("/", -1); entities != nil {
		for _, p := range entities.Paths() {
			e := entities[p]
			if container, ok := containers[e.ID()]; ok {
				register(container)
				delete(containers, e.ID())
			}
		}
	}

	// Any containers that are left over do not exist in the graph
	for _, container := range containers {
		// Try to set the default name for a container if it exists prior to links
		name := generateRandomName(runtime)
		container.Name = name

		if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
			utils.Debugf("Setting default id - %s", err)
		}
		register(container)
	}

	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
		fmt.Printf("\bdone.\n")
	}

	return nil
}
func GetBlockDeviceSize(file *osFile) (uint64, error) {
	size, err := ioctlBlkGetSize64(file.Fd())
	if err != nil {
		utils.Errorf("Error getblockdevicesize: %s", err)
		return 0, ErrGetBlockSize
	}
	return uint64(size), nil
}
func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
	if err != nil {
		utils.Errorf("Error get loopback backing file: %s\n", err)
		return 0, 0, ErrGetLoopbackBackingFile
	}
	return loopInfo.loDevice, loopInfo.loInode, nil
}
func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	sendEvent := func(wf *utils.WriteFlusher, event *utils.JSONMessage) error {
		b, err := json.Marshal(event)
		if err != nil {
			return fmt.Errorf("JSON error")
		}
		_, err = wf.Write(b)
		if err != nil {
			// On error, evict the listener
			utils.Errorf("%s", err)
			srv.Lock()
			delete(srv.listeners, r.RemoteAddr)
			srv.Unlock()
			return err
		}
		return nil
	}

	if err := parseForm(r); err != nil {
		return err
	}
	listener := make(chan utils.JSONMessage)
	srv.Lock()
	srv.listeners[r.RemoteAddr] = listener
	srv.Unlock()

	since, err := strconv.ParseInt(r.Form.Get("since"), 10, 0)
	if err != nil {
		since = 0
	}

	w.Header().Set("Content-Type", "application/json")
	wf := utils.NewWriteFlusher(w)
	wf.Flush()

	if since != 0 {
		// If since, send previous events that happened after the timestamp
		for _, event := range srv.events {
			if event.Time >= since {
				err := sendEvent(wf, &event)
				if err != nil && err.Error() == "JSON error" {
					continue
				}
				if err != nil {
					return err
				}
			}
		}
	}

	for event := range listener {
		err := sendEvent(wf, &event)
		if err != nil && err.Error() == "JSON error" {
			continue
		}
		if err != nil {
			return err
		}
	}

	return nil
}
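// Hedged sketch (illustration only): the listener registry behind getEvents
// above — one channel per subscriber keyed by remote address, broadcast to
// every registered channel, and eviction of a subscriber under the same
// lock. The hub type and its methods are invented for this sketch.
package main

import (
	"fmt"
	"sync"
)

type hub struct {
	sync.Mutex
	listeners map[string]chan string
}

func (h *hub) subscribe(addr string) chan string {
	h.Lock()
	defer h.Unlock()
	ch := make(chan string, 8)
	h.listeners[addr] = ch
	return ch
}

func (h *hub) evict(addr string) {
	h.Lock()
	defer h.Unlock()
	delete(h.listeners, addr)
}

func (h *hub) broadcast(event string) {
	h.Lock()
	defer h.Unlock()
	for _, ch := range h.listeners {
		ch <- event
	}
}

func main() {
	h := &hub{listeners: make(map[string]chan string)}
	ch := h.subscribe("127.0.0.1:49152")
	h.broadcast("die")
	fmt.Println(<-ch) // die
	h.evict("127.0.0.1:49152")
}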
func Unmount(target string) error {
	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
		utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
	}
	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}
	return nil
}
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *osFile.
func attachLoopDevice(sparseName string) (loop *osFile, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard the error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		utils.Debugf("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
	if err != nil {
		utils.Errorf("Error opening sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		loFlags:    LoFlagsAutoClear,
	}

	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		utils.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			utils.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
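// Hedged sketch (illustration only): the undo-on-failure ordering from
// attachLoopDevice above — when a later configuration step fails, the
// earlier attachment is released before the error is returned. setup,
// configure and release are hypothetical stand-ins for the ioctl calls.
package main

import "fmt"

func attach(setup, configure, release func() error) error {
	if err := setup(); err != nil {
		return err
	}
	if err := configure(); err != nil {
		// Roll back the successful setup step so nothing is leaked.
		if rerr := release(); rerr != nil {
			fmt.Println("warning: rollback failed:", rerr)
		}
		return err
	}
	return nil
}

func main() {
	err := attach(
		func() error { return nil },
		func() error { return fmt.Errorf("cannot set up loopback device info") },
		func() error { return nil },
	)
	fmt.Println(err) // cannot set up loopback device info
}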
func (runtime *Runtime) Close() error {
	errorsStrings := []string{}
	if err := portallocator.ReleaseAll(); err != nil {
		utils.Errorf("portallocator.ReleaseAll(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := runtime.driver.Cleanup(); err != nil {
		utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := runtime.containerGraph.Close(); err != nil {
		utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if len(errorsStrings) > 0 {
		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
	}
	return nil
}
func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	name := vars["name"]

	if err := srv.ContainerExport(name, w); err != nil {
		utils.Errorf("%s", err)
		return err
	}
	return nil
}
// During cleanup aufs needs to unmount all mountpoints
func (a *Driver) Cleanup() error {
	ids, err := loadIds(path.Join(a.rootPath(), "layers"))
	if err != nil {
		return err
	}
	for _, id := range ids {
		if err := a.unmount(id); err != nil {
			utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
		}
	}
	return nil
}