// monitor runs the container's process and blocks until it exits, then
// records the exit code, cleans up resources and persists the final state.
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}

	container.State.SetStopped(exitCode)

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	container.LogEvent("die")

	// If the engine is shutting down, don't save the container state as stopped.
	// This will cause it to be restarted when the engine is restarted.
	if container.daemon != nil && container.daemon.eng != nil && !container.daemon.eng.IsShutdown() {
		if err := container.toDisk(); err != nil {
			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
		}
	}

	return err
}
// cleanup releases the container's network and stdio resources and unmounts
// its filesystem after the container has exited.
func (container *Container) cleanup() {
	container.releaseNetwork()

	// Disable all active links
	if container.activeLinks != nil {
		for _, link := range container.activeLinks {
			link.Disable()
		}
	}

	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			utils.Errorf("%s: Error closing stdin: %s", container.ID, err)
		}
	}
	if err := container.stdout.Clean(); err != nil {
		utils.Errorf("%s: Error closing stdout: %s", container.ID, err)
	}
	if err := container.stderr.Clean(); err != nil {
		utils.Errorf("%s: Error closing stderr: %s", container.ID, err)
	}
	if container.command != nil && container.command.Terminal != nil {
		if err := container.command.Terminal.Close(); err != nil {
			utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
	}
}
func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
	// Start looking for a free /dev/loop
	for {
		target := fmt.Sprintf("/dev/loop%d", index)
		index++

		fi, err := os.Stat(target)
		if err != nil {
			if os.IsNotExist(err) {
				utils.Errorf("There are no more loopback devices available.")
			}
			return nil, ErrAttachLoopbackDevice
		}

		if fi.Mode()&os.ModeDevice != os.ModeDevice {
			utils.Errorf("Loopback device %s is not a block device.", target)
			continue
		}

		// OpenFile adds O_CLOEXEC
		loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
		if err != nil {
			utils.Errorf("Error opening loopback device: %s", err)
			return nil, ErrAttachLoopbackDevice
		}

		// Try to attach to the loop file
		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
			loopFile.Close()

			// If the error is EBUSY, then try the next loopback
			if err != syscall.EBUSY {
				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
				return nil, ErrAttachLoopbackDevice
			}

			// Otherwise, we keep going with the loop
			continue
		}

		// In case of success, we finished. Break the loop.
		break
	}

	// This can't happen, but let's be sure
	if loopFile == nil {
		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
// Check if an image exists in the Registry
// TODO: This method should return the errors instead of masking them and returning false
func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	setTokenAuth(req, token)
	res, _, err := r.doRequest(req)
	if err != nil {
		utils.Errorf("Error in LookupRemoteImage %s", err)
		return false
	}
	res.Body.Close()
	return res.StatusCode == 200
}
func MatchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
	}
	return err == nil && mimetype == expectedType
}
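// A minimal usage sketch (an illustration, not part of the original sources):
// this mirrors how the API server guards JSON endpoints, e.g. the check in
// postContainersCopy below. isJSONRequest is a hypothetical helper name.
func isJSONRequest(r *http.Request) bool {
	return MatchesContentType(r.Header.Get("Content-Type"), "application/json")
}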
func LoopbackSetCapacity(file *os.File) error {
	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
		utils.Errorf("Error loopbackSetCapacity: %s", err)
		return ErrLoopbackSetCapacity
	}
	return nil
}
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		utils.Debugf("Calling %s %s", localMethod, localRoute)

		if logging {
			log.Println(r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = api.APIVERSION
		}
		if enableCors {
			writeCorsHeaders(w, r)
		}

		if version.GreaterThan(api.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client: %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			utils.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
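// A hypothetical registration sketch, assuming the gorilla/mux router that
// the mux.Vars call above implies. The names r, eng, and getContainersJSON
// are placeholders here, not definitions from this file.
func registerExampleRoute(r *mux.Router, eng *engine.Engine) {
	localMethod, localRoute := "GET", "/containers/json"
	r.Path(localRoute).
		Methods(localMethod).
		HandlerFunc(makeHttpHandler(eng, true, localMethod, localRoute, getContainersJSON, true, api.APIVERSION))
}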
func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	var (
		config       engine.Env
		env          engine.Env
		job          = eng.Job("commit", r.Form.Get("container"))
		stdoutBuffer = bytes.NewBuffer(nil)
	)
	if err := config.Decode(r.Body); err != nil {
		utils.Errorf("%s", err)
	}

	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
		job.Setenv("pause", "1")
	} else {
		job.Setenv("pause", r.FormValue("pause"))
	}

	job.Setenv("repo", r.Form.Get("repo"))
	job.Setenv("tag", r.Form.Get("tag"))
	job.Setenv("author", r.Form.Get("author"))
	job.Setenv("comment", r.Form.Get("comment"))
	job.SetenvSubEnv("config", &config)

	job.Stdout.Add(stdoutBuffer)
	if err := job.Run(); err != nil {
		return err
	}
	env.Set("Id", engine.Tail(stdoutBuffer, 1))
	return writeJSON(w, http.StatusCreated, env)
}
func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil {
		return err
	}

	h := websocket.Handler(func(ws *websocket.Conn) {
		defer ws.Close()
		job := eng.Job("attach", vars["name"])
		job.Setenv("logs", r.Form.Get("logs"))
		job.Setenv("stream", r.Form.Get("stream"))
		job.Setenv("stdin", r.Form.Get("stdin"))
		job.Setenv("stdout", r.Form.Get("stdout"))
		job.Setenv("stderr", r.Form.Get("stderr"))
		job.Stdin.Add(ws)
		job.Stdout.Add(ws)
		job.Stderr.Set(ws)
		if err := job.Run(); err != nil {
			utils.Errorf("Error attaching websocket: %s", err)
		}
	})
	h.ServeHTTP(w, r)

	return nil
}
func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
	images, _ := daemon.Graph().Map()
	var imgcount int
	if images == nil {
		imgcount = 0
	} else {
		imgcount = len(images)
	}
	kernelVersion := "<unknown>"
	if kv, err := kernel.GetKernelVersion(); err == nil {
		kernelVersion = kv.String()
	}

	operatingSystem := "<unknown>"
	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
		operatingSystem = s
	}
	if inContainer, err := operatingsystem.IsContainerized(); err != nil {
		utils.Errorf("Could not determine if daemon is containerized: %v", err)
		operatingSystem += " (error determining if containerized)"
	} else if inContainer {
		operatingSystem += " (containerized)"
	}

	// if we still have the original dockerinit binary from before we copied it
	// locally, let's return the path to that, since that's more intuitive (the
	// copied path is trivial to derive by hand given VERSION)
	initPath := utils.DockerInitPath("")
	if initPath == "" {
		// if that fails, we'll just return the path from the daemon
		initPath = daemon.SystemInitPath()
	}

	cjob := job.Eng.Job("subscribers_count")
	env, _ := cjob.Stdout.AddEnv()
	if err := cjob.Run(); err != nil {
		return job.Error(err)
	}

	v := &engine.Env{}
	v.SetInt("Containers", len(daemon.List()))
	v.SetInt("Images", imgcount)
	v.Set("Driver", daemon.GraphDriver().String())
	v.SetJson("DriverStatus", daemon.GraphDriver().Status())
	v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
	v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
	v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
	v.SetBool("Debug", os.Getenv("DEBUG") != "")
	v.SetInt("NFd", utils.GetTotalUsedFds())
	v.SetInt("NGoroutines", runtime.NumGoroutine())
	v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
	v.SetInt("NEventsListener", env.GetInt("count"))
	v.Set("KernelVersion", kernelVersion)
	v.Set("OperatingSystem", operatingSystem)
	v.Set("IndexServerAddress", registry.IndexServerAddress())
	v.Set("InitSha1", dockerversion.INITSHA1)
	v.Set("InitPath", initPath)
	v.SetList("Sockets", daemon.Sockets)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func GetBlockDeviceSize(file *os.File) (uint64, error) {
	size, err := ioctlBlkGetSize64(file.Fd())
	if err != nil {
		utils.Errorf("Error getblockdevicesize: %s", err)
		return 0, ErrGetBlockSize
	}
	return uint64(size), nil
}
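// A small usage sketch (assumption, not from the original sources): query the
// size of a block device; the device path argument is illustrative only.
func printBlockDeviceSize(devicePath string) error {
	f, err := os.Open(devicePath)
	if err != nil {
		return err
	}
	defer f.Close()

	size, err := GetBlockDeviceSize(f)
	if err != nil {
		return err
	}
	fmt.Printf("%s is %d bytes\n", devicePath, size)
	return nil
}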
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
	if err != nil {
		utils.Errorf("Error getting loopback backing file: %s\n", err)
		return 0, 0, ErrGetLoopbackBackingFile
	}
	return loopInfo.loDevice, loopInfo.loInode, nil
}
func Unmount(target string) error {
	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
		utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
	}
	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}
	return nil
}
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func attachLoopDevice(sparseName string) (loop *os.File, err error) {
	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard the error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		utils.Debugf("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
	if err != nil {
		utils.Errorf("Error opening sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		loFlags:    LoFlagsAutoClear,
	}
	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		utils.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			utils.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
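// A hedged usage sketch for the loopback helpers above (illustrative only;
// the sparse-file path is a placeholder supplied by the caller). Note that
// LoFlagsAutoClear, set in attachLoopDevice, makes the kernel detach the
// device once the last file descriptor referring to it is closed.
func attachLoopExample(sparsePath string) error {
	loopFile, err := attachLoopDevice(sparsePath)
	if err != nil {
		return err
	}
	defer loopFile.Close()
	fmt.Printf("%s attached to %s\n", sparsePath, loopFile.Name())
	return nil
}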
func (i *info) IsRunning() bool {
	var running bool

	output, err := i.driver.getInfo(i.ID)
	if err != nil {
		utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
		return false
	}
	if strings.Contains(string(output), "RUNNING") {
		running = true
	}
	return running
}
// During cleanup aufs needs to unmount all mountpoints
func (a *Driver) Cleanup() error {
	ids, err := loadIds(path.Join(a.rootPath(), "layers"))
	if err != nil {
		return err
	}
	for _, id := range ids {
		if err := a.unmount(id); err != nil {
			utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
		}
	}
	return mountpk.Unmount(a.root)
}
func (daemon *Daemon) Close() error {
	errorsStrings := []string{}
	if err := daemon.shutdown(); err != nil {
		utils.Errorf("daemon.shutdown(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := portallocator.ReleaseAll(); err != nil {
		utils.Errorf("portallocator.ReleaseAll(): %s", err)
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := daemon.driver.Cleanup(); err != nil {
		utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if err := daemon.containerGraph.Close(); err != nil {
		utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
		errorsStrings = append(errorsStrings, err.Error())
	}
	if len(errorsStrings) > 0 {
		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
	}
	return nil
}
// GetSize returns the real size and the virtual size of the container.
func (container *Container) GetSize() (int64, int64) {
	var (
		sizeRw, sizeRootfs int64
		err                error
		driver             = container.daemon.driver
	)

	if err := container.Mount(); err != nil {
		utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
		return sizeRw, sizeRootfs
	}
	defer container.Unmount()

	if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
		sizeRw, err = differ.DiffSize(container.ID)
		if err != nil {
			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
			// FIXME: GetSize should return an error. Not changing it now in case
			// there is a side-effect.
			sizeRw = -1
		}
	} else {
		changes, _ := container.Changes()
		if changes != nil {
			sizeRw = archive.ChangesSize(container.basefs, changes)
		} else {
			sizeRw = -1
		}
	}

	// Only compute the rootfs size if the base filesystem actually exists.
	if _, err = os.Stat(container.basefs); err == nil {
		if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil {
			sizeRootfs = -1
		}
	}
	return sizeRw, sizeRootfs
}
func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
	utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
	defer utils.Debugf("[devmapper] deactivateDevice END")

	// Wait for the unmount to be effective,
	// by watching the value of Info.OpenCount for the device
	if err := devices.waitClose(info); err != nil {
		utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
	}

	devinfo, err := getInfo(info.Name())
	if err != nil {
		return err
	}
	if devinfo.Exists != 0 {
		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
			return err
		}
	}

	return nil
}
// Write writes bytes to all writers. Failed writers will be evicted during
// this call.
func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
	created := time.Now().UTC()
	w.Lock()
	if writers, ok := w.streams[""]; ok {
		for sw := range writers {
			if n, err := sw.Write(p); err != nil || n != len(p) {
				// On error, evict the writer
				delete(writers, sw)
			}
		}
	}
	w.buf.Write(p)
	for {
		line, err := w.buf.ReadString('\n')
		if err != nil {
			w.buf.Write([]byte(line))
			break
		}
		for stream, writers := range w.streams {
			if stream == "" {
				continue
			}
			b, err := json.Marshal(utils.JSONLog{Log: line, Stream: stream, Created: created})
			if err != nil {
				utils.Errorf("Error making JSON log line: %s", err)
				continue
			}
			b = append(b, '\n')
			for sw := range writers {
				if _, err := sw.Write(b); err != nil {
					delete(writers, sw)
				}
			}
		}
	}
	w.Unlock()
	return len(p), nil
}
func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var copyData engine.Env

	if contentType := r.Header.Get("Content-Type"); api.MatchesContentType(contentType, "application/json") {
		if err := copyData.Decode(r.Body); err != nil {
			return err
		}
	} else {
		return fmt.Errorf("Content-Type not supported: %s", contentType)
	}

	if copyData.Get("Resource") == "" {
		return fmt.Errorf("Path cannot be empty")
	}

	origResource := copyData.Get("Resource")
	if copyData.Get("Resource")[0] == '/' {
		copyData.Set("Resource", copyData.Get("Resource")[1:])
	}

	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
	job.Stdout.Add(w)
	if err := job.Run(); err != nil {
		utils.Errorf("%s", err.Error())
		if strings.Contains(err.Error(), "No such container") {
			w.WriteHeader(http.StatusNotFound)
		} else if strings.Contains(err.Error(), "no such file or directory") {
			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
		}
	}
	return nil
}
// Unmount and remove the dir information
func (a *Driver) Remove(id string) error {
	// Protect the a.active from concurrent access
	a.Lock()
	defer a.Unlock()

	if a.active[id] != 0 {
		utils.Errorf("Warning: removing active id %s\n", id)
	}

	// Make sure the dir is umounted first
	if err := a.unmount(id); err != nil {
		return err
	}
	tmpDirs := []string{
		"mnt",
		"diff",
	}

	// Atomically remove each directory in turn by first moving it out of the
	// way (so that docker doesn't find it anymore) before doing removal of
	// the whole tree.
	for _, p := range tmpDirs {
		realPath := path.Join(a.rootPath(), p, id)
		tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
		if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
			return err
		}
		defer os.RemoveAll(tmpPath)
	}

	// Remove the layers file for the id
	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
func httpError(w http.ResponseWriter, err error) {
	statusCode := http.StatusInternalServerError
	// FIXME: this is brittle and should not be necessary.
	// If we need to differentiate between different possible error types, we should
	// create appropriate error types with clearly defined meaning.
	if strings.Contains(err.Error(), "No such") {
		statusCode = http.StatusNotFound
	} else if strings.Contains(err.Error(), "Bad parameter") {
		statusCode = http.StatusBadRequest
	} else if strings.Contains(err.Error(), "Conflict") {
		statusCode = http.StatusConflict
	} else if strings.Contains(err.Error(), "Impossible") {
		statusCode = http.StatusNotAcceptable
	} else if strings.Contains(err.Error(), "Wrong login/password") {
		statusCode = http.StatusUnauthorized
	} else if strings.Contains(err.Error(), "hasn't been activated") {
		statusCode = http.StatusForbidden
	}

	if err != nil {
		utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
		http.Error(w, err.Error(), statusCode)
	}
}
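// An illustrative (hypothetical) caller for the substring mapping above: a
// "No such ..." error from a job surfaces to HTTP clients as 404 Not Found.
func replyNoSuchContainer(w http.ResponseWriter, name string) {
	// Writes http.StatusNotFound plus the error text to the response.
	httpError(w, fmt.Errorf("No such container: %s", name))
}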
// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	tw := tar.NewWriter(compressWriter)

	go func() {
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		if options.Includes == nil {
			options.Includes = []string{"."}
		}

		twBuf := bufio.NewWriterSize(nil, twBufSize)

		for _, include := range options.Includes {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", filePath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil {
					return nil
				}

				for _, exclude := range options.Excludes {
					matched, err := filepath.Match(exclude, relFilePath)
					if err != nil {
						utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
						return err
					}
					if matched {
						utils.Debugf("Skipping excluded path: %s", relFilePath)
						if f.IsDir() {
							return filepath.SkipDir
						}
						return nil
					}
				}

				if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
					utils.Debugf("Can't add file %s to tar: %s\n", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			utils.Debugf("Can't close tar writer: %s\n", err)
		}
		if err := compressWriter.Close(); err != nil {
			utils.Debugf("Can't close compress writer: %s\n", err)
		}
		if err := pipeWriter.Close(); err != nil {
			utils.Debugf("Can't close pipe writer: %s\n", err)
		}
	}()

	return pipeReader, nil
}
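// A minimal usage sketch, assuming a Gzip compression constant is defined
// alongside CompressStream in this package: archive a directory honouring
// Includes/Excludes and stream the result into a file. Illustrative only.
func tarDirectoryToFile(srcPath, dest string) error {
	rc, err := TarWithOptions(srcPath, &TarOptions{
		Compression: Gzip,
		Includes:    []string{"."},
		Excludes:    []string{"*.tmp"},
	})
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}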
func (d *Driver) Put(id string) {
	if err := d.DeviceSet.UnmountDevice(id); err != nil {
		utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
	}
}
func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		logs   = job.GetenvBool("logs")
		stream = job.GetenvBool("stream")
		stdin  = job.GetenvBool("stdin")
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
	)

	container := srv.daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	// logs
	if logs {
		cLog, err := container.ReadLog("json")
		if err != nil && os.IsNotExist(err) {
			// Legacy logs
			utils.Debugf("Old logs format")
			if stdout {
				cLog, err := container.ReadLog("stdout")
				if err != nil {
					utils.Errorf("Error reading logs (stdout): %s", err)
				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
					utils.Errorf("Error streaming logs (stdout): %s", err)
				}
			}
			if stderr {
				cLog, err := container.ReadLog("stderr")
				if err != nil {
					utils.Errorf("Error reading logs (stderr): %s", err)
				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
					utils.Errorf("Error streaming logs (stderr): %s", err)
				}
			}
		} else if err != nil {
			utils.Errorf("Error reading logs (json): %s", err)
		} else {
			dec := json.NewDecoder(cLog)
			for {
				l := &utils.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					utils.Errorf("Error streaming logs: %s", err)
					break
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", l.Log)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", l.Log)
				}
			}
		}
	}

	// stream
	if stream {
		var (
			cStdin           io.ReadCloser
			cStdout, cStderr io.Writer
			cStdinCloser     io.Closer
		)

		if stdin {
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer utils.Debugf("Closing buffered stdin pipe")
				io.Copy(w, job.Stdin)
			}()
			cStdin = r
			cStdinCloser = job.Stdin
		}
		if stdout {
			cStdout = job.Stdout
		}
		if stderr {
			cStderr = job.Stderr
		}

		<-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)

		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.State.WaitStop(-1 * time.Second)
		}
	}
	return engine.StatusOK
}
func (container *Container) LogEvent(action string) {
	d := container.daemon
	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
		utils.Errorf("Error logging event %s for container %s: %s", action, container.ID, err)
	}
}
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
		tail   = job.Getenv("tail")
		follow = job.GetenvBool("follow")
		times  = job.GetenvBool("timestamps")
		lines  = -1
		format string
	)
	if !(stdout || stderr) {
		return job.Errorf("You must choose at least one stream")
	}
	if times {
		format = time.RFC3339Nano
	}
	if tail == "" {
		tail = "all"
	}
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs
		utils.Debugf("Old logs format")
		if stdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				utils.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
				utils.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if stderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				utils.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
				utils.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		utils.Errorf("Error reading logs (json): %s", err)
	} else {
		if tail != "all" {
			var err error
			lines, err = strconv.Atoi(tail)
			if err != nil {
				utils.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
				lines = -1
			}
		}
		if lines != 0 {
			if lines > 0 {
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return job.Error(err)
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			dec := json.NewDecoder(cLog)
			for {
				l := &utils.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					utils.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if times {
					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", logLine)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", logLine)
				}
			}
		}
	}
	if follow {
		errors := make(chan error, 2)
		if stdout {
			stdoutPipe := container.StdoutLogPipe()
			go func() {
				errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
			}()
		}
		if stderr {
			stderrPipe := container.StderrLogPipe()
			go func() {
				errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
			}()
		}
		err := <-errors
		if err != nil {
			utils.Errorf("%s", err)
		}
	}
	return engine.StatusOK
}
func (daemon *Daemon) restore() error {
	var (
		debug             = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
		containers        = make(map[string]*Container)
		currentDriver     = daemon.driver.String()
		containersToStart = []*Container{}
	)

	if !debug {
		fmt.Printf("Loading containers: ")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if !debug {
			fmt.Print(".")
		}
		if err != nil {
			utils.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
			utils.Debugf("Loaded container %v", container.ID)
			containers[container.ID] = container
		} else {
			utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	if entities := daemon.containerGraph.List("/", -1); entities != nil {
		for _, p := range entities.Paths() {
			if !debug {
				fmt.Print(".")
			}
			e := entities[p]
			if container, ok := containers[e.ID()]; ok {
				if err := daemon.register(container, false, &containersToStart); err != nil {
					utils.Debugf("Failed to register container %s: %s", container.ID, err)
				}
				delete(containers, e.ID())
			}
		}
	}

	// Any containers that are left over do not exist in the graph
	for _, container := range containers {
		// Try to set the default name for a container if it exists prior to links
		container.Name, err = daemon.generateNewName(container.ID)
		if err != nil {
			utils.Debugf("Setting default id - %s", err)
		}
		if err := daemon.register(container, false, &containersToStart); err != nil {
			utils.Debugf("Failed to register container %s: %s", container.ID, err)
		}
	}

	for _, container := range containersToStart {
		utils.Debugf("Starting container %s", container.ID)
		if err := container.Start(); err != nil {
			utils.Debugf("Failed to start container %s: %s", container.ID, err)
		}
	}

	if !debug {
		fmt.Printf(": done.\n")
	}

	return nil
}