func (srv *Server) ContainerStart(job *engine.Job) string {
	if len(job.Args) < 1 {
		return fmt.Sprintf("Usage: %s container_id", job.Name)
	}
	name := job.Args[0]
	runtime := srv.runtime
	container := runtime.Get(name)
	if container == nil {
		return fmt.Sprintf("No such container: %s", name)
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		var hostConfig HostConfig
		if err := job.ExportEnv(&hostConfig); err != nil {
			return err.Error()
		}

		// Validate the HostConfig binds. Make sure that:
		// 1) the source of a bind mount isn't "/"
		//    The bind mount "/:/foo" isn't allowed.
		// 2) the source exists
		//    The source to be bind mounted must exist on the host.
		for _, bind := range hostConfig.Binds {
			splitBind := strings.Split(bind, ":")
			source := splitBind[0]

			// refuse to bind mount "/" to the container
			if source == "/" {
				return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
			}

			// ensure the source exists on the host
			_, err := os.Stat(source)
			if err != nil && os.IsNotExist(err) {
				return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
			}
		}

		// Register any links from the host config before starting the container.
		// FIXME: we could just pass the container here, no need to look it up by name again.
		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
			return err.Error()
		}
		container.hostConfig = &hostConfig
		container.ToDisk()
	}

	if err := container.Start(); err != nil {
		return fmt.Sprintf("Cannot start container %s: %s", name, err)
	}
	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))

	return "0"
}