// Commit will create a new image from a container's changes func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) { container, err := d.Get(name) if err != nil { return "", err } if c.Config == nil { c.Config = &runconfig.Config{} } newConfig, err := BuildFromConfig(d, c.Config, c.Changes) if err != nil { return "", err } if err := runconfig.Merge(newConfig, container.Config); err != nil { return "", err } commitCfg := &daemon.ContainerCommitConfig{ Pause: c.Pause, Repo: c.Repo, Tag: c.Tag, Author: c.Author, Comment: c.Comment, Config: newConfig, } img, err := d.Commit(container, commitCfg) if err != nil { return "", err } return img.ID, nil }
// Commit will create a new image from a container's changes // TODO: remove daemon, make Commit a method on *Builder ? func Commit(container *daemon.Container, d *daemon.Daemon, c *CommitConfig) (string, error) { // It is not possible to commit a running container on Windows if runtime.GOOS == "windows" && container.IsRunning() { return "", fmt.Errorf("Windows does not support commit of a running container") } if c.Config == nil { c.Config = &runconfig.Config{} } newConfig, err := BuildFromConfig(c.Config, c.Changes) if err != nil { return "", err } if err := runconfig.Merge(newConfig, container.Config); err != nil { return "", err } commitCfg := &daemon.ContainerCommitConfig{ Pause: c.Pause, Repo: c.Repo, Tag: c.Tag, Author: c.Author, Comment: c.Comment, Config: newConfig, } img, err := d.Commit(container, commitCfg) if err != nil { return "", err } return img.ID, nil }
// Create a test container from the given daemon `r` and run arguments `args`.
// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
	config, hc, _, err := parseRun(args, nil)
	// The deferred closure reads the captured `err` variable, so whichever
	// error is pending at return time (from parseRun or r.Create) triggers
	// t.Fatal — but only when a *testing.T was supplied.
	defer func() {
		if err != nil && t != nil {
			t.Fatal(err)
		}
	}()
	if err != nil {
		return nil, nil, err
	}
	// "_" is a placeholder image name, substituted with the unit-test image.
	if config.Image == "_" {
		config.Image = GetTestImage(r).ID
	}
	// `err` is reused (not shadowed) here, so the defer above still sees it.
	c, _, err := r.Create(config, "")
	if err != nil {
		return nil, nil, err
	}
	// NOTE: hostConfig is ignored.
	// If `args` specify privileged mode, custom lxc conf, external mount binds,
	// port redirects etc. they will be ignored.
	// This is because the correct way to set these things is to pass environment
	// to the `start` job.
	// FIXME: this helper function should be deprecated in favor of calling
	// `create` and `start` jobs directly.
	return c, hc, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) {
	// The named result `err` is assigned by every `return "", err` below, so
	// this defer reports any error path via t.Fatal when a *testing.T exists.
	defer func() {
		if err != nil && t != nil {
			t.Fatal(err)
		}
	}()
	container, hc, err := mkContainer(r, args, t)
	if err != nil {
		return "", err
	}
	defer r.Destroy(container)
	// Attach to stdout before starting the container so no output is missed.
	stdout, err := container.StdoutPipe()
	if err != nil {
		return "", err
	}
	defer stdout.Close()
	job := eng.Job("start", container.ID)
	if err := job.ImportEnv(hc); err != nil {
		return "", err
	}
	if err := job.Run(); err != nil {
		return "", err
	}
	// A negative duration means wait indefinitely for the container to stop.
	// The WaitStop result is deliberately ignored (best-effort wait).
	container.WaitStop(-1 * time.Second)
	data, err := ioutil.ReadAll(stdout)
	if err != nil {
		return "", err
	}
	output = string(data)
	return
}
// Commit will create a new image from a container's changes // TODO: remove daemon, make Commit a method on *Builder ? func Commit(containerName string, d *daemon.Daemon, c *CommitConfig) (string, error) { if c.Config == nil { c.Config = &runconfig.Config{} } newConfig, err := BuildFromConfig(c.Config, c.Changes) if err != nil { return "", err } commitCfg := &daemon.ContainerCommitConfig{ Pause: c.Pause, Repo: c.Repo, Tag: c.Tag, Author: c.Author, Comment: c.Comment, Config: newConfig, MergeConfigs: true, } imgID, err := d.Commit(containerName, commitCfg) if err != nil { return "", err } return imgID, nil }
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { decoder := runconfig.ContainerDecoder{} routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(d, decoder), container.NewRouter(d, decoder), image.NewRouter(d, decoder), systemrouter.NewRouter(d, c), volume.NewRouter(d), build.NewRouter(dockerfile.NewBuildManager(d)), swarmrouter.NewRouter(c), pluginrouter.NewRouter(plugin.GetManager()), } if d.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(d, c)) } if d.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } s.InitRouter(debug.IsEnabled(), routers...) }
func Commit(d *daemon.Daemon, name string, c *daemon.ContainerCommitConfig) (string, error) { container, err := d.Get(name) if err != nil { return "", err } if c.Config == nil { c.Config = &runconfig.Config{} } newConfig, err := BuildFromConfig(d, c.Config, c.Changes) if err != nil { return "", err } if err := runconfig.Merge(newConfig, container.Config); err != nil { return "", err } img, err := d.Commit(container, c.Repo, c.Tag, c.Comment, c.Author, c.Pause, newConfig) if err != nil { return "", err } return img.ID, nil }
// NewRouter initializes a new build router func NewRouter(d *daemon.Daemon, b Backend) router.Router { r := &swarmRouter{ backend: b, } r.initRoutes() if d.HasExperimental() { r.addExperimentalRoutes() } return r }
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) { ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() select { case <-ch: logrus.Debug("Clean shutdown succeded") case <-time.After(timeout * time.Second): logrus.Error("Force shutdown daemon") } }
func GetTestImage(daemon *daemon.Daemon) *image.Image { imgs, err := daemon.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image: %s", err) } for _, image := range imgs { if image.ID == unitTestImageID { return image } } log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs) return nil }
func initRouter(s *apiserver.Server, d *daemon.Daemon) { routers := []router.Router{ container.NewRouter(d), image.NewRouter(d), systemrouter.NewRouter(d), volume.NewRouter(d), build.NewRouter(dockerfile.NewBuildManager(d)), } if d.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(d)) } s.InitRouter(utils.IsDebugEnabled(), routers...) }
func initRouter(s *apiserver.Server, d *daemon.Daemon) { decoder := runconfig.ContainerDecoder{} routers := []router.Router{ container.NewRouter(d, decoder), image.NewRouter(d, decoder), systemrouter.NewRouter(d), volume.NewRouter(d), build.NewRouter(d), } if d.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(d)) } s.InitRouter(utils.IsDebugEnabled(), routers...) }
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { decoder := runconfig.ContainerDecoder{} routers := []router.Router{ container.NewRouter(d, decoder), image.NewRouter(d, decoder), systemrouter.NewRouter(d, c), volume.NewRouter(d), build.NewRouter(dockerfile.NewBuildManager(d)), swarmrouter.NewRouter(c), } if d.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(d, c)) } routers = addExperimentalRouters(routers) s.InitRouter(utils.IsDebugEnabled(), routers...) }
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-time.After(time.Duration(shutdownTimeout) * time.Second): logrus.Error("Force shutdown daemon") } }
// NewRouter initializes a new network router func NewRouter(d *daemon.Daemon) router.Router { c := d.NetworkController() if c == nil { return networkRouter{} } var routes []router.Route netHandler := api.NewHTTPHandler(c) // TODO: libnetwork should stop hijacking request/response. // It should define API functions to add normally to the router. handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { netHandler(w, r) return nil } for _, path := range []string{"/networks", "/services", "/sandboxes"} { routes = append(routes, networkRoute{path, handler}) } return networkRouter{routes} }
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { decoder := runconfig.ContainerDecoder{} routers := []router.Router{} // we need to add the checkpoint router before the container router or the DELETE gets masked routers = addExperimentalRouters(routers, d, decoder) routers = append(routers, []router.Router{ container.NewRouter(d, decoder), image.NewRouter(d, decoder), systemrouter.NewRouter(d, c), volume.NewRouter(d), build.NewRouter(dockerfile.NewBuildManager(d)), swarmrouter.NewRouter(c), }...) if d.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(d, c)) } s.InitRouter(utils.IsDebugEnabled(), routers...) }
func addExperimentalRouters(routers []router.Router, d *daemon.Daemon, decoder httputils.ContainerDecoder) []router.Router { if !d.HasExperimental() { return []router.Router{} } return append(routers, checkpointrouter.NewRouter(d, decoder)) }
// Build resolves the build context (local stream, git clone, or remote
// download), constructs a Builder from buildConfig, runs the build, and
// optionally tags the resulting image.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	// Validate the optional repo[:tag] the result will be tagged with.
	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}
	// Resolve the build context from one of three sources: the request body,
	// a git repository, or a plain URL pointing at a Dockerfile.
	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)
		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}
		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName
		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	// NOTE(review): if RemoteURL is set but matches neither the git nor the
	// URL branch, `context` stays nil and this Close() panics — confirm
	// callers always pass one of the three supported forms.
	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()

	// Assemble the Builder; both output streams write to buildConfig.Stdout
	// through JSON stream formatters.
	builder := &Builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	// Tag the built image only when a repo name was requested.
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
// FIXME: nuke() is deprecated by Daemon.Nuke() func nuke(daemon *daemon.Daemon) error { return daemon.Nuke() }
// Build is the main interface of the package, it gathers the Builder
// struct and calls builder.Run() to do all the real build job.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	sf := streamformatter.NewJSONStreamFormatter()

	// Validate the optional repo[:tag] the result will be tagged with.
	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	// Resolve the build context: request body, git clone, or remote URL.
	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)
		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		// Sniff the response to decide whether we got a bare Dockerfile
		// (text/plain) or a pre-packaged context archive.
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		// bodyReader is closed even when inspectResponse errors.
		defer bodyReader.Close()
		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}
		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}
			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName
			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}
	// NOTE(review): if RemoteURL is set but matches neither branch, `context`
	// stays nil and this Close() panics — confirm callers pre-validate.
	defer context.Close()

	// Assemble the builder; both output streams write to buildConfig.Stdout
	// through JSON stream formatters.
	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}
	// Release the images this build held active, whatever the outcome.
	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	// Tag the built image only when a repo name was requested.
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}