Example #1
func ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
	)
	v := url.Values{}

	if job.EnvExists("t") {
		v.Set("t", strconv.Itoa(job.GetenvInt("t")))
	}

	ship := shipWithContainerId(job, name)

	if ship != nil {
		// Dereference ship only after the nil check; constructing the client first would panic when no ship is found.
		cli := client.NewKraneClientApi(*ship, false, job)
		body, statusCode, err := readBody(cli.Call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
		if statusCode == 304 {
			return job.Errorf("Container already stopped")
		} else if statusCode == 404 {
			return job.Errorf("No such container: %s\n", name)
		} else if statusCode == 500 {
			return job.Errorf("Server error trying to stop %s: %s\n", name, err)
		} else if (statusCode >= 200) && (statusCode < 300) {
			fmt.Printf("%s", body)
		} else {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #2
func ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
	)
	v := url.Values{}

	if job.EnvExists("t") {
		v.Set("t", strconv.Itoa(job.GetenvInt("t")))
	}

	ship := shipWithContainerId(job, name)

	if ship != nil {
		_, statusCode, err := httpRequest("POST", "http", ship.Fqdn, ship.Port, "/containers/"+name+"/restart?"+v.Encode(), nil, false)
		if statusCode == 404 {
			return job.Errorf("No such container: %s\n", name)
		} else if statusCode == 500 {
			return job.Errorf("Server error trying to restart %s: %s\n", name, err)
		} else if (statusCode >= 200) && (statusCode < 300) {
			fmt.Printf("Container %s restarted\n", name)
		} else {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #3
File: create.go Project: hgschmie/docker
func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	if config.Memory != 0 && config.Memory < 4194304 {
		return job.Errorf("Minimum memory limit allowed is 4MB")
	}
	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		config.Memory = 0
	}
	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}
	if config.Memory > 0 && config.MemorySwap > 0 && config.MemorySwap < config.Memory {
		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
	}
	if config.Memory == 0 && config.MemorySwap > 0 {
		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
	}

	var hostConfig *runconfig.HostConfig
	if job.EnvExists("HostConfig") {
		hostConfig = runconfig.ContainerHostConfigFromJob(job)
	} else {
		// Older versions of the API don't provide a HostConfig.
		hostConfig = nil
	}

	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
	if err != nil {
		if daemon.Graph().IsNotExist(err) {
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	container.LogEvent("create")

	job.Printf("%s\n", container.ID)

	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}

	return engine.StatusOK
}
Example #4
File: create.go Project: prologic/docker
func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	if config.Memory != 0 && config.Memory < 4194304 {
		return job.Errorf("Minimum memory limit allowed is 4MB")
	}
	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		config.Memory = 0
	}
	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}

	var hostConfig *runconfig.HostConfig
	if job.EnvExists("HostConfig") {
		hostConfig = runconfig.ContainerHostConfigFromJob(job)
	} else {
		// Older versions of the API don't provide a HostConfig.
		hostConfig = nil
	}

	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
	if err != nil {
		if daemon.Graph().IsNotExist(err) {
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	container.LogEvent("create")
	// FIXME: this is necessary because daemon.Create might return a nil container
	// with a non-nil error. This should not happen! Once it's fixed we
	// can remove this workaround.
	if container != nil {
		job.Printf("%s\n", container.ID)
	}
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}

	return engine.StatusOK
}
Example #5
File: hostconfig.go Project: viirya/docker
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)
		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
		PidMode:         PidMode(job.Getenv("PidMode")),
		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)

	job.GetenvJson("Ulimits", &hostConfig.Ulimits)

	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
Example #6
func (s *TagStore) CmdDiffAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 3 {
		return job.Errorf("Usage : %s CONTAINERID SRCIMAGEID TAGIMAGEID", job.Name)
	}

	var (
		containerID   = job.Args[0]
		localName     = job.Args[1]
		parentImageID = job.Args[2]
		sf            = utils.NewStreamFormatter(job.GetenvBool("json"))
		rate          = 0 // rate at which image layer data is written to the container, per second
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	img, err := s.LookupImage(localName)
	if err != nil {
		return job.Error(err)
	}

	dest := s.graph.Driver().MountPath(containerID)
	fi, err := os.Stat(dest)
	if err != nil && !os.IsExist(err) {
		return job.Error(err)
	}
	if !fi.IsDir() {
		return job.Errorf(" Dest %s is not dir", dest)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Diff two mirrors(%s - %s)", img.ID, parentImageID), nil))
	fs, err := s.graph.Driver().Diff(img.ID, parentImageID, nil)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()
	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
	err = archive.ApplyLayer(dest, fs, int64(rate))
	if err != nil {
		return job.Error(err)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))
	return engine.StatusOK
}
Example #7
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
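	// "t" overrides the default 10-second wait for the container to stop before it is killed.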
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		job.Eng.Job("log", "restart", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #8
func (daemon *Daemon) ContainerRestart(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	container, err := daemon.Get(name)
	if err != nil {
		return err
	}
	if err := container.Restart(int(t)); err != nil {
		return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
	}
	container.LogEvent("restart")
	return nil
}
Example #9
File: restart.go Project: Gandi/docker
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		container.LogEvent("restart")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #10
func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := srv.daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #11
File: stop.go Project: BreezeWu/docker
func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if !container.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		container.LogEvent("stop")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #12
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)

		// FIXME: These are for backward compatibility, if people use these
		// options with `HostConfig`, we should still make them workable.
		if job.EnvExists("Memory") && hostConfig.Memory == 0 {
			hostConfig.Memory = job.GetenvInt64("Memory")
		}
		if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 {
			hostConfig.MemorySwap = job.GetenvInt64("MemorySwap")
		}
		if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 {
			hostConfig.CpuShares = job.GetenvInt64("CpuShares")
		}
		if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
			hostConfig.CpusetCpus = job.Getenv("Cpuset")
		}

		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		CpusetCpus:      job.Getenv("CpusetCpus"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
		PidMode:         PidMode(job.Getenv("PidMode")),
		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
		CgroupParent:    job.Getenv("CgroupParent"),
	}

	// FIXME: This is for backward compatibility, if people use `Cpuset`
	// in json, make it workable, we will only pass hostConfig.CpusetCpus
	// to execDriver.
	if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
		hostConfig.CpusetCpus = job.Getenv("Cpuset")
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	job.GetenvJson("Ulimits", &hostConfig.Ulimits)
	job.GetenvJson("LogConfig", &hostConfig.LogConfig)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
Example #13
func (s *TagStore) CmdPullAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 4 {
		return job.Errorf("Usage: %s CONTAINERID CONTAINERIMAGE IMAGE TAG", job.Name)
	}

	var (
		containerID    = job.Args[0]
		containerImage = job.Args[1]
		localName      = job.Args[2]
		tag            = job.Args[3]
		sf             = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig     = &registry.AuthConfig{}
		metaHeaders    map[string][]string
		mirrors        []string
		rate           = 0
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	for {
		c, err := s.poolAdd("pull", localName+":"+tag)
		if err != nil {
			if c != nil {
				// Another pull of the same repository is already taking place; just wait for it to finish
				job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
				<-c
				continue
			} else {
				return job.Error(err)
			}
		}
		break
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint.VersionString(1) == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
		isOfficial := isOfficialName(remoteName)
		if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
			remoteName = "library/" + remoteName
		}
	}
	// Use provided mirrors, if any
	mirrors = s.mirrors

	if err = s.pullMRepository(r, job.Stdout, containerID, containerImage, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors, rate); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}