Example #1
// FIXME: Allow interrupting the current push when a new push of the same image is done.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	img, err := s.graph.Get(repoInfo.LocalName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[repoInfo.LocalName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[repoInfo.LocalName]; exists {
			if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", repoInfo.CanonicalName))
	if _, err := s.pushImage(r, job.Stdout, img.ID, endpoint.String(), token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example #2
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
//
// Argument syntax: search TERM
//
// Option environment:
//	'authConfig': json-encoded credentials to authenticate against the registry.
//		The search extends to images only accessible via the credentials.
//
//	'metaHeaders': extra HTTP headers to include in the request to the registry.
//		The headers should be passed as a json-encoded dictionary.
//
// Output:
//	Results are sent as a collection of structured messages (using engine.Table).
//	Each result is sent as a separate message.
//	Results are ordered by number of stars on the public registry.
func (s *Service) Search(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s TERM", job.Name)
	}
	var (
		term        = job.Args[0]
		metaHeaders = map[string][]string{}
		authConfig  = &AuthConfig{}
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", metaHeaders)

	r, err := NewRegistry(authConfig, HTTPRequestFactory(metaHeaders), IndexServerAddress(), true)
	if err != nil {
		return job.Error(err)
	}
	results, err := r.SearchRepositories(term)
	if err != nil {
		return job.Error(err)
	}
	outs := engine.NewTable("star_count", 0)
	for _, result := range results.Results {
		out := &engine.Env{}
		out.Import(result)
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
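The option environment documented above has to be populated by whoever drives the job. Below is a minimal caller-side sketch; it assumes the Search handler is registered on the engine under the job name "search" and uses the old engine package's helpers (eng.Job, SetenvJson, Stdout.Add, Run) with the engine, registry, and os packages imported, so treat the exact names as assumptions rather than a definitive API.

func searchImages(eng *engine.Engine, term string) error {
	// Build the job and attach the JSON-encoded options that the
	// handler above reads back with GetenvJson.
	job := eng.Job("search", term) // "search" is the assumed registration name
	job.SetenvJson("authConfig", &registry.AuthConfig{
		Username: "someuser", // placeholder credentials
		Password: "secret",
	})
	job.SetenvJson("metaHeaders", map[string][]string{
		"X-Example-Header": {"value"}, // extra headers forwarded to the registry
	})
	// Results are streamed as an engine.Table; send them to stdout.
	job.Stdout.Add(os.Stdout)
	return job.Run()
}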
Example #3
func ContainerConfigFromJob(job *engine.Job) *Config {
	config := &Config{
		Hostname:        job.Getenv("Hostname"),
		Domainname:      job.Getenv("Domainname"),
		User:            job.Getenv("User"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		Cpuset:          job.Getenv("Cpuset"),
		AttachStdin:     job.GetenvBool("AttachStdin"),
		AttachStdout:    job.GetenvBool("AttachStdout"),
		AttachStderr:    job.GetenvBool("AttachStderr"),
		Tty:             job.GetenvBool("Tty"),
		OpenStdin:       job.GetenvBool("OpenStdin"),
		StdinOnce:       job.GetenvBool("StdinOnce"),
		Image:           job.Getenv("Image"),
		WorkingDir:      job.Getenv("WorkingDir"),
		NetworkDisabled: job.GetenvBool("NetworkDisabled"),
	}
	job.GetenvJson("ExposedPorts", &config.ExposedPorts)
	job.GetenvJson("Volumes", &config.Volumes)
	if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
		config.PortSpecs = PortSpecs
	}
	if Env := job.GetenvList("Env"); Env != nil {
		config.Env = Env
	}
	if Cmd := job.GetenvList("Cmd"); Cmd != nil {
		config.Cmd = Cmd
	}
	if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil {
		config.Entrypoint = Entrypoint
	}
	return config
}
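Going in the other direction, the caller (for example, the API server decoding a create request) writes these same keys into the job environment before running it. A rough sketch is given below, assuming it lives in the same package as Config and that the old engine package's Setenv*/SetenvJson/SetenvList setters mirror the getters used above; the helper name is hypothetical.

func containerConfigToJob(job *engine.Job, config *Config) {
	// Scalar fields map one-to-one onto the getters in ContainerConfigFromJob.
	job.Setenv("Hostname", config.Hostname)
	job.Setenv("Image", config.Image)
	job.SetenvInt64("Memory", config.Memory)
	job.SetenvBool("Tty", config.Tty)
	job.SetenvBool("AttachStdin", config.AttachStdin)
	// Structured fields travel as JSON, string slices as env lists.
	job.SetenvJson("ExposedPorts", config.ExposedPorts)
	job.SetenvJson("Volumes", config.Volumes)
	job.SetenvList("Env", config.Env)
	job.SetenvList("Cmd", config.Cmd)
	job.SetenvList("Entrypoint", config.Entrypoint)
}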
Example #4
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) engine.Status {
	var (
		err        error
		authConfig = &AuthConfig{}
	)

	job.GetenvJson("authConfig", authConfig)
	// TODO: this is only done here because auth and registry need to be merged into one pkg
	if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() {
		endpoint, err := NewEndpoint(addr)
		if err != nil {
			return job.Error(err)
		}
		if _, err := endpoint.Ping(); err != nil {
			return job.Error(err)
		}
		authConfig.ServerAddress = endpoint.String()
	}
	status, err := Login(authConfig, HTTPRequestFactory(nil))
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", status)
	return engine.StatusOK
}
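A caller that only wants to validate credentials can run this handler as a job and capture the status line it prints. The sketch below assumes the handler is registered as "auth" and that bytes and strings are imported; again, the engine helpers belong to the old, since-removed engine API.

func checkCredentials(eng *engine.Engine, authConfig *AuthConfig) (string, error) {
	var buf bytes.Buffer
	job := eng.Job("auth") // assumed registration name for the handler above
	job.SetenvJson("authConfig", authConfig)
	job.Stdout.Add(&buf) // the handler prints the login status here
	if err := job.Run(); err != nil {
		return "", err
	}
	return strings.TrimSpace(buf.String()), nil
}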
Example #5
func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container, err := daemon.Get(name)
	if err != nil {
		return job.Error(err)
	}

	var (
		config    = container.Config
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	if err := runconfig.Merge(&newConfig, config); err != nil {
		return job.Error(err)
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}
Example #6
// FIXME: Allow interrupting the current push when a new push of the same image is done.
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)
	if _, err := srv.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer srv.poolRemove("push", localName)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	img, err := srv.daemon.Graph().Get(localName)
	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(srv.daemon.Repositories().Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example #7
File: pull.go Project: Gandi/docker
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}
	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #8
func (daemon *Daemon) ContainerSet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		config []struct {
			Key   string
			Value string
		}
		err error
	)
	job.GetenvJson("config", &config)
	log.Infof("Setting container: %s. config: %v", name, config)

	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("Can not find container %s", name)
	}
	if !container.State.IsRunning() {
		return job.Errorf("Container %s is not running", name)
	}

	var object []interface{}
	for _, pair := range config {
		var response struct {
			Key    string
			Err    string
			Status int
		}
		response.Key = pair.Key
		if err = setConfig(container, pair.Key, pair.Value); err != nil {
			response.Err = err.Error()
			response.Status = 255
		} else {
			response.Status = 0
		}
		object = append(object, response)
	}

	// save config to disk
	if err := container.ToDisk(); err != nil {
		return job.Error(err)
	}

	b, err := json.Marshal(object)
	if err != nil {
		return job.Error(err)
	}
	job.Stdout.Write(b)

	return engine.StatusOK
}
Example #9
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)
		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		RequestedIP:     job.Getenv("RequestedIP"),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
		PidMode:         PidMode(job.Getenv("PidMode")),
		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
Example #10
File: job.go Project: viirya/docker
func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}

	var (
		changes   = job.GetenvList("changes")
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
	if err != nil {
		return job.Error(err)
	}

	// ensure that the commands are valid
	for _, n := range ast.Children {
		if !validCommitCommands[n.Value] {
			return job.Errorf("%s is not a valid change command", n.Value)
		}
	}

	builder := &Builder{
		Daemon:        b.Daemon,
		Engine:        b.Engine,
		Config:        &newConfig,
		OutStream:     ioutil.Discard,
		ErrStream:     ioutil.Discard,
		disableCommit: true,
	}

	for i, n := range ast.Children {
		if err := builder.dispatch(i, n); err != nil {
			return job.Error(err)
		}
	}

	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example #11
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
//
// Argument syntax: search TERM
//
// Option environment:
//	'authConfig': json-encoded credentials to authenticate against the registry.
//		The search extends to images only accessible via the credentials.
//
//	'metaHeaders': extra HTTP headers to include in the request to the registry.
//		The headers should be passed as a json-encoded dictionary.
//
// Output:
//	Results are sent as a collection of structured messages (using engine.Table).
//	Each result is sent as a separate message.
//	Results are ordered by number of stars on the public registry.
func (s *Service) Search(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s TERM", job.Name)
	}
	var (
		term        = job.Args[0]
		metaHeaders = map[string][]string{}
		authConfig  = &AuthConfig{}
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", metaHeaders)

	repoInfo, err := ResolveRepositoryInfo(job, term)
	if err != nil {
		return job.Error(err)
	}
	// *TODO: Search multiple indexes.
	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}
	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}
	results, err := r.SearchRepositories(repoInfo.GetSearchTerm())
	if err != nil {
		return job.Error(err)
	}
	outs := engine.NewTable("star_count", 0)
	for _, result := range results.Results {
		out := &engine.Env{}
		out.Import(result)
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example #12
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
	}
	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}
	return hostConfig
}
Example #13
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) engine.Status {
	var (
		authConfig = new(AuthConfig)
		endpoint   *Endpoint
		index      *IndexInfo
		status     string
		err        error
	)

	job.GetenvJson("authConfig", authConfig)

	addr := authConfig.ServerAddress
	if addr == "" {
		// Use the official registry address if not specified.
		addr = IndexServerAddress()
	}

	if index, err = ResolveIndexInfo(job, addr); err != nil {
		return job.Error(err)
	}

	if endpoint, err = NewEndpoint(index); err != nil {
		log.Errorf("unable to get new registry endpoint: %s", err)
		return job.Error(err)
	}

	authConfig.ServerAddress = endpoint.String()

	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
		return job.Error(err)
	}

	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
	job.Printf("%s\n", status)

	return engine.StatusOK
}
Example #14
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) engine.Status {
	var authConfig = new(AuthConfig)

	job.GetenvJson("authConfig", authConfig)

	if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() {
		endpoint, err := NewEndpoint(addr, s.insecureRegistries)
		if err != nil {
			return job.Error(err)
		}
		if _, err := endpoint.Ping(); err != nil {
			return job.Error(err)
		}
		authConfig.ServerAddress = endpoint.String()
	}

	status, err := Login(authConfig, HTTPRequestFactory(nil))
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", status)

	return engine.StatusOK
}
Example #15
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", repoInfo.LocalName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", repoInfo.LocalName+":"+tag)

	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	logName := repoInfo.LocalName
	if tag != "" {
		logName += ":" + tag
	}

	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
		j := job.Eng.Job("trust_update_base")
		if err = j.Run(); err != nil {
			log.Errorf("error updating trust base graph: %s", err)
		}

		log.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
		if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil {
			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
			}
			return engine.StatusOK
		} else if err != registry.ErrDoesNotExist {
			log.Errorf("Error from V2 registry: %s", err)
		}

		log.Debug("image does not exist on v2 registry, falling back to v1")
	}

	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
	}

	return engine.StatusOK
}
Example #16
File: job.go Project: laktek/docker
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if _, _, err := registry.ResolveRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !utils.ValidGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
Example #17
// FIXME: Allow interrupting the current push when a new push of the same image is done.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return job.Error(err)
	}

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", repoInfo.LocalName)

	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err != nil {
		return job.Error(err)
	}

	if endpoint.Version == registry.APIVersion2 {
		err := s.pushV2Repository(r, job.Eng, job.Stdout, repoInfo, tag, sf)
		if err == nil {
			return engine.StatusOK
		}

		if err != ErrV2RegistryUnavailable {
			return job.Errorf("Error pushing to registry: %s", err)
		}
	}

	reposLen := 1
	if tag == "" {
		reposLen = len(s.Repositories[repoInfo.LocalName])
	}
	job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
	// If it fails, try to get the repository
	localRepo, exists := s.Repositories[repoInfo.LocalName]
	if !exists {
		return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
	}
	if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK

}
Example #18
func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)
	repoName, tag = parsers.ParseRepositoryTag(repoName)

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
	b := NewBuildFile(daemon, daemon.eng,
		&utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		&utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
	id, err := b.Build(context)
	if err != nil {
		return job.Error(err)
	}
	if repoName != "" {
		daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
Example #19
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
		mirrors     []string
	)

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	var isOfficial bool
	if endpoint.VersionString(1) == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName

		isOfficial = isOfficialName(remoteName)
		if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
			remoteName = "library/" + remoteName
		}
	}
	// Use provided mirrors, if any
	mirrors = s.mirrors

	if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) {
		j := job.Eng.Job("trust_update_base")
		if err = j.Run(); err != nil {
			return job.Errorf("error updating trust base graph: %s", err)
		}

		if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil {
			return engine.StatusOK
		} else if err != registry.ErrDoesNotExist {
			log.Errorf("Error from V2 registry: %s", err)
		}
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #20
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
	if job.EnvExists("HostConfig") {
		hostConfig := HostConfig{}
		job.GetenvJson("HostConfig", &hostConfig)

		// FIXME: These are for backward compatibility, if people use these
		// options with `HostConfig`, we should still make them workable.
		if job.EnvExists("Memory") && hostConfig.Memory == 0 {
			hostConfig.Memory = job.GetenvInt64("Memory")
		}
		if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 {
			hostConfig.MemorySwap = job.GetenvInt64("MemorySwap")
		}
		if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 {
			hostConfig.CpuShares = job.GetenvInt64("CpuShares")
		}
		if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
			hostConfig.CpusetCpus = job.Getenv("Cpuset")
		}

		return &hostConfig
	}

	hostConfig := &HostConfig{
		ContainerIDFile: job.Getenv("ContainerIDFile"),
		Memory:          job.GetenvInt64("Memory"),
		MemorySwap:      job.GetenvInt64("MemorySwap"),
		CpuShares:       job.GetenvInt64("CpuShares"),
		CpusetCpus:      job.Getenv("CpusetCpus"),
		Privileged:      job.GetenvBool("Privileged"),
		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
		IpcMode:         IpcMode(job.Getenv("IpcMode")),
		PidMode:         PidMode(job.Getenv("PidMode")),
		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
		CgroupParent:    job.Getenv("CgroupParent"),
	}

	// FIXME: This is for backward compatibility, if people use `Cpuset`
	// in json, make it workable, we will only pass hostConfig.CpusetCpus
	// to execDriver.
	if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
		hostConfig.CpusetCpus = job.Getenv("Cpuset")
	}

	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
	job.GetenvJson("Devices", &hostConfig.Devices)
	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
	job.GetenvJson("Ulimits", &hostConfig.Ulimits)
	job.GetenvJson("LogConfig", &hostConfig.LogConfig)
	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
	if Binds := job.GetenvList("Binds"); Binds != nil {
		hostConfig.Binds = Binds
	}
	if Links := job.GetenvList("Links"); Links != nil {
		hostConfig.Links = Links
	}
	if Dns := job.GetenvList("Dns"); Dns != nil {
		hostConfig.Dns = Dns
	}
	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
		hostConfig.DnsSearch = DnsSearch
	}
	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
		hostConfig.ExtraHosts = ExtraHosts
	}
	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
		hostConfig.VolumesFrom = VolumesFrom
	}
	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
		hostConfig.CapAdd = CapAdd
	}
	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
		hostConfig.CapDrop = CapDrop
	}

	return hostConfig
}
Example #21
func (s *TagStore) CmdPullAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 4 {
		return job.Errorf("Usage: %s CONTAINERID CONTAINERIMAGE IMAGE TAG", job.Name)
	}

	var (
		containerID    = job.Args[0]
		containerImage = job.Args[1]
		localName      = job.Args[2]
		tag            = job.Args[3]
		sf             = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig     = &registry.AuthConfig{}
		metaHeaders    map[string][]string
		mirrors        []string
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	for {
		c, err := s.poolAdd("pull", localName+":"+tag)
		if err != nil {
			if c != nil {
				// Another pull of the same repository is already taking place; just wait for it to finish
				job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
				<-c
				continue
			} else {
				return job.Error(err)
			}
		}
		break
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint.VersionString(1) == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
		isOfficial := isOfficialName(remoteName)
		if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
			remoteName = "library/" + remoteName
		}
	}
	// Use provided mirrors, if any
	mirrors = s.mirrors

	if err = s.pullMRepository(r, job.Stdout, containerID, containerImage, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #22
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		dockerfileName = job.Getenv("dockerfile")
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		pull           = job.GetenvBool("pull")
		memory         = job.GetenvInt64("memory")
		memorySwap     = job.GetenvInt64("memswap")
		cpuShares      = job.GetenvInt64("cpushares")
		cpuSetCpus     = job.Getenv("cpusetcpus")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if urlutil.IsGitURL(remoteURL) {
		if !urlutil.IsGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if urlutil.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		dockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(dockerfileName, string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		Pull:            pull,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
		dockerfileName:  dockerfileName,
		cpuShares:       cpuShares,
		cpuSetCpus:      cpuSetCpus,
		memory:          memory,
		memorySwap:      memorySwap,
		cancelled:       job.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, true)
	}
	return engine.StatusOK
}
Example #23
func (daemon *Daemon) ContainerCgroup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name           = job.Args[0]
		saveToFile     = job.GetenvBool("saveToFile")
		readSubsystem  = job.GetenvList("readSubsystem")
		writeSubsystem []struct {
			Key   string
			Value string
		}
		err error
	)

	job.GetenvJson("writeSubsystem", &writeSubsystem)

	log.Infof("name %s, readSubsystem %s, writeSubsystem %s", name, readSubsystem, writeSubsystem)

	if container := daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container %s is not running", name)
		}

		var object []interface{}

		// read
		for _, subsystem := range readSubsystem {
			var cgroupResponse struct {
				Subsystem string
				Out       string
				Err       string
				Status    int
			}

			cgroupResponse.Subsystem = subsystem
			output, err := fs.Get(container.ID, daemon.ExecutionDriver().Parent(), subsystem)
			if err != nil {
				cgroupResponse.Err = err.Error()
				cgroupResponse.Status = 255
			} else {
				cgroupResponse.Out = output
				cgroupResponse.Status = 0
			}
			object = append(object, cgroupResponse)
		}

		// write
		for _, pair := range writeSubsystem {
			var cgroupResponse struct {
				Subsystem string
				Out       string
				Err       string
				Status    int
			}

			cgroupResponse.Subsystem = pair.Key
			oldValue, _ := fs.Get(container.ID, daemon.ExecutionDriver().Parent(), pair.Key)

			err = fs.Set(container.ID, daemon.ExecutionDriver().Parent(), pair.Key, pair.Value)
			if err != nil {
				cgroupResponse.Err = err.Error()
				cgroupResponse.Status = 255
			} else {
				newValue, _ := fs.Get(container.ID, daemon.ExecutionDriver().Parent(), pair.Key)
				log.Infof("cgroup: %s old value: %s, new value: %s", pair.Key, oldValue, newValue)

				/* memory.limit_in_bytes 5g != 5368709120
				if newValue != pair.Value {
					return job.Errorf("cgroup %s change value failed, newValue %s is not same as expect value %s", pair.Key, newValue, pair.Value)
				}*/

				if err = updateConfig(container, pair.Key, pair.Value); err != nil {
					cgroupResponse.Out = err.Error()
					cgroupResponse.Status = 1
				} else {
					cgroupResponse.Out = newValue
					cgroupResponse.Status = 0
				}
			}
			object = append(object, cgroupResponse)
		}

		if saveToFile && err == nil {
			if err := container.ToDisk(); err != nil {
				return job.Error(err)
			}
		}

		b, err := json.Marshal(object)
		if err != nil {
			return job.Error(err)
		}

		job.Stdout.Write(b)
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}