Example No. 1
func jobVersion(job *engine.Job) engine.Status {
	if _, err := dockerVersion().WriteTo(job.Stdout); err != nil {
		job.Errorf("%s", err)
		return engine.StatusErr
	}
	return engine.StatusOK
}
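Handlers with this signature plug into the old Docker engine's job system: they are registered under a name and invoked as jobs whose stdout can be redirected to any writer. The snippet below is a minimal sketch of that wiring, assuming the historical github.com/dotcloud/docker/engine package and its Register/Job/Stdout.Add/Run calls; the job name "version" and the capture buffer are illustrative.
import (
	"bytes"
	"fmt"
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// runVersionJob registers the handler above and runs it, capturing its output.
func runVersionJob(eng *engine.Engine) {
	// Register the handler under an illustrative job name.
	if err := eng.Register("version", jobVersion); err != nil {
		log.Fatal(err)
	}

	// Build the job, redirect its stdout into a buffer, and run it.
	var buf bytes.Buffer
	job := eng.Job("version")
	job.Stdout.Add(&buf)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // the JSON-encoded Env written by dockerVersion().WriteTo
}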
Example No. 2
// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("dirty") {
			b, err := json.Marshal(image)
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
Example No. 3
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil {
		return job.Error(err)
	}

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		go func() {
			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
		}()
	}

	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
Example No. 4
func (srv *Server) ListenAndServe(job *engine.Job) string {
	protoAddrs := job.Args
	chErrors := make(chan error, len(protoAddrs))
	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		switch protoAddrParts[0] {
		case "unix":
			if err := syscall.Unlink(protoAddrParts[1]); err != nil && !os.IsNotExist(err) {
				log.Fatal(err)
			}
		case "tcp":
			if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") {
				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
			}
		default:
			return "Invalid protocol format."
		}
		go func() {
			// FIXME: merge Server.ListenAndServe with ListenAndServe
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
		}()
	}
	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			return err.Error()
		}
	}
	return "0"
}
Example No. 5
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          *net.IP
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: *ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
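For reference, InitDriver in Example No. 29 registers this handler under the job name "allocate_interface". The sketch below shows one hedged way a caller might drive it, with an illustrative container ID and RequestedIP, assuming the historical engine Job API (Setenv, Stdout.Add, Run).
import (
	"bytes"
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// allocateExample asks the "allocate_interface" job for an IP and reads the result.
func allocateExample(eng *engine.Engine) {
	var buf bytes.Buffer
	job := eng.Job("allocate_interface", "abc123") // hypothetical container ID
	job.Setenv("RequestedIP", "172.17.0.10")       // optional; leave unset to let ipallocator choose
	job.Stdout.Add(&buf)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
	// buf now holds the Env written above: IP, Mask, Gateway, Bridge, IPPrefixLen.
	log.Printf("allocation result: %s", buf.String())
}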
Example No. 6
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.Set("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.Image)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.Set("Name", container.Name)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)

		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}

		out.SetJson("HostConfig", container.hostConfig)

		container.hostConfig.Links = nil
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
Example No. 7
func (w *Worker) Exec(job *engine.Job) engine.Status {
	fmt.Printf("--> %v\n", job.Args)
	cmd := exec.Command(job.Args[0], job.Args[1:]...)
	cmd.Stdout = job.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return job.Errorf("%v\n", err)
	}
	return engine.StatusOK
}
Example No. 8
func (hc *HostContext) exec(job *engine.Job, action hostbound) (status engine.Status) {
	if hc.tunnel == nil {
		return job.Errorf("Tunnel not open to host %s(id:%s)", hc.name, hc.id)
	}

	if client, err := newDockerHttpClient("tcp://localhost:8000", "v1.10"); err == nil {
		return action(client)
	} else {
		return job.Errorf("Unable to init http client: %v", err)
	}
}
Example No. 9
func (srv *Server) ContainerStart(job *engine.Job) string {
	if len(job.Args) < 1 {
		return fmt.Sprintf("Usage: %s container_id", job.Name)
	}
	name := job.Args[0]
	runtime := srv.runtime
	container := runtime.Get(name)

	if container == nil {
		return fmt.Sprintf("No such container: %s", name)
	}
	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		var hostConfig HostConfig
		if err := job.ExportEnv(&hostConfig); err != nil {
			return err.Error()
		}
		// Validate the HostConfig binds. Make sure that:
		// 1) the source of a bind mount isn't /
		//         The bind mount "/:/foo" isn't allowed.
		// 2) Check that the source exists
		//        The source to be bind mounted must exist.
		for _, bind := range hostConfig.Binds {
			splitBind := strings.Split(bind, ":")
			source := splitBind[0]

			// refuse to bind mount "/" to the container
			if source == "/" {
				return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
			}

			// ensure the source exists on the host
			_, err := os.Stat(source)
			if err != nil && os.IsNotExist(err) {
				return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
			}
		}
		// Register any links from the host config before starting the container
		// FIXME: we could just pass the container here, no need to lookup by name again.
		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
			return err.Error()
		}
		container.hostConfig = &hostConfig
		container.ToDisk()
	}
	if err := container.Start(); err != nil {
		return fmt.Sprintf("Cannot start container %s: %s", name, err)
	}
	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))

	return "0"
}
Example No. 10
// builtin jobs independent of any subsystem
func dockerVersion(job *engine.Job) engine.Status {
	v := &engine.Env{}
	v.Set("Version", dockerversion.VERSION)
	v.SetJson("ApiVersion", api.APIVERSION)
	v.Set("GitCommit", dockerversion.GITCOMMIT)
	v.Set("GoVersion", runtime.Version())
	v.Set("Os", runtime.GOOS)
	v.Set("Arch", runtime.GOARCH)
	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
		v.Set("KernelVersion", kernelVersion.String())
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example No. 11
// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
// it is changed and the image previously referenced by the tag loses that reference.
// This may cause the old image to be garbage-collected if its reference count reaches zero.
//
// Syntax: image_tag NEWNAME OLDNAME
// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
	}
	var (
		newName = job.Args[0]
		oldName = job.Args[1]
	)
	newRepo, newTag := utils.ParseRepositoryTag(newName)
	// FIXME: Set should either parse both old and new name, or neither.
	// 	the current prototype is inconsistent.
	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
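The syntax line in the comment above suggests how this handler is meant to be called. Below is a hedged usage sketch built from that example; the job name "image_tag" is taken from the comment's syntax line and may differ from the actual registration name, and the sketch assumes the historical engine Job API.
import (
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// tagExample re-tags an existing image, mirroring the comment's example invocation.
func tagExample(eng *engine.Engine) {
	// image_tag NEWNAME OLDNAME: point shykes/myapp:latest at shykes/myapp:1.42.0.
	job := eng.Job("image_tag", "shykes/myapp:latest", "shykes/myapp:1.42.0")
	if err := job.Run(); err != nil {
		log.Fatalf("tag failed: %v", err)
	}
}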
Example No. 12
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		if job.GetenvBool("dirty") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.HostConfig()})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.Set("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.Image)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.Set("Name", container.Name)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)
		out.SetJson("HostConfig", container.hostConfig)
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
Example No. 13
// Release the network interface allocated for a container and free its IP
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Printf("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
		log.Printf("Unable to release ip %s\n", err)
	}
	return engine.StatusOK
}
Example No. 14
// jobInitApi runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
func jobInitApi(job *engine.Job) string {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, ConfigFromJob(job))
	if err != nil {
		return err.Error()
	}
	if srv.runtime.config.Pidfile != "" {
		job.Logf("Creating pidfile")
		if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
			log.Fatal(err)
		}
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
	go func() {
		sig := <-c
		log.Printf("Received signal '%v', exiting\n", sig)
		utils.RemovePidFile(srv.runtime.config.Pidfile)
		srv.Close()
		os.Exit(0)
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
		return err.Error()
	}
	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
		return err.Error()
	}
	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
		return err.Error()
	}
	return "0"
}
Example No. 15
// Release the network interface allocated for a container and free its IP
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
		ip                 net.IP
		port               int
		proto              string
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Printf("Unable to unmap port %s: %s", nat, err)
		}

		// these are the host-side mappings
		switch a := nat.(type) {
		case *net.TCPAddr:
			proto = "tcp"
			ip = a.IP
			port = a.Port
		case *net.UDPAddr:
			proto = "udp"
			ip = a.IP
			port = a.Port
		}

		if err := portallocator.ReleasePort(ip, proto, port); err != nil {
			log.Printf("Unable to release port %s", nat)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
		log.Printf("Unable to release ip %s\n", err)
	}
	return engine.StatusOK
}
Example No. 16
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func() {
			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
		}()
	}

	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
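Example No. 14 registers a server variant of this handler under the name "serveapi"; the sketch below shows how such a job might be launched with one PROTO://ADDR argument per listener. The addresses and the Logging flag are illustrative, and the sketch assumes the historical engine Job API.
import (
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// serveApiExample launches the API listeners declared by its arguments.
func serveApiExample(eng *engine.Engine) {
	// One PROTO://ADDR argument per listener, matching the usage string above.
	job := eng.Job("serveapi", "unix:///var/run/docker.sock", "tcp://127.0.0.1:4243")
	job.SetenvBool("Logging", true) // read by some ListenAndServe variants (see Example No. 4)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}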
Example No. 17
// CmdGet returns information about an image.
// If the image doesn't exist, an empty object is returned, to allow
// checking for an image's existence.
func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	res := &engine.Env{}
	img, err := s.LookupImage(name)
	// Note: if the image doesn't exist, LookupImage returns
	// nil, nil.
	if err != nil {
		return job.Error(err)
	}
	if img != nil {
		// We don't directly expose all fields of the Image objects,
		// to maintain a clean public API which we can maintain over
		// time even if the underlying structure changes.
		// We should have done this with the Image object to begin with...
		// but we didn't, so now we're doing it here.
		//
		// Fields that we're probably better off not including:
		//	- ID (the caller already knows it, and we stay more flexible on
		//		naming down the road)
		//	- Parent. That field is really an implementation detail of
		//		layer storage ("layer is a diff against this other layer).
		//		It doesn't belong at the same level as author/description/etc.
		//	- Config/ContainerConfig. Those structs have the same sprawl problem,
		//		so we shouldn't include them wholesale either.
		//	- Comment: initially created to fulfill the "every image is a git commit"
		//		metaphor, in practice people either ignore it or use it as a
		//		generic description field which it isn't. On deprecation shortlist.
		res.Set("created", fmt.Sprintf("%v", img.Created))
		res.Set("author", img.Author)
		res.Set("os", img.OS)
		res.Set("architecture", img.Architecture)
		res.Set("docker_version", img.DockerVersion)
	}
	res.WriteTo(job.Stdout)
	return engine.StatusOK
}
Example No. 18
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
//	- A user-defined ID
//	- A collection of metadata describing the image
//	- A directory tree stored as a tar archive (also called the "layer")
//	- A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (i.e. you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
//	- Layer content must be streamed in tar format on stdin. An empty input is
//	valid and represents a nil layer.
//
//	- Image metadata must be passed in the command environment.
//		'json': a json-encoded object with all image metadata.
//			It will be stored as-is, without any encoding/decoding artifacts.
//			That is a requirement of the current registry client implementation,
//			because a re-encoded json might invalidate the image checksum at
//			the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return job.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is stupid and redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return job.Error(err)
	}
	if err := s.graph.Register(imgJSON, layer, img); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
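The comment above spells out the two inputs CmdSet expects: image metadata in the 'json' environment key and layer content streamed in tar format on stdin. The sketch below wires both up, assuming the historical engine Job API (Setenv, Stdin.Add); the job name "image_set" comes from the comment's syntax line, and the image ID and tar path are hypothetical.
import (
	"log"
	"os"

	"github.com/dotcloud/docker/engine" // historical import path
)

// setImageExample stores an image from pre-marshaled metadata and a layer tarball.
func setImageExample(eng *engine.Engine, imgJSON []byte) {
	layer, err := os.Open("/tmp/layer.tar") // hypothetical layer archive; empty input means a nil layer
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	job := eng.Job("image_set", "5d41402abc4b") // hypothetical image ID
	job.Setenv("json", string(imgJSON))         // stored as-is, per the note above
	job.Stdin.Add(layer)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}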
Example No. 19
// Initializes the plugin by handling job arguments
func (rax *RaxCloud) init(job *engine.Job) (status engine.Status) {
	app := cli.NewApp()
	app.Name = "rax"
	app.Usage = "Deploy Docker containers onto your Rackspace Cloud account. Accepts environment variables as listed in: https://github.com/openstack/python-novaclient/#command-line-api"
	app.Version = "0.0.1"
	app.Flags = []cli.Flag{
		cli.StringFlag{"instance", "", "Sets the server instance to target for specific commands such as version."},
		cli.StringFlag{"region", "DFW", "Sets the Rackspace region to target. This overrides any environment variables for region."},
		cli.StringFlag{"key", "/etc/swarmd/rax.id_rsa", "Sets SSH keys file to use when initializing secure connections."},
		cli.StringFlag{"user", "root", "Username to use when initializing secure connections."},
		cli.StringFlag{"image", "", "Image to use when provisioning new hosts."},
		cli.StringFlag{"flavor", "", "Flavor to use when provisioning new hosts."},
		cli.StringFlag{"auto-scaling", "none", "Flag that when set determines the mode used for auto-scaling."},
	}

	var err error
	app.Action = func(ctx *cli.Context) {
		err = rax.run(ctx, job.Eng)
	}

	if len(job.Args) > 1 {
		app.Run(append([]string{job.Name}, job.Args...))
	}

	// Failed to init?
	if err != nil || rax.gopher == nil {
		app.Run([]string{"rax", "help"})

		if err == nil {
			return engine.StatusErr
		} else {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
Example No. 20
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	protoAddrs := job.Args
	chErrors := make(chan error, len(protoAddrs))

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		go func() {
			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
		}()
	}

	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	// Tell the init daemon we are accepting requests
	go systemd.SdNotify("READY=1")

	return engine.StatusOK
}
Example No. 21
// CmdGet returns information about an image.
// If the image doesn't exist, an empty object is returned, to allow
// checking for an image's existence.
func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	res := &engine.Env{}
	img, err := s.LookupImage(name)
	// Note: if the image doesn't exist, LookupImage returns
	// nil, nil.
	if err != nil {
		return job.Error(err)
	}
	if img != nil {
		// We don't directly expose all fields of the Image objects,
		// to maintain a clean public API which we can maintain over
		// time even if the underlying structure changes.
		// We should have done this with the Image object to begin with...
		// but we didn't, so now we're doing it here.
		//
		// Fields that we're probably better off not including:
		//	- Config/ContainerConfig. Those structs have the same sprawl problem,
		//		so we shouldn't include them wholesale either.
		//	- Comment: initially created to fulfill the "every image is a git commit"
		//		metaphor, in practice people either ignore it or use it as a
		//		generic description field which it isn't. On deprecation shortlist.
		res.SetAuto("Created", img.Created)
		res.Set("Author", img.Author)
		res.Set("Os", img.OS)
		res.Set("Architecture", img.Architecture)
		res.Set("DockerVersion", img.DockerVersion)
		res.Set("Id", img.ID)
		res.Set("Parent", img.Parent)
	}
	res.WriteTo(job.Stdout)
	return engine.StatusOK
}
Example No. 22
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) engine.Status {
	var (
		err        error
		authConfig = &AuthConfig{}
	)

	job.GetenvJson("authConfig", authConfig)
	// TODO: this is only done here because auth and registry need to be merged into one pkg
	if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() {
		addr, err = ExpandAndVerifyRegistryUrl(addr)
		if err != nil {
			return job.Error(err)
		}
		authConfig.ServerAddress = addr
	}
	status, err := Login(authConfig, HTTPRequestFactory(nil))
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", status)
	return engine.StatusOK
}
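The handler reads its credentials from the "authConfig" environment key via GetenvJson, so a caller can supply them as a JSON-encoded string. The sketch below does exactly that; the job name "auth", the credential values, and the lowercase field names are assumptions, and the whole thing presumes the historical engine Job API.
import (
	"encoding/json"
	"log"
	"os"

	"github.com/dotcloud/docker/engine" // historical import path
)

// authExample verifies a set of registry credentials through the auth job.
func authExample(eng *engine.Engine) {
	// Counterpart of job.GetenvJson: store the credentials as a JSON string.
	creds, err := json.Marshal(map[string]string{
		"username":      "jdoe", // illustrative values; field names are assumed
		"password":      "secret",
		"serveraddress": "https://index.docker.io/v1/",
	})
	if err != nil {
		log.Fatal(err)
	}

	job := eng.Job("auth")
	job.Setenv("authConfig", string(creds))
	job.Stdout.Add(os.Stdout) // Auth prints the login status message here
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}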
Example No. 23
// CmdTarLayer returns the tar layer of the image
func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		fs, err := image.TarLayer()
		if err != nil {
			return job.Error(err)
		}
		defer fs.Close()

		if written, err := io.Copy(job.Stdout, fs); err != nil {
			return job.Error(err)
		} else {
			utils.Debugf("rendered layer for %s of [%d] size", image.ID, written)
		}

		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
Example No. 24
// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		b, err := json.Marshal(image)
		if err != nil {
			return job.Error(err)
		}
		job.Stdout.Write(b)
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
Example No. 25
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		b, err := json.Marshal(&struct {
			*Container
			HostConfig *runconfig.HostConfig
		}{container, container.HostConfig()})
		if err != nil {
			return job.Error(err)
		}
		job.Stdout.Write(b)
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
Example No. 26
func (srv *Server) ContainerCreate(job *engine.Job) string {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return fmt.Sprintf("Usage: %s ", job.Name)
	}
	var config Config
	if err := job.ExportEnv(&config); err != nil {
		return err.Error()
	}
	if config.Memory != 0 && config.Memory < 524288 {
		return "Minimum memory limit allowed is 512k"
	}
	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
		config.Memory = 0
	}
	if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
		config.MemorySwap = -1
	}
	container, buildWarnings, err := srv.runtime.Create(&config, name)
	if err != nil {
		if srv.runtime.graph.IsNotExist(err) {
			_, tag := utils.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = DEFAULTTAG
			}
			return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return err.Error()
	}
	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
	// FIXME: this is necessary because runtime.Create might return a nil container
	// with a non-nil error. This should not happen! Once it's fixed we
	// can remove this workaround.
	if container != nil {
		job.Printf("%s\n", container.ID)
	}
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}
	return "0"
}
Example No. 27
func (srv *Server) ContainerCreate(job *engine.Job) string {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return fmt.Sprintf("Usage: %s ", job.Name)
	}
	var config Config
	if err := job.ExportEnv(&config); err != nil {
		return err.Error()
	}
	if config.Memory != 0 && config.Memory < 524288 {
		return "Minimum memory limit allowed is 512k"
	}
	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
		config.Memory = 0
	}
	if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
		config.MemorySwap = -1
	}
	container, buildWarnings, err := srv.runtime.Create(&config, name)
	if err != nil {
		if srv.runtime.graph.IsNotExist(err) {
			_, tag := utils.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = DEFAULTTAG
			}
			return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return err.Error()
	}
	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
	job.Printf("%s\n", container.ID)
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}
	return "0"
}
Example No. 28
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)
	split := func(p string) (string, string) {
		parts := strings.Split(p, "/")
		return parts[0], parts[1]
	}

	for _, p := range ports {
		port, proto := split(p)
		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", parentIP,
			"--dport", port,
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}

		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", childIP,
			"--sport", port,
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggle iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}
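InitDriver in Example No. 29 registers this handler as the "link" job. The sketch below shows a hedged invocation: the action flag is handed straight to iptables by the handler, Ports entries use the "port/proto" form expected by the split helper, and the IP addresses are illustrative. It assumes the historical engine Job API, including SetenvList.
import (
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// linkExample inserts the forwarding rules that let a parent reach a linked child.
func linkExample(eng *engine.Engine) {
	job := eng.Job("link", "-I") // action goes straight to iptables.Raw (-I inserts, -D deletes)
	job.Setenv("ParentIP", "172.17.0.2")
	job.Setenv("ChildIP", "172.17.0.3")
	job.SetenvBool("IgnoreErrors", false)
	job.SetenvList("Ports", []string{"6379/tcp"}) // "port/proto", as the split helper expects
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}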
Example No. 29
func InitDriver(job *engine.Job) engine.Status {
	var (
		network        *net.IPNet
		enableIPTables = job.GetenvBool("EnableIptables")
		icc            = job.GetenvBool("InterContainerCommunication")
		ipForward      = job.GetenvBool("EnableIpForward")
		bridgeIP       = job.Getenv("BridgeIP")
	)

	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
		defaultBindingIP = net.ParseIP(defaultIP)
	}

	bridgeIface = job.Getenv("BridgeIface")
	usingDefaultBridge := false
	if bridgeIface == "" {
		usingDefaultBridge = true
		bridgeIface = DefaultNetworkBridge
	}

	addr, err := networkdriver.GetIfaceAddr(bridgeIface)
	if err != nil {
		// If we're not using the default bridge, fail without trying to create it
		if !usingDefaultBridge {
			job.Logf("bridge not found: %s", bridgeIface)
			return job.Error(err)
		}
		// If the iface is not found, try to create it
		job.Logf("creating new bridge for %s", bridgeIface)
		if err := createBridge(bridgeIP); err != nil {
			return job.Error(err)
		}

		job.Logf("getting iface addr")
		addr, err = networkdriver.GetIfaceAddr(bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		network = addr.(*net.IPNet)
	} else {
		network = addr.(*net.IPNet)
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !network.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
			}
		}
	}

	// Configure iptables for link support
	if enableIPTables {
		if err := setupIPTables(addr, icc); err != nil {
			return job.Error(err)
		}
	}

	if ipForward {
		// Enable IPv4 forwarding
		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
		}
	}

	// We can always try removing the iptables chain
	if err := iptables.RemoveExistingChain("DOCKER"); err != nil {
		return job.Error(err)
	}

	if enableIPTables {
		chain, err := iptables.NewChain("DOCKER", bridgeIface)
		if err != nil {
			return job.Error(err)
		}
		portmapper.SetIptablesChain(chain)
	}

	bridgeNetwork = network

	// https://github.com/dotcloud/docker/issues/2768
	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)

	for name, f := range map[string]engine.Handler{
		"allocate_interface": Allocate,
		"release_interface":  Release,
		"allocate_port":      AllocatePort,
		"link":               LinkContainers,
	} {
		if err := job.Eng.Register(name, f); err != nil {
			return job.Error(err)
		}
	}
	return engine.StatusOK
}
Example No. 30
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		origHostPort  = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
	}

	var (
		hostPort  int
		container net.Addr
		host      net.Addr
	)

	/*
	 Try up to 10 times to get a port that's not already allocated.

	 In the event of failure to bind, return the error that portmapper.Map
	 yields.
	*/
	for i := 0; i < 10; i++ {
		// host ip, proto, and host port
		hostPort, err = portallocator.RequestPort(ip, proto, origHostPort)

		if err != nil {
			return job.Error(err)
		}

		if proto == "tcp" {
			host = &net.TCPAddr{IP: ip, Port: hostPort}
			container = &net.TCPAddr{IP: network.IP, Port: containerPort}
		} else {
			host = &net.UDPAddr{IP: ip, Port: hostPort}
			container = &net.UDPAddr{IP: network.IP, Port: containerPort}
		}

		if err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}

		job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort)
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	out.Set("HostIP", ip.String())
	out.SetInt("HostPort", hostPort)

	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
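This handler is registered as "allocate_port" in Example No. 29 and reports the chosen mapping back on stdout as an Env with HostIP and HostPort. The sketch below drives it and decodes that result; the container ID and ports are illustrative, and it assumes the historical engine API (SetenvInt, Env.Decode).
import (
	"bytes"
	"log"

	"github.com/dotcloud/docker/engine" // historical import path
)

// allocatePortExample maps a container port and reads back the host side of the mapping.
func allocatePortExample(eng *engine.Engine) {
	var buf bytes.Buffer
	job := eng.Job("allocate_port", "abc123") // hypothetical container ID
	job.Setenv("Proto", "tcp")
	job.Setenv("HostIP", "0.0.0.0")
	job.SetenvInt("HostPort", 0) // 0 asks portallocator for any free port
	job.SetenvInt("ContainerPort", 6379)
	job.Stdout.Add(&buf)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}

	out := &engine.Env{}
	if err := out.Decode(&buf); err != nil { // parse the Env written by AllocatePort
		log.Fatal(err)
	}
	log.Printf("mapped to %s:%s", out.Get("HostIP"), out.Get("HostPort"))
}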