func ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
	)
	v := url.Values{}

	if job.EnvExists("t") {
		v.Set("t", strconv.Itoa(job.GetenvInt("t")))
	}

	ship := shipWithContainerId(job, name)

	if ship != nil {
		// Only dereference ship once we know it is non-nil.
		cli := client.NewKraneClientApi(*ship, false, job)
		body, statusCode, err := readBody(cli.Call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
		if statusCode == 304 {
			return job.Errorf("Container already stopped")
		} else if statusCode == 404 {
			return job.Errorf("No such container: %s\n", name)
		} else if statusCode == 500 {
			return job.Errorf("Server error trying to stop %s: %s\n", name, err)
		} else if (statusCode >= 200) && (statusCode < 300) {
			fmt.Printf("%s", body)
		} else {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
func ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
	)
	v := url.Values{}

	if job.EnvExists("t") {
		v.Set("t", strconv.Itoa(job.GetenvInt("t")))
	}

	ship := shipWithContainerId(job, name)

	if ship != nil {
		_, statusCode, err := httpRequest("POST", "http", ship.Fqdn, ship.Port, "/containers/"+name+"/restart?"+v.Encode(), nil, false)
		if statusCode == 404 {
			return job.Errorf("No such container: %s\n", name)
		} else if statusCode == 500 {
			return job.Errorf("Server error trying to restart %s: %s\n", name, err)
		} else if (statusCode >= 200) && (statusCode < 300) {
			fmt.Printf("Container %s restarted\n", name)
		} else {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
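
Both handlers above honor an optional "t" grace period taken from the job's environment. As a caller-side sketch, assuming the classic engine.Job env API (the job name "stop" and the container name are illustrative, not confirmed by the snippets):

func stopWithTimeout(eng *engine.Engine) error {
	// The handler reads this back via job.GetenvInt("t").
	job := eng.Job("stop", "my-container")
	job.SetenvInt("t", 30) // seconds to wait before the stop is forced
	return job.Run()
}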
Example #3
func (s *TagStore) CmdDiffAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 3 {
		return job.Errorf("Usage : %s CONTAINERID SRCIMAGEID TAGIMAGEID", job.Name)
	}

	var (
		containerID   = job.Args[0]
		localName     = job.Args[1]
		parentImageID = job.Args[2]
		sf            = utils.NewStreamFormatter(job.GetenvBool("json"))
		rate          = 0 // the rate at which image layer data is written to the container, per second
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	img, err := s.LookupImage(localName)
	if err != nil {
		return job.Error(err)
	}

	dest := s.graph.Driver().MountPath(containerID)
	fi, err := os.Stat(dest)
	if err != nil && !os.IsExist(err) {
		return job.Error(err)
	}
	if !fi.IsDir() {
		return job.Errorf(" Dest %s is not dir", dest)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Diff two mirrors (%s - %s)", img.ID, parentImageID), nil))
	fs, err := s.graph.Driver().Diff(img.ID, parentImageID, nil)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()
	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
	err = archive.ApplyLayer(dest, fs, int64(rate))
	if err != nil {
		return job.Error(err)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))
	return engine.StatusOK
}
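
For context, a sketch of how a caller might drive this job; the job name "image_diff_apply" and the unit of "rate" are assumptions, not something the snippet itself confirms:

func diffAndApply(eng *engine.Engine, containerID, srcImageID, tagImageID string) error {
	job := eng.Job("image_diff_apply", containerID, srcImageID, tagImageID)
	job.SetenvBool("json", true)        // stream progress in JSON format
	job.SetenvInt("rate", 10*1024*1024) // throttle layer writes (unit assumed to be bytes/second)
	return job.Run()
}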
Example #4
func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s NAMESPACE", job.Name)
	}
	var (
		namespace = job.Args[0]
		keyBytes  = job.Getenv("PublicKey")
	)

	if keyBytes == "" {
		return job.Errorf("Missing PublicKey")
	}
	pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes))
	if err != nil {
		return job.Errorf("Error unmarshalling public key: %s", err)
	}

	permission := uint16(job.GetenvInt("Permission"))
	if permission == 0 {
		permission = 0x03
	}

	t.RLock()
	defer t.RUnlock()
	if t.graph == nil {
		job.Stdout.Write([]byte("no graph"))
		return engine.StatusOK
	}

	// Check for any expired grants
	verified, err := t.graph.Verify(pk, namespace, permission)
	if err != nil {
		return job.Errorf("Error verifying key to namespace: %s", namespace)
	}
	if !verified {
		log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID())
		job.Stdout.Write([]byte("not verified"))
	} else if t.expiration.Before(time.Now()) {
		job.Stdout.Write([]byte("expired"))
	} else {
		job.Stdout.Write([]byte("verified"))
	}

	return engine.StatusOK
}
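
A caller-side sketch: generate a libtrust key, marshal its public half to JWK JSON, and hand it to the job through the env. The job name "trust_key_check" is hypothetical, and the JWK serialization relies on libtrust keys implementing json.Marshaler:

func checkNamespaceKey(eng *engine.Engine, namespace string) error {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return err
	}
	jwk, err := json.Marshal(key.PublicKey()) // libtrust keys marshal as JWK
	if err != nil {
		return err
	}
	job := eng.Job("trust_key_check", namespace)
	job.Setenv("PublicKey", string(jwk))
	job.SetenvInt("Permission", 0x03) // the handler falls back to 0x03 when unset or 0
	return job.Run()
}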
Example #5
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		job.Eng.Job("log", "restart", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #6
func (daemon *Daemon) ContainerRestart(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	container, err := daemon.Get(name)
	if err != nil {
		return err
	}
	if err := container.Restart(int(t)); err != nil {
		return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
	}
	container.LogEvent("restart")
	return nil
}
Example #7
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		container.LogEvent("restart")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #8
// ConfigFromJob creates and returns a new DaemonConfig object
// by parsing the contents of a job's environment.
func ConfigFromJob(job *engine.Job) *Config {
	config := &Config{
		Pidfile:                     job.Getenv("Pidfile"),
		Root:                        job.Getenv("Root"),
		AutoRestart:                 job.GetenvBool("AutoRestart"),
		EnableIptables:              job.GetenvBool("EnableIptables"),
		EnableIpForward:             job.GetenvBool("EnableIpForward"),
		BridgeIP:                    job.Getenv("BridgeIP"),
		BridgeIface:                 job.Getenv("BridgeIface"),
		DefaultIp:                   net.ParseIP(job.Getenv("DefaultIp")),
		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
		GraphDriver:                 job.Getenv("GraphDriver"),
		ExecDriver:                  job.Getenv("ExecDriver"),
		EnableSelinuxSupport:        job.GetenvBool("EnableSelinuxSupport"),
	}
	if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil {
		config.GraphOptions = graphOpts
	}

	if dns := job.GetenvList("Dns"); dns != nil {
		config.Dns = dns
	}
	if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil {
		config.DnsSearch = dnsSearch
	}
	if mtu := job.GetenvInt("Mtu"); mtu != 0 {
		config.Mtu = mtu
	} else {
		config.Mtu = GetDefaultNetworkMtu()
	}
	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
	if sockets := job.GetenvList("Sockets"); sockets != nil {
		config.Sockets = sockets
	}

	return config
}
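
ConfigFromJob only reads the env; the producing side uses the matching Setenv family on engine.Job. A minimal sketch with illustrative values:

func buildConfig(job *engine.Job) *Config {
	job.Setenv("Pidfile", "/var/run/docker.pid")
	job.Setenv("Root", "/var/lib/docker")
	job.SetenvBool("EnableIptables", true)
	job.SetenvList("Dns", []string{"8.8.8.8", "8.8.4.4"})
	job.SetenvInt("Mtu", 1500) // 0 falls back to GetDefaultNetworkMtu()
	return ConfigFromJob(job)
}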
Example #9
func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := srv.daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #10
func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if !container.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		container.LogEvent("stop")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
Example #11
func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
		psFilters   filters.Args
		filt_exited []int
	)
	outs := engine.NewTable("Created", 0)

	psFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	if i, ok := psFilters["exited"]; ok {
		for _, value := range i {
			code, err := strconv.Atoi(value)
			if err != nil {
				return job.Error(err)
			}
			filt_exited = append(filt_exited, code)
		}
	}

	if i, ok := psFilters["status"]; ok {
		for _, value := range i {
			if value == "exited" {
				all = true
			}
		}
	}
	names := map[string][]string{}
	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, 1)

	var beforeCont, sinceCont *Container
	if before != "" {
		beforeCont, err = daemon.Get(before)
		if err != nil {
			return job.Error(err)
		}
	}

	if since != "" {
		sinceCont, err = daemon.Get(since)
		if err != nil {
			return job.Error(err)
		}
	}

	errLast := errors.New("last container")
	writeCont := func(container *Container) error {
		container.Lock()
		defer container.Unlock()
		if !container.Running && !all && n <= 0 && since == "" && before == "" {
			return nil
		}
		if !psFilters.Match("name", container.Name) {
			return nil
		}

		if !psFilters.Match("id", container.ID) {
			return nil
		}

		if !psFilters.MatchKVList("label", container.Config.Labels) {
			return nil
		}

		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			return nil
		}
		if n > 0 && displayed == n {
			return errLast
		}
		if since != "" {
			if container.ID == sinceCont.ID {
				return errLast
			}
		}
		if len(filt_exited) > 0 {
			should_skip := true
			for _, code := range filt_exited {
				if code == container.ExitCode && !container.Running {
					should_skip = false
					break
				}
			}
			if should_skip {
				return nil
			}
		}

		if !psFilters.Match("status", container.State.StateString()) {
			return nil
		}
		displayed++
		out := &engine.Env{}
		out.SetJson("Id", container.ID)
		out.SetList("Names", names[container.ID])
		img := container.Config.Image
		_, tag := parsers.ParseRepositoryTag(container.Config.Image)
		if tag == "" {
			img = utils.ImageReference(img, graph.DEFAULTTAG)
		}
		out.SetJson("Image", img)
		if len(container.Args) > 0 {
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")

			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return err
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		out.SetJson("Labels", container.Config.Labels)
		outs.Add(out)
		return nil
	}

	for _, container := range daemon.List() {
		if err := writeCont(container); err != nil {
			if err != errLast {
				return job.Error(err)
			}
			break
		}
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
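
The "filters" env consumed above is a JSON-encoded map of filter names to value lists, which is what filters.FromParam expects. A caller-side sketch (upstream Docker registers this handler as "containers", but treat the wiring as illustrative):

func listExited(eng *engine.Engine, stdout io.Writer) error {
	job := eng.Job("containers")
	job.SetenvBool("all", true)
	job.SetenvInt("limit", 10)
	job.Setenv("filters", `{"status":["exited"],"exited":["0"]}`)
	job.Stdout.Add(stdout) // receive the engine.Table written by the handler
	return job.Run()
}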
Example #12
func (daemon *Daemon) Containers(job *engine.Job) error {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
		psFilters   filters.Args
		filtExited  []int
	)
	containers := []types.Container{}

	psFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return err
	}
	if i, ok := psFilters["exited"]; ok {
		for _, value := range i {
			code, err := strconv.Atoi(value)
			if err != nil {
				return err
			}
			filtExited = append(filtExited, code)
		}
	}

	if i, ok := psFilters["status"]; ok {
		for _, value := range i {
			if value == "exited" {
				all = true
			}
		}
	}
	names := map[string][]string{}
	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, 1)

	var beforeCont, sinceCont *Container
	if before != "" {
		beforeCont, err = daemon.Get(before)
		if err != nil {
			return err
		}
	}

	if since != "" {
		sinceCont, err = daemon.Get(since)
		if err != nil {
			return err
		}
	}

	errLast := errors.New("last container")
	writeCont := func(container *Container) error {
		container.Lock()
		defer container.Unlock()
		if !container.Running && !all && n <= 0 && since == "" && before == "" {
			return nil
		}
		if !psFilters.Match("name", container.Name) {
			return nil
		}

		if !psFilters.Match("id", container.ID) {
			return nil
		}

		if !psFilters.MatchKVList("label", container.Config.Labels) {
			return nil
		}

		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			return nil
		}
		if n > 0 && displayed == n {
			return errLast
		}
		if since != "" {
			if container.ID == sinceCont.ID {
				return errLast
			}
		}
		if len(filtExited) > 0 {
			shouldSkip := true
			for _, code := range filtExited {
				if code == container.ExitCode && !container.Running {
					shouldSkip = false
					break
				}
			}
			if shouldSkip {
				return nil
			}
		}

		if !psFilters.Match("status", container.State.StateString()) {
			return nil
		}
		displayed++
		newC := types.Container{
			ID:    container.ID,
			Names: names[container.ID],
		}
		img := container.Config.Image
		_, tag := parsers.ParseRepositoryTag(container.Config.Image)
		if tag == "" {
			img = utils.ImageReference(img, graph.DEFAULTTAG)
		}
		newC.Image = img
		if len(container.Args) > 0 {
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")

			newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
		} else {
			newC.Command = fmt.Sprintf("%s", container.Path)
		}
		newC.Created = int(container.Created.Unix())
		newC.Status = container.State.String()

		newC.Ports = []types.Port{}
		for port, bindings := range container.NetworkSettings.Ports {
			p, _ := nat.ParsePort(port.Port())
			if len(bindings) == 0 {
				newC.Ports = append(newC.Ports, types.Port{
					PrivatePort: p,
					Type:        port.Proto(),
				})
				continue
			}
			for _, binding := range bindings {
				h, _ := nat.ParsePort(binding.HostPort)
				newC.Ports = append(newC.Ports, types.Port{
					PrivatePort: p,
					PublicPort:  h,
					Type:        port.Proto(),
					IP:          binding.HostIp,
				})
			}
		}

		if size {
			sizeRw, sizeRootFs := container.GetSize()
			newC.SizeRw = int(sizeRw)
			newC.SizeRootFs = int(sizeRootFs)
		}
		newC.Labels = container.Config.Labels
		containers = append(containers, newC)
		return nil
	}

	for _, container := range daemon.List() {
		if err := writeCont(container); err != nil {
			if err != errLast {
				return err
			}
			break
		}
	}
	sort.Sort(sort.Reverse(ByCreated(containers)))
	if err = json.NewEncoder(job.Stdout).Encode(containers); err != nil {
		return err
	}
	return nil
}
Example #13
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// Build the container-side address from the container's IP, proto, and container port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return job.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to MaxAllocatedPortAttempts times to get a port that's not
	// already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//

	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}

		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
			// There is no point in immediately retrying to map an explicitly
			// chosen port.
			if hostPort != 0 {
				job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
				break
			}

			// Automatically chosen 'free' port failed to bind: move on the next.
			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
		} else {
			// some other error during mapping
			job.Logf("Received an unexpected error during port allocation: %s", err.Error())
			break
		}
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
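
The allocator reports the chosen binding back on the job's stdout as an engine.Env. A sketch of the calling side (upstream Docker registers this handler as "allocate_port"; the rest of the wiring is illustrative):

func allocateTCP(eng *engine.Engine, containerID string) (string, int, error) {
	job := eng.Job("allocate_port", containerID)
	job.Setenv("Proto", "tcp")
	job.SetenvInt("ContainerPort", 80)
	job.SetenvInt("HostPort", 0) // 0 lets portmapper pick any free port
	env, err := job.Stdout.AddEnv() // collect the HostIP/HostPort env the job writes
	if err != nil {
		return "", 0, err
	}
	if err := job.Run(); err != nil {
		return "", 0, err
	}
	return env.Get("HostIP"), env.GetInt("HostPort"), nil
}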
Example #14
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) error {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return fmt.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// Build the container-side address from the container's IP, proto, and container port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return fmt.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to MaxAllocatedPortAttempts times to get a port that's not
	// already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//

	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}
		// There is no point in immediately retrying to map an explicitly
		// chosen port.
		if hostPort != 0 {
			logrus.Warnf("Failed to allocate and map port %d: %s", hostPort, err)
			break
		}
		logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
	}

	if err != nil {
		return err
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #15
func (srv *Server) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
	)
	outs := engine.NewTable("Created", 0)

	names := map[string][]string{}
	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)

	var beforeCont, sinceCont *daemon.Container
	if before != "" {
		beforeCont = srv.daemon.Get(before)
		if beforeCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
		}
	}

	if since != "" {
		sinceCont = srv.daemon.Get(since)
		if sinceCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
		}
	}

	errLast := errors.New("last container")
	writeCont := func(container *daemon.Container) error {
		container.Lock()
		defer container.Unlock()
		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
			return nil
		}
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			return nil
		}
		if n > 0 && displayed == n {
			return errLast
		}
		if since != "" {
			if container.ID == sinceCont.ID {
				return errLast
			}
		}
		displayed++
		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetList("Names", names[container.ID])
		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
		if len(container.Args) > 0 {
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")

			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return err
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		outs.Add(out)
		return nil
	}

	for _, container := range srv.daemon.List() {
		if err := writeCont(container); err != nil {
			if err != errLast {
				return job.Error(err)
			}
			break
		}
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
Example #16
func (s *TagStore) CmdPullAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 4 {
		return job.Errorf("Usage: %s CONTAINERID CONTAINERIMAGE IMAGE TAG", job.Name)
	}

	var (
		containerID    = job.Args[0]
		containerImage = job.Args[1]
		localName      = job.Args[2]
		tag            = job.Args[3]
		sf             = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig     = &registry.AuthConfig{}
		metaHeaders    map[string][]string
		mirrors        []string
		rate           = 0
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	for {
		c, err := s.poolAdd("pull", localName+":"+tag)
		if err != nil {
			if c != nil {
				// Another pull of the same repository is already taking place; just wait for it to finish
				job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
				<-c
				continue
			} else {
				return job.Error(err)
			}
		}
		break
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint.VersionString(1) == registry.IndexServerAddress() {
		// If pulling "index.docker.io/foo/bar", it is stored locally under "foo/bar"
		localName = remoteName
		isOfficial := isOfficialName(remoteName)
		if isOfficial && strings.IndexRune(remoteName, '/') == -1 {
			remoteName = "library/" + remoteName
		}
	}
	// Use provided mirrors, if any
	mirrors = s.mirrors

	if err = s.pullMRepository(r, job.Stdout, containerID, containerImage, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors, rate); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
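
Finally, a sketch of driving the pull-and-apply job; the job name "image_pull_apply" is hypothetical, while authConfig and metaHeaders travel as JSON env values just as the handler reads them:

func pullAndApply(eng *engine.Engine, containerID, containerImage, image, tag string) error {
	job := eng.Job("image_pull_apply", containerID, containerImage, image, tag)
	job.SetenvBool("json", true)
	job.SetenvBool("parallel", true)                      // pull layers in parallel
	job.SetenvInt("rate", 0)                              // 0 = no throttling (assumed)
	job.SetenvJson("authConfig", &registry.AuthConfig{})  // anonymous pull
	return job.Run()
}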