func (p *LinuxResourcePool) releaseSystemResources(logger lager.Logger, id string) error {
	pRunner := logging.Runner{
		CommandRunner: p.runner,
		Logger:        logger,
	}

	bridgeName, err := ioutil.ReadFile(path.Join(p.depotPath, id, "bridge-name"))
	if err == nil {
		if err := p.bridges.Release(string(bridgeName), id); err != nil {
			return fmt.Errorf("containerpool: release bridge %s: %v", bridgeName, err)
		}
	}

	rootfsProvider, err := ioutil.ReadFile(path.Join(p.depotPath, id, "rootfs-provider"))
	if err != nil {
		rootfsProvider = []byte("invalid-rootfs-provider")
	}

	destroy := exec.Command(path.Join(p.binPath, "destroy.sh"), path.Join(p.depotPath, id))

	err = pRunner.Run(destroy)
	if err != nil {
		return err
	}

	if shouldCleanRootfs(string(rootfsProvider)) {
		if err = p.rootfsRemover.Remove(layercake.ContainerID(id)); err != nil {
			return err
		}
	}

	p.filterProvider.ProvideFilter(id).TearDown()
	return nil
}
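// Start runs the container's start.sh script with the container ID and PATH
// in its environment and, on success, marks the container as active.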
func (c *LinuxContainer) Start() error {
	cLog := c.logger.Session("start")

	cLog.Debug("starting")

	start := exec.Command(path.Join(c.ContainerPath, "start.sh"))
	start.Env = []string{
		"id=" + c.ID(),
		"PATH=" + os.Getenv("PATH"),
	}

	cRunner := logging.Runner{
		CommandRunner: c.runner,
		Logger:        cLog,
	}

	err := cRunner.Run(start)
	if err != nil {
		cLog.Error("failed-to-start", err)
		return fmt.Errorf("container: start: %v", err)
	}

	c.setState(linux_backend.StateActive)

	cLog.Info("started")

	return nil
}
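// SetLimits applies bandwidth limits to the container by invoking its
// net_rate.sh script. The rate is passed in bits per second (hence the *8),
// the burst in bytes per second.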
func (m *ContainerBandwidthManager) SetLimits(
	logger lager.Logger,
	limits garden.BandwidthLimits,
) error {
	runner := logging.Runner{
		CommandRunner: m.runner,
		Logger:        logger,
	}

	setRate := exec.Command(path.Join(m.containerPath, "net_rate.sh"))
	setRate.Env = []string{
		fmt.Sprintf("BURST=%d", limits.BurstRateInBytesPerSecond),
		fmt.Sprintf("RATE=%d", limits.RateInBytesPerSecond*8),
	}

	return runner.Run(setRate)
}
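// StreamIn extracts a tar stream into spec.Path inside the container. It
// reads wshd's PID from run/wshd.pid and invokes the nstar helper with the
// tar binary, PID, user and destination path, feeding it the tar stream on
// stdin and returning any command output on failure.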
func (c *LinuxContainer) StreamIn(spec garden.StreamInSpec) error {
	nsTarPath := path.Join(c.ContainerPath, "bin", "nstar")
	tarPath := path.Join(c.ContainerPath, "bin", "tar")
	pidPath := path.Join(c.ContainerPath, "run", "wshd.pid")

	pidFile, err := os.Open(pidPath)
	if err != nil {
		return err
	}
	defer pidFile.Close()

	var pid int
	_, err = fmt.Fscanf(pidFile, "%d", &pid)
	if err != nil {
		return err
	}

	user := spec.User
	if user == "" {
		user = "root" // assumed default: stream in as root when no user is given
	}

	buf := new(bytes.Buffer)
	tar := exec.Command(
		nsTarPath,
		tarPath,
		strconv.Itoa(pid),
		user,
		spec.Path,
	)
	tar.Stdout = buf
	tar.Stderr = buf

	tar.Stdin = spec.TarStream

	cLog := c.logger.Session("stream-in")

	cRunner := logging.Runner{
		CommandRunner: c.runner,
		Logger:        cLog,
	}

	if err := cRunner.Run(tar); err != nil {
		return fmt.Errorf("error streaming in: %v. Output: %s", err, buf.String())
	}
	return nil
}
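// quotaInfo syncs outstanding disk I/O and then parses the last line of
// `btrfs qgroup show -rF --raw` for the given path into a QuotaInfo,
// including the current limit when one is set.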
func (m *BtrfsQuotaManager) quotaInfo(logger lager.Logger, path string) (*QuotaInfo, error) {
	var (
		cmdOut bytes.Buffer
		info   QuotaInfo
		limit  string
	)

	runner := logging.Runner{
		Logger:        logger,
		CommandRunner: m.Runner,
	}

	syncCmd := exec.Command("sync")
	if err := runner.Run(syncCmd); err != nil {
		return nil, fmt.Errorf("quota_manager: sync disk i/o: %s", err)
	}

	cmd := exec.Command("sh", "-c", fmt.Sprintf("btrfs qgroup show -rF --raw %s", path))
	cmd.Stdout = &cmdOut

	if err := runner.Run(cmd); err != nil {
		return nil, fmt.Errorf("quota_manager: run quota info: %v", err)
	}

	lines := strings.Split(strings.TrimSpace(cmdOut.String()), "\n")

	_, err := fmt.Sscanf(lines[len(lines)-1], "%s %d %d %s", &info.Id, &info.TotalUsage, &info.ExclusiveUsage, &limit)
	if err != nil {
		return nil, fmt.Errorf("quota_manager: parse quota info: %v", err)
	}

	if limit != "none" {
		limitBytes, err := strconv.ParseUint(limit, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("quota_manager: parse quota limit: %v", err)
		}
		info.Limit = limitBytes
	}

	return &info, nil
}
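// Start configures the container's iptables rules via the IPTablesManager,
// then runs start.sh (logged as the wshd start) and marks the container as
// active.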
func (c *LinuxContainer) Start() error {
	cLog := c.logger.Session("start", lager.Data{"handle": c.Handle()})
	cLog.Debug("starting")

	cLog.Debug("iptables-setup-starting")
	err := c.ipTablesManager.ContainerSetup(
		c.ID(), c.Resources.Bridge, c.Resources.Network.IP, c.Resources.Network.Subnet,
	)
	if err != nil {
		cLog.Error("iptables-setup-failed", err)
		return fmt.Errorf("container: start: %v", err)
	}
	cLog.Debug("iptables-setup-ended")

	cLog.Debug("wshd-start-starting")
	start := exec.Command(path.Join(c.ContainerPath, "start.sh"))
	start.Env = []string{
		"id=" + c.ID(),
		"PATH=" + os.Getenv("PATH"),
	}

	cRunner := logging.Runner{
		CommandRunner: c.runner,
		Logger:        cLog,
	}

	err = cRunner.Run(start)
	if err != nil {
		cLog.Error("wshd-start-failed", err)
		return fmt.Errorf("container: start: %v", err)
	}
	cLog.Debug("wshd-start-ended")

	c.setState(linux_backend.StateActive)

	cLog.Debug("ended")
	return nil
}
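// SetLimits applies a btrfs qgroup limit to the subvolume, using the qgroup
// ID discovered via quotaInfo. Exclusive-scope limits are applied with -e.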
func (m *BtrfsQuotaManager) SetLimits(logger lager.Logger, subvolumePath string, limits garden.DiskLimits) error {
	runner := logging.Runner{
		Logger:        logger,
		CommandRunner: m.Runner,
	}

	quotaInfo, err := m.quotaInfo(logger, subvolumePath)
	if err != nil {
		return err
	}

	args := []string{"qgroup", "limit"}
	if limits.Scope == garden.DiskLimitScopeExclusive {
		args = append(args, "-e")
	}

	args = append(args, fmt.Sprintf("%d", limits.ByteHard), quotaInfo.Id, subvolumePath)
	cmd := exec.Command("btrfs", args...)
	if err := runner.Run(cmd); err != nil {
		return fmt.Errorf("quota_manager: failed to apply limit: %v", err)
	}

	return nil
}
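// Restore rebuilds in-memory container state from a snapshot: state, env,
// events, memory limits and tracked processes, then re-applies network
// setup, port mappings and net-out rules.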
func (c *LinuxContainer) Restore(snapshot linux_backend.LinuxContainerSpec) error {
	cLog := c.logger.Session("restore")

	cLog.Debug("restoring")

	cRunner := logging.Runner{
		CommandRunner: c.runner,
		Logger:        cLog,
	}

	c.setState(linux_backend.State(snapshot.State))

	c.Env = snapshot.Env

	for _, ev := range snapshot.Events {
		c.registerEvent(ev)
	}

	if snapshot.Limits.Memory != nil {
		err := c.LimitMemory(*snapshot.Limits.Memory)
		if err != nil {
			cLog.Error("failed-to-limit-memory", err)
			return err
		}
	}

	signaller := c.processSignaller()

	for _, process := range snapshot.Processes {
		cLog.Info("restoring-process", lager.Data{
			"process": process,
		})

		c.processIDPool.Restore(process.ID)
		c.processTracker.Restore(process.ID, signaller)
	}

	net := exec.Command(path.Join(c.ContainerPath, "net.sh"), "setup")

	if err := cRunner.Run(net); err != nil {
		cLog.Error("failed-to-reenforce-network-rules", err)
		return err
	}

	for _, in := range snapshot.NetIns {
		if _, _, err := c.NetIn(in.HostPort, in.ContainerPort); err != nil {
			cLog.Error("failed-to-reenforce-port-mapping", err)
			return err
		}
	}

	for _, out := range snapshot.NetOuts {
		if err := c.NetOut(out); err != nil {
			cLog.Error("failed-to-reenforce-net-out", err)
			return err
		}
	}

	cLog.Info("restored")

	return nil
}
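// acquireSystemResources creates the container's depot directory, prepares
// its rootfs and directories, runs create.sh with the container's network
// and rootfs settings in the environment, and records the rootfs provider,
// container version and bind mounts. On failure the partially acquired
// resources are released.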
func (p *LinuxResourcePool) acquireSystemResources(spec garden.ContainerSpec, id string, resources *linux_backend.Resources, pLog lager.Logger) (string, process.Env, error) {
	containerPath := path.Join(p.depotPath, id)
	if err := os.MkdirAll(containerPath, 0755); err != nil {
		return "", nil, fmt.Errorf("resource_pool: creating container directory: %v", err)
	}

	rootFSPath, rootFSEnvVars, err := p.setupContainerDirectories(spec, id, resources, pLog)
	if err != nil {
		os.RemoveAll(containerPath)
		return "", nil, err
	}

	createCmd := path.Join(p.binPath, "create.sh")
	create := exec.Command(createCmd, containerPath)
	suff, _ := resources.Network.Subnet.Mask.Size()
	env := process.Env{
		"id":                   id,
		"rootfs_path":          rootFSPath,
		"network_host_ip":      subnets.GatewayIP(resources.Network.Subnet).String(),
		"network_container_ip": resources.Network.IP.String(),
		"network_cidr_suffix":  strconv.Itoa(suff),
		"network_cidr":         resources.Network.Subnet.String(),
		"external_ip":          p.externalIP.String(),
		"container_iface_mtu":  fmt.Sprintf("%d", p.mtu),
		"bridge_iface":         resources.Bridge,
		"root_uid":             strconv.FormatUint(uint64(resources.RootUID), 10),
		"PATH":                 os.Getenv("PATH"),
	}
	create.Env = env.Array()

	pRunner := logging.Runner{
		CommandRunner: p.runner,
		Logger:        pLog.Session("create-script"),
	}

	err = pRunner.Run(create)
	defer cleanup(&err, func() {
		p.tryReleaseSystemResources(pLog, id)
	})

	if err != nil {
		pLog.Error("create-command-failed", err, lager.Data{
			"CreateCmd": createCmd,
			"Env":       create.Env,
		})
		return "", nil, err
	}

	err = p.saveRootFSProvider(id, "docker-composite")
	if err != nil {
		pLog.Error("save-rootfs-provider-failed", err, lager.Data{
			"Id":     id,
			"rootfs": spec.RootFSPath,
		})
		return "", nil, err
	}

	err = p.saveContainerVersion(id)
	if err != nil {
		pLog.Error("save-container-version-failed", err, lager.Data{
			"Id":            id,
			"ContainerPath": containerPath,
		})
		return "", nil, err
	}

	err = p.writeBindMounts(containerPath, rootFSPath, spec.BindMounts, resources.RootUID)
	if err != nil {
		pLog.Error("bind-mounts-failed", err)
		return "", nil, err
	}

	return rootFSPath, rootFSEnvVars, nil
}
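// GetLimits reads the container's current bandwidth settings by running
// net.sh get_egress_info / get_ingress_info and parsing the reported rate
// and burst values; rates are divided by 8 to convert bits to bytes.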
func (m *ContainerBandwidthManager) GetLimits(logger lager.Logger) (garden.ContainerBandwidthStat, error) {
	limits := garden.ContainerBandwidthStat{}

	runner := logging.Runner{
		CommandRunner: m.runner,
		Logger:        logger,
	}

	egressOut := new(bytes.Buffer)

	egress := exec.Command(path.Join(m.containerPath, "net.sh"), "get_egress_info")
	egress.Env = []string{"ID=" + m.containerID}
	egress.Stdout = egressOut

	err := runner.Run(egress)
	if err != nil {
		return limits, err
	}

	matches := IN_RATE_PATTERN.FindStringSubmatch(egressOut.String())
	if matches != nil {
		inRate, err := strconv.ParseUint(matches[1], 10, 0)
		if err != nil {
			return limits, err
		}

		inBurst, err := strconv.ParseUint(matches[3], 10, 0)
		if err != nil {
			return limits, err
		}

		inRateUnit := matches[2]
		inBurstUnit := matches[4]

		limits.InRate = convertUnits(inRate, inRateUnit) / 8
		limits.InBurst = convertUnits(inBurst, inBurstUnit)
	}

	ingressOut := new(bytes.Buffer)

	ingress := exec.Command(path.Join(m.containerPath, "net.sh"), "get_ingress_info")
	ingress.Env = []string{"ID=" + m.containerID}
	ingress.Stdout = ingressOut

	err = runner.Run(ingress)
	if err != nil {
		return limits, err
	}

	matches = OUT_RATE_PATTERN.FindStringSubmatch(ingressOut.String())
	if matches != nil {
		outRate, err := strconv.ParseUint(matches[1], 10, 0)
		if err != nil {
			return limits, err
		}

		outBurst, err := strconv.ParseUint(matches[3], 10, 0)
		if err != nil {
			return limits, err
		}

		outRateUnit := matches[2]
		outBurstUnit := matches[4]

		limits.OutRate = convertUnits(outRate, outRateUnit) / 8
		limits.OutBurst = convertUnits(outBurst, outBurstUnit)
	}

	return limits, nil
}
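// acquireSystemResources provisions everything a container needs on disk and
// on the host: the depot directory, a rootfs from the matching provider, a
// reserved bridge, the create.sh environment, persisted rootfs-provider and
// version metadata, bind mounts and the container's network filter. On
// failure the partially acquired resources are released.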
func (p *LinuxResourcePool) acquireSystemResources(id, handle, containerPath, rootFSPath string, resources *linux_backend.Resources, bindMounts []garden.BindMount, diskQuota int64, pLog lager.Logger) (string, process.Env, error) {
	if err := os.MkdirAll(containerPath, 0755); err != nil {
		return "", nil, fmt.Errorf("containerpool: creating container directory: %v", err)
	}

	rootfsURL, err := url.Parse(rootFSPath)
	if err != nil {
		pLog.Error("parse-rootfs-path-failed", err, lager.Data{
			"RootFSPath": rootFSPath,
		})
		return "", nil, err
	}

	provider, found := p.rootfsProviders[rootfsURL.Scheme]
	if !found {
		pLog.Error("unknown-rootfs-provider", nil, lager.Data{
			"provider": rootfsURL.Scheme,
		})
		return "", nil, ErrUnknownRootFSProvider
	}

	rootfsPath, rootFSEnvVars, err := provider.ProvideRootFS(pLog.Session("create-rootfs"), id, rootfsURL, resources.RootUID != 0, diskQuota)
	if err != nil {
		pLog.Error("provide-rootfs-failed", err)
		return "", nil, err
	}

	if resources.Bridge, err = p.bridges.Reserve(resources.Network.Subnet, id); err != nil {
		pLog.Error("reserve-bridge-failed", err, lager.Data{
			"Id":     id,
			"Subnet": resources.Network.Subnet,
			"Bridge": resources.Bridge,
		})

		p.rootfsRemover.Remove(layercake.ContainerID(rootfsPath))
		return "", nil, err
	}

	if err = p.saveBridgeName(id, resources.Bridge); err != nil {
		pLog.Error("save-bridge-name-failed", err, lager.Data{
			"Id":     id,
			"Bridge": resources.Bridge,
		})

		p.rootfsRemover.Remove(layercake.ContainerID(rootfsPath))
		return "", nil, err
	}

	createCmd := path.Join(p.binPath, "create.sh")
	create := exec.Command(createCmd, containerPath)
	suff, _ := resources.Network.Subnet.Mask.Size()
	env := process.Env{
		"id":                   id,
		"rootfs_path":          rootfsPath,
		"network_host_ip":      subnets.GatewayIP(resources.Network.Subnet).String(),
		"network_container_ip": resources.Network.IP.String(),
		"network_cidr_suffix":  strconv.Itoa(suff),
		"network_cidr":         resources.Network.Subnet.String(),
		"external_ip":          p.externalIP.String(),
		"container_iface_mtu":  fmt.Sprintf("%d", p.mtu),
		"bridge_iface":         resources.Bridge,
		"root_uid":             strconv.FormatUint(uint64(resources.RootUID), 10),
		"PATH":                 os.Getenv("PATH"),
	}
	create.Env = env.Array()

	pRunner := logging.Runner{
		CommandRunner: p.runner,
		Logger:        pLog.Session("create-script"),
	}

	err = pRunner.Run(create)
	defer cleanup(&err, func() {
		p.tryReleaseSystemResources(p.logger, id)
	})

	if err != nil {
		p.logger.Error("create-command-failed", err, lager.Data{
			"CreateCmd": createCmd,
			"Env":       create.Env,
		})
		return "", nil, err
	}

	err = p.saveRootFSProvider(id, provider.Name())
	if err != nil {
		p.logger.Error("save-rootfs-provider-failed", err, lager.Data{
			"Id":     id,
			"rootfs": rootfsURL.String(),
		})
		return "", nil, err
	}

	err = p.saveContainerVersion(id)
	if err != nil {
		p.logger.Error("save-container-version-failed", err, lager.Data{
			"Id":            id,
			"ContainerPath": containerPath,
		})
		return "", nil, err
	}

	err = p.writeBindMounts(containerPath, rootfsPath, bindMounts)
	if err != nil {
		p.logger.Error("bind-mounts-failed", err)
		return "", nil, err
	}

	filterLog := pLog.Session("setup-filter")

	filterLog.Debug("starting")
	if err = p.filterProvider.ProvideFilter(id).Setup(handle); err != nil {
		p.logger.Error("set-up-filter-failed", err)
		return "", nil, fmt.Errorf("resource_pool: set up filter: %v", err)
	}
	filterLog.Debug("finished")

	return rootfsPath, rootFSEnvVars, nil
}