Example #1
// FindOnionIPAddress finds the IP address of a target container that is connected
// to the given network. This IP address is accessible from any other container
// connected to the same network.
func FindOnionIPAddress(cli *client.Client, target, network string) (string, error) {
	inspect, err := cli.ContainerInspect(target)
	if err != nil {
		return "", err
	}

	endpoint, ok := inspect.NetworkSettings.Networks[network]
	if !ok {
		return "", fmt.Errorf("inspect container: container '%s' not connected to network '%s'", target, network)
	}

	return endpoint.IPAddress, nil
}
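A minimal usage sketch, assuming an engine-api-era client where client.NewEnvClient is available; the container name "tor-hidden-service" and network name "onion-net" are placeholders:

func ExampleFindOnionIPAddress() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Both names below are hypothetical; substitute your own container and network.
	ip, err := FindOnionIPAddress(cli, "tor-hidden-service", "onion-net")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("target is reachable at %s\n", ip)
}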
Example #2
func GetOnionHostname(cli *client.Client, containerID string) (string, error) {
	content, stat, err := cli.CopyFromContainer(containerID, HostnamePath)
	// XXX: This isn't very pretty. But we need to wait until Tor generates
	//      an .onion address, and there's not really any better way of
	//      doing it.
	for err != nil && strings.Contains(err.Error(), "no such file or directory") {
		// Make sure the container hasn't died.
		if inspect, err := cli.ContainerInspect(containerID); err != nil {
			return "", fmt.Errorf("error inspecting container: %s", err)
		} else if !isRunning(inspect.State) {
			return "", fmt.Errorf("container died before the hostname was computed")
		}

		log.Warnf("tor onion hostname not found in container, retrying after a short nap...")
		time.Sleep(500 * time.Millisecond)

		content, stat, err = cli.CopyFromContainer(containerID, HostnamePath)
	}
	if err != nil {
		return "", err
	}
	defer content.Close()

	if stat.Mode.IsDir() {
		return "", fmt.Errorf("hostname file is a directory")
	}

	tr := tar.NewReader(content)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", err
		}

		// XXX: Maybe do filepath.Base()?
		if hdr.Name != "hostname" {
			continue
		}

		data, err := ioutil.ReadAll(tr)
		if err != nil {
			return "", err
		}

		hostname := string(data)
		return strings.TrimSpace(hostname), nil
	}

	return "", fmt.Errorf("hostname file not in copied archive")
}
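For context, a hedged sketch of driving GetOnionHostname end to end; HostnamePath is assumed to point at Tor's hidden-service hostname file (e.g. /var/lib/tor/hidden_service/hostname), and containerID would come from an earlier ContainerCreate/ContainerStart:

func ExampleGetOnionHostname(cli *client.Client, containerID string) {
	// GetOnionHostname blocks, polling every 500ms, until Tor writes the
	// .onion address or the container dies.
	hostname, err := GetOnionHostname(cli, containerID)
	if err != nil {
		log.Fatalf("could not read onion hostname: %v", err)
	}
	fmt.Printf("hidden service available at http://%s/\n", hostname)
}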
Example #3
// discoverAndSync enumerates volumes and builds the refCountsMap, then syncs it with mount info
func (r refCountsMap) discoverAndSync(c *client.Client, d *vmdkDriver) error {
	// We assume refcounts start empty; enforce it by clearing the map.
	for name := range r {
		delete(r, name)
	}

	filterArgs := filters.NewArgs()
	filterArgs.Add("status", "running")
	filterArgs.Add("status", "paused")
	filterArgs.Add("status", "restarting")
	containers, err := c.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	if err != nil {
		return err
	}

	log.Debugf("Found %d running or paused containers", len(containers))
	for _, ct := range containers {
		containerJSONInfo, err := c.ContainerInspect(context.Background(), ct.ID)
		if err != nil {
			log.Errorf("ContainerInspect failed for %s (err: %v)", ct.Names, err)
			continue
		}
		log.Debugf("  Mounts for %v", ct.Names)

		for _, mount := range containerJSONInfo.Mounts {
			if mount.Driver == driverName {
				r.incr(mount.Name)
				log.Debugf("  name=%v (driver=%s source=%s)",
					mount.Name, mount.Driver, mount.Source)
			}
		}
	}

	// Check that refcounts and the actual mount info from Linux match.
	// If they don't, unmount what isn't needed, or yell if something is
	// not mounted but should be (that's an error; we should not get there).

	r.getMountInfo()
	r.syncMountsWithRefCounters(d)

	return nil
}
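The method relies on an incr helper defined elsewhere in the driver; as a rough sketch of the shape it could take, assuming refCountsMap is a map[string]*refCount (both the struct and the method below are illustrative, not the project's actual definitions):

// refCount tracks how many running containers reference a volume.
// NOTE: illustrative sketch; the real driver's definition may differ.
type refCount struct {
	count int
}

type refCountsMap map[string]*refCount

// incr bumps the reference count for a volume, creating the entry on first use.
func (r refCountsMap) incr(vol string) int {
	rc, ok := r[vol]
	if !ok {
		rc = &refCount{}
		r[vol] = rc
	}
	rc.count++
	return rc.count
}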
Example #4
// FindTargetPorts finds the set of ports EXPOSE'd on the target container. This
// includes non-TCP ports, so callers should make sure they exclude protocols
// not supported by Tor.
func FindTargetPorts(cli *client.Client, target string) ([]nat.Port, error) {
	inspect, err := cli.ContainerInspect(target)
	if err != nil {
		return nil, err
	}

	// Make sure we don't dereference nils.
	if inspect.NetworkSettings == nil || inspect.NetworkSettings.Ports == nil {
		return nil, fmt.Errorf("inspect container: network settings not available")
	}

	// Get keys from map.
	var ports []nat.Port
	for port := range inspect.NetworkSettings.Ports {
		ports = append(ports, port)
	}

	return ports, nil
}
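Because the returned slice can include UDP entries, a caller will usually filter it; a sketch (the wrapper function is hypothetical), using the Proto accessor from the go-connections nat package:

func findTCPTargetPorts(cli *client.Client, target string) ([]nat.Port, error) {
	ports, err := FindTargetPorts(cli, target)
	if err != nil {
		return nil, err
	}

	var tcpPorts []nat.Port
	for _, port := range ports {
		// Tor hidden services can only forward TCP streams.
		if port.Proto() == "tcp" {
			tcpPorts = append(tcpPorts, port)
		}
	}
	return tcpPorts, nil
}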
Example #5
// newDockerContainerHandler returns a new container.ContainerHandler
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	storageDir string,
	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
	inHostNamespace bool,
	metadataEnvs []string,
	dockerVersion []int,
	ignoreMetrics container.MetricSet,
	thinPoolWatcher *devicemapper.ThinPoolWatcher,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
		storageDir = path.Join(rootFs, storageDir)
	}

	id := ContainerNameToDockerId(name)

	// Add the Containers dir where the log files are stored.
	// FIXME: Give `otherStorageDir` a more descriptive name.
	otherStorageDir := path.Join(storageDir, pathToContainersDir, id)

	rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
	if err != nil {
		return nil, err
	}

	// Determine the rootfs storage dir, or the pool name used to identify the device.
	var (
		rootfsStorageDir string
		poolName         string
	)
	switch storageDriver {
	case aufsStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
	case overlayStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
	case devicemapperStorageDriver:
		status, err := Status()
		if err != nil {
			return nil, fmt.Errorf("unable to determine docker status: %v", err)
		}

		poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
	}

	// TODO: extract object mother method
	handler := &dockerContainerHandler{
		id:                 id,
		client:             client,
		name:               name,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths:        cgroupPaths,
		cgroupManager:      cgroupManager,
		storageDriver:      storageDriver,
		fsInfo:             fsInfo,
		rootFs:             rootFs,
		poolName:           poolName,
		rootfsStorageDir:   rootfsStorageDir,
		envs:               make(map[string]string),
		ignoreMetrics:      ignoreMetrics,
		thinPoolWatcher:    thinPoolWatcher,
	}

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.ContainerInspect(context.Background(), id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	// Timestamp returned by Docker is in time.RFC3339Nano format.
	handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
	if err != nil {
		// This should not happen; report the error just in case.
		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
	}
	handler.pid = ctnr.State.Pid

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
	handler.labels = ctnr.Config.Labels
	handler.image = ctnr.Config.Image
	handler.networkMode = ctnr.HostConfig.NetworkMode
	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]

	// Obtain the IP address for the container.
	// If the NetworkMode starts with 'container:' then we need to use the IP address of the specified container.
	// This happens in cases such as Kubernetes, where the container doesn't have an IP address itself and we need to use the pod's address.
	ipAddress := ctnr.NetworkSettings.IPAddress
	networkMode := string(ctnr.HostConfig.NetworkMode)
	if ipAddress == "" && strings.HasPrefix(networkMode, "container:") {
		containerId := strings.TrimPrefix(networkMode, "container:")
		c, err := client.ContainerInspect(context.Background(), containerId)
		if err != nil {
			return nil, fmt.Errorf("failed to inspect container %q: %v", containerId, err)
		}
		ipAddress = c.NetworkSettings.IPAddress
	}

	handler.ipAddress = ipAddress

	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
		handler.fsHandler = &dockerFsHandler{
			fsHandler:       common.NewFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo),
			thinPoolWatcher: thinPoolWatcher,
			deviceID:        handler.deviceID,
		}
	}

	// Split env vars to get the metadata map.
	for _, exposedEnv := range metadataEnvs {
		for _, envVar := range ctnr.Config.Env {
			splits := strings.SplitN(envVar, "=", 2)
			if splits[0] == exposedEnv {
				handler.envs[strings.ToLower(exposedEnv)] = splits[1]
			}
		}
	}

	return handler, nil
}
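The subtlest part above is the 'container:' network-mode resolution; pulled out as a standalone sketch (the function name and error wording are illustrative, the logic is taken directly from the example), it reads:

// resolveIPAddress follows a "container:<id>" NetworkMode to the container
// that actually owns the network namespace, as happens with Kubernetes pods.
func resolveIPAddress(client *docker.Client, ctnr types.ContainerJSON) (string, error) {
	ipAddress := ctnr.NetworkSettings.IPAddress
	networkMode := string(ctnr.HostConfig.NetworkMode)
	if ipAddress == "" && strings.HasPrefix(networkMode, "container:") {
		containerId := strings.TrimPrefix(networkMode, "container:")
		c, err := client.ContainerInspect(context.Background(), containerId)
		if err != nil {
			return "", fmt.Errorf("failed to inspect container %q: %v", containerId, err)
		}
		ipAddress = c.NetworkSettings.IPAddress
	}
	return ipAddress, nil
}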