func getIntNames(netID string, docker *dockerclient.Client) (*intNames, error) {
	net, err := docker.NetworkInspect(context.Background(), netID)
	if err != nil {
		log.Errorf("Error getting networks: %v", err)
		return nil, err
	}

	names := &intNames{}

	if net.Driver != "vxlan" {
		log.Errorf("Network %v is not a vxlan network", netID)
		return nil, errors.New("not a vxlan network")
	}

	names.VxlanName = "vx_" + netID[:12]

	// get interface names from options first
	if v, ok := net.Options["vxlanName"]; ok {
		names.VxlanName = v
	}

	return names, nil
}
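// A usage sketch: the vxlanName option read above can be supplied when the
// network is created. Names here are illustrative, and the NetworkCreate
// signature varies across the client versions used in these snippets; this
// follows the context-taking variant that matches getIntNames' client.
func exampleCreateVxlanNetwork(docker *dockerclient.Client) error {
	_, err := docker.NetworkCreate(context.Background(), types.NetworkCreate{
		Name:    "my-vxlan-net",
		Driver:  "vxlan",
		Options: map[string]string{"vxlanName": "vx_custom0"},
	})
	return err
}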
// FindOnionIPAddress finds the IP address of a target container that is connected
// to the given network. This IP address is accessible from any other container
// connected to the same network.
func FindOnionIPAddress(cli *client.Client, target, network string) (string, error) {
	inspect, err := cli.ContainerInspect(target)
	if err != nil {
		return "", err
	}

	endpoint, ok := inspect.NetworkSettings.Networks[network]
	if !ok {
		return "", fmt.Errorf("inspect container: container '%s' not connected to network '%s'", target, network)
	}

	return endpoint.IPAddress, nil
}
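// A minimal usage sketch for FindOnionIPAddress; the container and network
// names are illustrative.
func exampleFindOnionIPAddress(cli *client.Client) {
	ip, err := FindOnionIPAddress(cli, "my-target", "mkonion-net")
	if err != nil {
		log.Fatalf("could not resolve target IP: %v", err)
	}
	log.Infof("target reachable at %s", ip)
}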
// returns nil if the volume is not found, or the volume if it exists.
// Still fails the test if the driver for this volume is not vmdk.
func volumeVmdkExists(t *testing.T, c *client.Client, vol string) *types.Volume {
	reply, err := c.VolumeList(context.Background(), filters.Args{})
	if err != nil {
		t.Fatalf("Failed to enumerate  volumes: %v", err)
	}

	for _, v := range reply.Volumes {
		//	t.Log(v.Name, v.Driver, v.Mountpoint)
		if v.Name == vol {
			// the volume must belong to the vmdk driver
			if v.Driver != "vmdk" {
				t.Fatalf("Volume %s exists, but driver is %s, not vmdk", vol, v.Driver)
			}
			return v
		}
	}
	return nil
}
func getGateway(netID string, docker dockerclient.Client) (string, error) {
	net, err := docker.NetworkInspect(context.Background(), netID)
	if err != nil {
		log.Errorf("Error inspecting network: %v", err)
		return "", err
	}

	for _, config := range net.IPAM.Config {
		if config.Gateway != "" {
			return config.Gateway, nil
		}
	}
	return "", nil
}
// enumerates volumes and builds refCountsMap, then syncs with mount info
func (r refCountsMap) discoverAndSync(c *client.Client, d *vmdkDriver) error {
	// we assume the refcounts are empty. Let's enforce that here
	for name := range r {
		delete(r, name)
	}

	filters := filters.NewArgs()
	filters.Add("status", "running")
	filters.Add("status", "paused")
	filters.Add("status", "restarting")
	containers, err := c.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filters,
	})
	if err != nil {
		return err
	}

	log.Debugf("Found %d running or paused containers", len(containers))
	for _, ct := range containers {
		containerJSONInfo, err := c.ContainerInspect(context.Background(), ct.ID)
		if err != nil {
			log.Errorf("ContainerInspect failed for %s (err: %v)", ct.Names, err)
			continue
		}
		log.Debugf("  Mounts for %v", ct.Names)

		for _, mount := range containerJSONInfo.Mounts {
			if mount.Driver == driverName {
				r.incr(mount.Name)
				log.Debugf("  name=%v (driver=%s source=%s)",
					mount.Name, mount.Driver, mount.Source)
			}
		}
	}

	// Check that refcounts and actual mount info from Linux match.
	// If they don't, unmount the unneeded volumes, or yell if something is
	// not mounted but should be (that's an error; we should not get there)

	r.getMountInfo()
	r.syncMountsWithRefCounters(d)

	return nil
}
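// incr is not shown in this excerpt. A minimal sketch consistent with the
// calls above, under the assumption that refCountsMap is a
// map[string]*refCount (the real driver's refCount carries more state):
type refCount struct {
	count int
}

// incr bumps the reference count for the named volume, creating the entry
// on first use, and returns the new count.
func (r refCountsMap) incr(name string) int {
	rc, ok := r[name]
	if !ok {
		rc = &refCount{}
		r[name] = rc
	}
	rc.count++
	return rc.count
}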
// FindTargetPorts finds the set of ports EXPOSE'd on the target container. This
// includes non-TCP ports, so callers should make sure they exclude protocols
// not supported by Tor.
func FindTargetPorts(cli *client.Client, target string) ([]nat.Port, error) {
	inspect, err := cli.ContainerInspect(target)
	if err != nil {
		return nil, err
	}

	// Make sure we don't dereference nils.
	if inspect.NetworkSettings == nil || inspect.NetworkSettings.Ports == nil {
		return nil, fmt.Errorf("inspect container: network settings not available")
	}

	// Get keys from map.
	var ports []nat.Port
	for port := range inspect.NetworkSettings.Ports {
		ports = append(ports, port)
	}

	return ports, nil
}
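// Since Tor only forwards TCP, callers typically filter the result. A sketch
// using FindTargetPorts above; nat.Port exposes the protocol via Proto().
func exampleTCPPorts(cli *client.Client, target string) ([]nat.Port, error) {
	ports, err := FindTargetPorts(cli, target)
	if err != nil {
		return nil, err
	}
	var tcpPorts []nat.Port
	for _, port := range ports {
		if port.Proto() == "tcp" {
			tcpPorts = append(tcpPorts, port)
		}
	}
	return tcpPorts, nil
}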
// CreateOnionNetwork creates a new bridge network with a random (but recognisable)
// name. If it can't create a name after XXX attempts, it will return an error.
func CreateOnionNetwork(cli *client.Client, ident string) (string, error) {
	options := types.NetworkCreate{
		Name:           ident,
		CheckDuplicate: true,
		Driver:         "bridge",
	}

	resp, err := cli.NetworkCreate(options)
	if err != nil {
		// TODO: Retry if we get "already exists".
		return "", err
	}

	if resp.Warning != "" {
		log.Warn(resp.Warning)
	}

	return ident, nil
}
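// A sketch of the retry loop the doc comment describes, on the assumption
// that a fresh random identifier makes an "already exists" collision rare.
// generateIdent is hypothetical and stands in for whatever produces the
// random-but-recognisable names.
func exampleCreateWithRetry(cli *client.Client, generateIdent func() string) (string, error) {
	const maxAttempts = 5
	var lastErr error
	for i := 0; i < maxAttempts; i++ {
		network, err := CreateOnionNetwork(cli, generateIdent())
		if err == nil {
			return network, nil
		}
		lastErr = err
	}
	return "", lastErr
}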
// runs a command in a container, with the volume mounted
// returns the completion code.
// exits (t.Fatal()) on create/start/wait errors
func runContainerCmd(t *testing.T, client *client.Client, volumeName string,
	image string, cmd *strslice.StrSlice, addr string) int {

	mountPoint := getMountpoint(volumeName)
	bind := volumeName + ":" + mountPoint
	t.Logf("Running cmd=%v with vol=%s on client %s", cmd, volumeName, addr)

	r, err := client.ContainerCreate(context.Background(),
		&container.Config{Image: image, Cmd: *cmd,
			Volumes: map[string]struct{}{mountPoint: {}}},
		&container.HostConfig{Binds: []string{bind}}, nil, "")
	if err != nil {
		t.Fatalf("\tContainer create failed: %v", err)
	}

	err = client.ContainerStart(context.Background(), r.ID,
		types.ContainerStartOptions{})
	if err != nil {
		t.Fatalf("\tContainer start failed: id=%s, err %v", r.ID, err)
	}

	code, err := client.ContainerWait(context.Background(), r.ID)
	if err != nil {
		t.Fatalf("\tContainer wait failed: id=%s, err %v", r.ID, err)
	}

	if !removeContainers {
		t.Logf("\tSkipping container removal, id=%s (removeContainers == false)",
			r.ID)
		return code
	}

	err = client.ContainerRemove(context.Background(), r.ID,
		types.ContainerRemoveOptions{
			RemoveVolumes: true,
			Force:         true,
		})
	if err != nil {
		t.Fatalf("\nContainer removal failed: %v", err)
	}

	return code
}
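// A usage sketch for runContainerCmd; the volume, image, path and address
// are illustrative.
func exampleRunContainerCmd(t *testing.T, c *client.Client) {
	cmd := strslice.StrSlice{"touch", "/mnt/testvol/file.txt"}
	code := runContainerCmd(t, c, "testvol", "busybox", &cmd, "tcp://192.168.0.2:2376")
	if code != 0 {
		t.Errorf("unexpected exit code: %d", code)
	}
}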
// newKubeDockerClient creates a kubeDockerClient from an existing docker client. If requestTimeout is 0,
// defaultTimeout will be applied.
func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout time.Duration) DockerInterface {
	if requestTimeout == 0 {
		requestTimeout = defaultTimeout
	}

	k := &kubeDockerClient{
		client:  dockerClient,
		timeout: requestTimeout,
	}
	// Notice that this assumes that docker is running before kubelet is started.
	v, err := k.Version()
	if err != nil {
		glog.Errorf("failed to retrieve docker version: %v", err)
		glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.")
	} else {
		// Update client version with real api version.
		dockerClient.UpdateClientVersion(v.APIVersion)
	}
	return k
}
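// A usage sketch; the endpoint is illustrative, and dockerapi.NewClient
// follows the engine-api constructor these examples build on.
func exampleNewKubeDockerClient() (DockerInterface, error) {
	dc, err := dockerapi.NewClient("unix:///var/run/docker.sock", "", nil, nil)
	if err != nil {
		return nil, err
	}
	// A zero timeout would fall back to defaultTimeout inside newKubeDockerClient.
	return newKubeDockerClient(dc, 2*time.Minute), nil
}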
func runTorContainer(cli *client.Client, ident, imageID, network string) (string, error) {
	config := &types.ContainerCreateConfig{
		Name: ident,
		Config: &containerTypes.Config{
			Image: imageID,
		},
	}

	resp, err := cli.ContainerCreate(config.Config, config.HostConfig, config.NetworkingConfig, config.Name)
	if err != nil {
		return "", err
	}
	// TODO: Remove container on failure.

	for _, warning := range resp.Warnings {
		log.Warn(warning)
	}

	if err := cli.ContainerStart(resp.ID); err != nil {
		return "", err
	}

	// Connect to the network.
	if err := cli.NetworkConnect(network, resp.ID, nil); err != nil {
		return "", err
	}

	return resp.ID, nil
}
func GetOnionHostname(cli *client.Client, containerID string) (string, error) {
	content, stat, err := cli.CopyFromContainer(containerID, HostnamePath)
	// XXX: This isn't very pretty. But we need to wait until Tor generates
	//      an .onion address, and there's not really any better way of
	//      doing it.
	for err != nil && strings.Contains(err.Error(), "no such file or directory") {
		// Make sure the container hasn't died.
		if inspect, err := cli.ContainerInspect(containerID); err != nil {
			return "", fmt.Errorf("error inspecting container: %s", err)
		} else if !isRunning(inspect.State) {
			return "", fmt.Errorf("container died before the hostname was computed")
		}

		log.Warnf("tor onion hostname not found in container, retrying after a short nap...")
		time.Sleep(500 * time.Millisecond)

		content, stat, err = cli.CopyFromContainer(containerID, HostnamePath)
	}
	if err != nil {
		return "", err
	}
	defer content.Close()

	if stat.Mode.IsDir() {
		return "", fmt.Errorf("hostname file is a directory")
	}

	// Walk the archive looking for the hostname file, advancing the tar
	// reader on every iteration.
	tr := tar.NewReader(content)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", err
		}

		// XXX: Maybe do filepath.Base()?
		if hdr.Name != "hostname" {
			continue
		}

		data, err := ioutil.ReadAll(tr)
		if err != nil {
			return "", err
		}

		hostname := string(data)
		return strings.TrimSpace(hostname), nil
	}

	return "", fmt.Errorf("hostname file not in copied archive")
}
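// A usage sketch, assuming the Tor container has already been started; the
// variable names are illustrative.
func exampleGetOnionHostname(cli *client.Client, torContainerID string) {
	hostname, err := GetOnionHostname(cli, torContainerID)
	if err != nil {
		log.Fatalf("could not fetch onion hostname: %v", err)
	}
	log.Infof("onion service reachable at %s", hostname)
}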
// PurgeOnionNetwork purges an onion network, disconnecting all containers from
// it. We assume that nobody is adding containers to this network.
func PurgeOnionNetwork(cli *client.Client, network string) error {
	inspect, err := cli.NetworkInspect(network)
	if err != nil {
		return err
	}

	for container := range inspect.Containers {
		log.Infof("purge network %s: disconnecting container %s", network, container)
		if err := cli.NetworkDisconnect(network, container, true); err != nil {
			return err
		}
	}

	return cli.NetworkRemove(network)
}
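// A sketch pairing creation with deferred cleanup, using the helpers above;
// the identifier is illustrative.
func exampleNetworkLifecycle(cli *client.Client) error {
	network, err := CreateOnionNetwork(cli, "mkonion-example")
	if err != nil {
		return err
	}
	defer func() {
		if err := PurgeOnionNetwork(cli, network); err != nil {
			log.Warnf("failed to purge network %s: %v", network, err)
		}
	}()
	// ... connect the target and relay containers, then do work ...
	return nil
}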
// newDockerContainerHandler returns a new container.ContainerHandler
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	storageDir string,
	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
	inHostNamespace bool,
	metadataEnvs []string,
	dockerVersion []int,
	ignoreMetrics container.MetricSet,
	thinPoolWatcher *devicemapper.ThinPoolWatcher,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
		storageDir = path.Join(rootFs, storageDir)
	}

	id := ContainerNameToDockerId(name)

	// Add the Containers dir where the log files are stored.
	// FIXME: Give `otherStorageDir` a more descriptive name.
	otherStorageDir := path.Join(storageDir, pathToContainersDir, id)

	rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
	if err != nil {
		return nil, err
	}

	// Determine the rootfs storage dir OR the pool name to determine the device
	var (
		rootfsStorageDir string
		poolName         string
	)
	switch storageDriver {
	case aufsStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
	case overlayStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
	case devicemapperStorageDriver:
		status, err := Status()
		if err != nil {
			return nil, fmt.Errorf("unable to determine docker status: %v", err)
		}

		poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
	}

	// TODO: extract object mother method
	handler := &dockerContainerHandler{
		id:                 id,
		client:             client,
		name:               name,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths:        cgroupPaths,
		cgroupManager:      cgroupManager,
		storageDriver:      storageDriver,
		fsInfo:             fsInfo,
		rootFs:             rootFs,
		poolName:           poolName,
		rootfsStorageDir:   rootfsStorageDir,
		envs:               make(map[string]string),
		ignoreMetrics:      ignoreMetrics,
		thinPoolWatcher:    thinPoolWatcher,
	}

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.ContainerInspect(context.Background(), id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	// Timestamp returned by Docker is in time.RFC3339Nano format.
	handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
	if err != nil {
		// This should not happen, report the error just in case
		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
	}
	handler.pid = ctnr.State.Pid

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
	handler.labels = ctnr.Config.Labels
	handler.image = ctnr.Config.Image
	handler.networkMode = ctnr.HostConfig.NetworkMode
	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]

	// Obtain the IP address for the container.
	// If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
	// This happens in cases such as kubernetes where the container doesn't have an IP address itself and we need to use the pod's address.
	ipAddress := ctnr.NetworkSettings.IPAddress
	networkMode := string(ctnr.HostConfig.NetworkMode)
	if ipAddress == "" && strings.HasPrefix(networkMode, "container:") {
		containerId := strings.TrimPrefix(networkMode, "container:")
		c, err := client.ContainerInspect(context.Background(), containerId)
		if err != nil {
			return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
		}
		ipAddress = c.NetworkSettings.IPAddress
	}

	handler.ipAddress = ipAddress

	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
		handler.fsHandler = &dockerFsHandler{
			fsHandler:       common.NewFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo),
			thinPoolWatcher: thinPoolWatcher,
			deviceID:        handler.deviceID,
		}
	}

	// split env vars to get metadata map.
	for _, exposedEnv := range metadataEnvs {
		for _, envVar := range ctnr.Config.Env {
			splits := strings.SplitN(envVar, "=", 2)
			if splits[0] == exposedEnv {
				handler.envs[strings.ToLower(exposedEnv)] = splits[1]
			}
		}
	}

	return handler, nil
}
func buildTorImage(cli *client.Client, ctx io.Reader) (string, error) {
	// XXX: There's currently no way to get the image ID of a build without
	//      manually parsing the output, or tagging the image. Since I'm not in
	//      the mood for the former, we can tag the build with a random name.
	//      Unfortunately, untagging of images isn't supported, so we'll have to
	//      use a name that allows us to not pollute the host.

	options := types.ImageBuildOptions{
		// XXX: If we SuppressOutput we can get just the image ID, but we lose
		//      being able to tell users what the status of the build is.
		//SuppressOutput: true,
		Tags:        []string{MkonionTag},
		Remove:      true,
		ForceRemove: true,
		Dockerfile:  "Dockerfile",
		Context:     ctx,
	}

	build, err := cli.ImageBuild(options)
	if err != nil {
		return "", err
	}

	// XXX: For some weird reason, at this point the build has not finished. We
	//      need to wait for build.Body to be closed. We might as well tell the
	//      user what the status of the build is.
	log.Infof("building %s", MkonionTag)
	dec := json.NewDecoder(build.Body)
	for {
		// Modified from pkg/jsonmessage in Docker.
		type JSONMessage struct {
			Stream string `json:"stream,omitempty"`
			Status string `json:"status,omitempty"`
		}

		// Decode the JSONMessages.
		var jm JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			return "", err
		}

		jm.Stream = strings.TrimSpace(jm.Stream)
		jm.Status = strings.TrimSpace(jm.Status)

		// Log the status.
		if jm.Stream != "" {
			log.Info(jm.Stream)
		}
		if jm.Status != "" {
			log.Info(jm.Status)
		}
	}

	inspect, _, err := cli.ImageInspectWithRaw(MkonionTag, false)
	if err != nil {
		// XXX: Should probably clean up the built image here?
		return "", err
	}

	log.Infof("successfully built %s image", MkonionTag)
	return inspect.ID, nil
}
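// buildTorImage expects ctx to be a tar stream of the build context. A
// minimal sketch assembling one in memory with just a Dockerfile:
func exampleBuildContext(dockerfile string) (io.Reader, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte(dockerfile)); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}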
// ConnectOnionNetwork connects a target container to the onion network, allowing
// the container to be accessed by the Tor relay container.
func ConnectOnionNetwork(cli *client.Client, target, network string) error {
	// XXX: Should configure this to use a subnet like 10.x.x.x.
	options := &networkTypes.EndpointSettings{}
	return cli.NetworkConnect(network, target, options)
}
func newContainer(ctx context.Context, client *client.Client, profile *Profile, name, executable string, args, env map[string]string) (pr *process, err error) {
	defer apexctx.GetLogger(ctx).Trace("spawning container").Stop(&err)

	var image string
	if registry := profile.Registry; registry != "" {
		image = registry + "/" + name
	} else {
		image = name
	}

	var Env = make([]string, 0, len(env))
	for k, v := range env {
		Env = append(Env, k+"="+v)
	}

	var binds = make([]string, 1, len(profile.Binds)+1)
	binds[0] = filepath.Dir(args["--endpoint"]) + ":" + profile.RuntimePath
	binds = append(binds, profile.Binds...)

	// update args["--endpoint"] according to the container's point of view
	args["--endpoint"] = filepath.Join(profile.RuntimePath, filepath.Base(args["--endpoint"]))

	var Cmd = make(strslice.StrSlice, 1, len(args)+1)
	Cmd[0] = executable
	for k, v := range args {
		Cmd = append(Cmd, k, v)
	}

	config := container.Config{
		AttachStdin:  false,
		AttachStdout: true,
		AttachStderr: true,

		Env:        Env,
		Cmd:        Cmd,
		Image:      image,
		WorkingDir: profile.Cwd,
		Labels:     map[string]string{isolateDockerLabel: name},
	}

	apexctx.GetLogger(ctx).Info("applying Resource limits")
	var resources = container.Resources{
		Memory:     profile.Resources.Memory,
		CPUShares:  profile.Resources.CPUShares,
		CPUPeriod:  profile.Resources.CPUPeriod,
		CPUQuota:   profile.Resources.CPUQuota,
		CpusetCpus: profile.Resources.CpusetCpus,
		CpusetMems: profile.Resources.CpusetMems,
	}

	hostConfig := container.HostConfig{
		NetworkMode: profile.NetworkMode,
		Binds:       binds,
		Resources:   resources,
	}

	if len(profile.Tmpfs) != 0 {
		buff := new(bytes.Buffer)
		for k, v := range profile.Tmpfs {
			fmt.Fprintf(buff, "%s: %s;", k, v)
		}
		apexctx.GetLogger(ctx).Infof("mounting `tmpfs` to container: %s", buff.String())

		hostConfig.Tmpfs = profile.Tmpfs
	}

	// NOTE: It should be nil
	var networkingConfig *network.NetworkingConfig

	resp, err := client.ContainerCreate(ctx, &config, &hostConfig, networkingConfig, "")
	if err != nil {
		apexctx.GetLogger(ctx).WithError(err).Error("unable to create a container")
		return nil, err
	}

	for _, warn := range resp.Warnings {
		apexctx.GetLogger(ctx).Warnf("%s warning: %s", resp.ID, warn)
	}

	ctx, cancel := context.WithCancel(ctx)
	pr = &process{
		ctx:          ctx,
		cancellation: cancel,
		client:       client,
		containerID:  resp.ID,
	}

	return pr, nil
}
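// A usage sketch; the Profile values and worker name are illustrative, and
// args must contain "--endpoint" because newContainer derives the runtime
// bind mount from it.
func exampleNewContainer(ctx context.Context, cli *client.Client, profile *Profile) (*process, error) {
	args := map[string]string{"--endpoint": "/run/isolate/worker.sock"}
	env := map[string]string{"ENVIRONMENT": "testing"}
	return newContainer(ctx, cli, profile, "echo-worker", "/usr/bin/echo-worker", args, env)
}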