Example #1
// AppReaperUnit writes an app reaper service unit for the given app, using
// the given unit options; the unit path is derived from the writer's pod root.
func (uw *UnitWriter) AppReaperUnit(appName types.ACName, binPath string, opts ...*unit.UnitOption) {
	if uw.err != nil {
		return
	}

	opts = append(opts, []*unit.UnitOption{
		unit.NewUnitOption("Unit", "Description", fmt.Sprintf("%s Reaper", appName)),
		unit.NewUnitOption("Unit", "DefaultDependencies", "false"),
		unit.NewUnitOption("Unit", "StopWhenUnneeded", "yes"),
		unit.NewUnitOption("Unit", "Before", "halt.target"),
		unit.NewUnitOption("Unit", "Conflicts", "exit.target"),
		unit.NewUnitOption("Unit", "Conflicts", "halt.target"),
		unit.NewUnitOption("Unit", "Conflicts", "poweroff.target"),
		unit.NewUnitOption("Service", "RemainAfterExit", "yes"),
		unit.NewUnitOption("Service", "ExecStop", fmt.Sprintf(
			"/reaper.sh \"%s\" \"%s\" \"%s\"",
			appName,
			common.RelAppRootfsPath(appName),
			binPath,
		)),
	}...)

	uw.WriteUnit(
		ServiceUnitPath(uw.p.Root, types.ACName(fmt.Sprintf("reaper-%s", appName))),
		fmt.Sprintf("failed to write app %q reaper service", appName),
		opts...,
	)
}
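
A minimal caller sketch for the writer above. NewUnitWriter and the Error accessor are assumptions about the surrounding stage1 code (the err field suggests an accumulate-then-check pattern); only AppReaperUnit's signature is taken from the example.

// Hypothetical usage: write a reaper unit for app "myapp" whose binary is
// /inspect. Errors accumulate on the writer and are checked once at the end.
uw := NewUnitWriter(pod) // assumed constructor over the stage1 pod
uw.AppReaperUnit(types.ACName("myapp"), "/inspect")
if err := uw.Error(); err != nil { // assumed accessor for uw.err
	log.Fatalf("failed to write reaper unit: %v", err)
}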
Example #2
// Create begins launching a container with the provided image manifest and
// reader as the source of the ACI.
func (manager *Manager) Create(
	name string, imageManifest *schema.ImageManifest, image io.ReadCloser,
) (*Container, error) {
	// revalidate the image
	if err := manager.Validate(imageManifest); err != nil {
		return nil, err
	}

	// handle a blank name
	if name == "" {
		name = imageManifest.Name.String()
	}

	// populate the container
	container := &Container{
		manager:          manager,
		log:              manager.Log.Clone(),
		uuid:             uuid.Variant4().String(),
		waitch:           make(chan bool),
		initialImageFile: image,
		image:            imageManifest,
		pod: &schema.PodManifest{
			ACKind:    schema.PodManifestKind,
			ACVersion: schema.AppContainerVersion,
			Apps: schema.AppList{
				{
					Name: types.ACName(name),
					App:  imageManifest.App,
					Image: schema.RuntimeImage{
						Name:   &imageManifest.Name,
						Labels: imageManifest.Labels,
					},
				},
			},
		},
	}
	container.log.SetField("container", container.uuid)
	container.log.Debugf("Launching container %s", container.uuid)

	// add it to the manager's map
	manager.containersLock.Lock()
	manager.containers[container.uuid] = container
	manager.containersLock.Unlock()

	// begin the startup sequence
	container.start()

	return container, nil
}
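
A hedged sketch of driving Create; opening the ACI and loading its manifest are assumptions, and only Create's signature comes from the example above.

// Hypothetical caller: launch a container from an ACI on disk. Create takes
// the ReadCloser as the image source and has already kicked off the startup
// sequence by the time it returns.
f, err := os.Open("app.aci") // assumed path
if err != nil {
	log.Fatal(err)
}
container, err := manager.Create("demo", imageManifest, f)
if err != nil {
	log.Fatal(err)
}
_ = container // startup proceeds via container.start(), as shown above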
Example #3
func TestVolumesToKvmDiskArgs(t *testing.T) {
	tests := []struct {
		volumes  []types.Volume
		expected []string
	}{
		{ // one host volume - one argument
			volumes:  []types.Volume{{Name: types.ACName("foo"), Kind: "host", Source: "src1"}},
			expected: []string{fmt.Sprintf("--9p=src1,%s", makeHashFromVolumeName("foo"))},
		},
		{ // one empty volume - no arguments
			volumes:  []types.Volume{{Name: types.ACName("foo"), Kind: "empty", Source: "src1"}},
			expected: []string{},
		},
		{ // two host volumes
			volumes: []types.Volume{
				{Name: types.ACName("foo"), Kind: "host", Source: "src1"},
				{Name: types.ACName("bar"), Kind: "host", Source: "src2"},
			},
			expected: []string{fmt.Sprintf("--9p=src1,%s", makeHashFromVolumeName("foo")),
				fmt.Sprintf("--9p=src2,%s", makeHashFromVolumeName("bar"))},
		},
		{ // mix host and empty
			volumes: []types.Volume{
				{Name: types.ACName("foo"), Kind: "host", Source: "src1"},
				{Name: types.ACName("baz"), Kind: "empty", Source: "src1"},
				{Name: types.ACName("bar"), Kind: "host", Source: "src2"},
			},
			expected: []string{fmt.Sprintf("--9p=src1,%s", makeHashFromVolumeName("foo")),
				fmt.Sprintf("--9p=src2,%s", makeHashFromVolumeName("bar"))},
		},
	}

	for i, tt := range tests {
		got := VolumesToKvmDiskArgs(tt.volumes)
		if len(got) != len(tt.expected) {
			t.Errorf("#%d: expected %v elements got %v", i, len(tt.expected), len(got))
		} else {
			for iarg, argExpected := range tt.expected {
				if got[iarg] != argExpected {
					t.Errorf("#%d: arg %d expected `%v` got `%v`", i, iarg, argExpected, got[iarg])
				}
			}
		}
	}
}
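
The table above fully pins down the function's contract: each "host" volume contributes one --9p=<source>,<hash> argument, and every other kind contributes nothing. A sketch consistent with that contract, assuming makeHashFromVolumeName as used in the test; this is not the actual rkt implementation.

// Sketch satisfying the test table above: "host" volumes map to --9p
// arguments, all other kinds are skipped.
func VolumesToKvmDiskArgs(volumes []types.Volume) []string {
	args := []string{}
	for _, vol := range volumes {
		if vol.Kind == "host" {
			args = append(args, fmt.Sprintf("--9p=%s,%s", vol.Source, makeHashFromVolumeName(string(vol.Name))))
		}
	}
	return args
}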
Example #4
package acutil

import "github.com/appc/spec/schema/types"

// Empty ACName
const ACNoName = types.ACName("")

// Empty ACIdentifier
const ACNoIdentifier = types.ACIdentifier("")
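
A small sketch of why such sentinels help: call sites read as intent rather than as a bare empty-string comparison. The describe helper is hypothetical.

// Hypothetical caller comparing against the typed sentinel instead of "".
func describe(name types.ACName) string {
	if name == ACNoName {
		return "(unnamed)"
	}
	return name.String()
}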
Example #5
func genManifest(path string) *schema.ImageManifest {
	// Get runtime.json and config.json
	runtimePath := filepath.Join(path, "runtime.json")
	configPath := filepath.Join(path, "config.json")

	runtime, err := ioutil.ReadFile(runtimePath)
	if err != nil {
		if debugEnabled {
			log.Printf("Open file runtime.json failed: %v", err)
		}
		return nil
	}

	config, err := ioutil.ReadFile(configPath)
	if err != nil {
		if debugEnabled {
			log.Printf("Open file config.json failed: %v", err)
		}
		return nil
	}

	var spec specs.LinuxSpec
	err = json.Unmarshal(config, &spec)
	if err != nil {
		if debugEnabled {
			log.Printf("Unmarshal config.json failed: %v", err)
		}
		return nil
	}

	var runSpec specs.LinuxRuntimeSpec
	err = json.Unmarshal(runtime, &runSpec)
	if err != nil {
		if debugEnabled {
			log.Printf("Unmarshal runtime.json failed: %v", err)
		}
		return nil
	}
	// Begin to convert runtime.json/config.json to manifest
	m := new(schema.ImageManifest)

	// 1. Assemble "acKind" field
	m.ACKind = schema.ImageManifestKind

	// 2. Assemble "acVersion" field
	m.ACVersion = schema.AppContainerVersion

	// 3. Assemble "name" field
	m.Name = types.ACIdentifier(manifestName)

	// 4. Assemble "labels" field
	// 4.1 "version"
	label := new(types.Label)
	label.Name = types.ACIdentifier("version")
	label.Value = spec.Version
	m.Labels = append(m.Labels, *label)
	// 4.2 "os"
	label = new(types.Label)
	label.Name = types.ACIdentifier("os")
	label.Value = spec.Platform.OS
	m.Labels = append(m.Labels, *label)
	// 4.3 "arch"
	label = new(types.Label)
	label.Name = types.ACIdentifier("arch")
	label.Value = spec.Platform.Arch
	m.Labels = append(m.Labels, *label)

	// 5. Assemble "app" field
	app := new(types.App)
	// 5.1 "exec"
	app.Exec = spec.Process.Args

	prefixDir := ""
	//var exeStr string
	if app.Exec == nil {
		app.Exec = append(app.Exec, "/bin/sh")
	} else {
		if !filepath.IsAbs(app.Exec[0]) {
			if spec.Process.Cwd == "" {
				prefixDir = "/"
			} else {
				prefixDir = spec.Process.Cwd
			}
		}
		app.Exec[0] = prefixDir + app.Exec[0]
	}

	// 5.2 "user"
	app.User = fmt.Sprintf("%d", spec.Process.User.UID)
	// 5.3 "group"
	app.Group = fmt.Sprintf("%d", spec.Process.User.GID)
	// 5.4 "eventHandlers"
	event := new(types.EventHandler)
	event.Name = "pre-start"
	for index := range runSpec.Hooks.Prestart {
		event.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Path)
		event.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Args...)
		event.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Env...)
	}
	if len(event.Exec) == 0 {
		event.Exec = append(event.Exec, "/bin/echo")
		event.Exec = append(event.Exec, "-n")
	}
	app.EventHandlers = append(app.EventHandlers, *event)
	event = new(types.EventHandler)
	event.Name = "post-stop"
	for index := range runSpec.Hooks.Poststop {
		event.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Path)
		event.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Args...)
		event.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Env...)
	}
	if len(event.Exec) == 0 {
		event.Exec = append(event.Exec, "/bin/echo")
		event.Exec = append(event.Exec, "-n")
	}
	app.EventHandlers = append(app.EventHandlers, *event)
	// 5.5 "workingDirectory"
	app.WorkingDirectory = spec.Process.Cwd
	// 5.6 "environment"
	env := new(types.EnvironmentVariable)
	for index := range spec.Process.Env {
		s := strings.Split(spec.Process.Env[index], "=")
		env.Name = s[0]
		env.Value = s[1]
		app.Environment = append(app.Environment, *env)
	}

	// 5.7 "mountPoints"
	for index := range spec.Mounts {
		mount := new(types.MountPoint)
		mount.Name = types.ACName(spec.Mounts[index].Name)
		mount.Path = spec.Mounts[index].Path
		mount.ReadOnly = false
		app.MountPoints = append(app.MountPoints, *mount)
	}

	// 5.8 "ports"

	// 5.9 "isolators"
	if runSpec.Linux.Resources != nil {
		if runSpec.Linux.Resources.CPU.Quota != 0 {
			cpuLimit := new(ResourceCPU)
			cpuLimit.Limit = fmt.Sprintf("%dm", runSpec.Linux.Resources.CPU.Quota)
			isolator := new(types.Isolator)
			isolator.Name = types.ACIdentifier("resource/cpu")
			b, _ := json.Marshal(cpuLimit)

			valueRaw := json.RawMessage(b)
			isolator.ValueRaw = &valueRaw

			app.Isolators = append(app.Isolators, *isolator)
		}
		if runSpec.Linux.Resources.Memory.Limit != 0 {
			memLimit := new(ResourceMem)
			memLimit.Limit = fmt.Sprintf("%dG", runSpec.Linux.Resources.Memory.Limit/(1024*1024*1024))
			isolator := new(types.Isolator)
			isolator.Name = types.ACIdentifier("resource/memory")
			b, _ := json.Marshal(memLimit)

			valueRaw := json.RawMessage(b)
			isolator.ValueRaw = &valueRaw

			app.Isolators = append(app.Isolators, *isolator)
		}
	}

	if len(spec.Linux.Capabilities) != 0 {
		isolatorCapSet := new(IsolatorCapSet)
		isolatorCapSet.Sets = append(isolatorCapSet.Sets, spec.Linux.Capabilities...)

		isolator := new(types.Isolator)
		isolator.Name = types.ACIdentifier(types.LinuxCapabilitiesRetainSetName)
		b, _ := json.Marshal(isolatorCapSet)

		valueRaw := json.RawMessage(b)
		isolator.ValueRaw = &valueRaw

		app.Isolators = append(app.Isolators, *isolator)
	}

	// 6. "annotations"

	// 7. "dependencies"

	// 8. "pathWhitelist"

	m.App = app

	return m
}
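
A hedged sketch of calling the converter; the bundle path is hypothetical, and manifestName and debugEnabled are assumed package globals, as the function body implies.

// Hypothetical usage: convert the OCI bundle in ./bundle (which must contain
// runtime.json and config.json) and print the resulting ACI image manifest.
m := genManifest("./bundle")
if m == nil {
	log.Fatal("conversion failed; enable debug logging for details")
}
out, err := json.MarshalIndent(m, "", "\t")
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(out))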
Example #6
func NewAPIServiceListInspectPodsTest() testutils.Test {
	return testutils.TestFunc(func(t *testing.T) {
		ctx := testutils.NewRktRunCtx()
		defer ctx.Cleanup()

		svc := startAPIService(t, ctx)
		defer stopAPIService(t, svc)

		c, conn := newAPIClientOrFail(t, "localhost:15441")
		defer conn.Close()

		resp, err := c.ListPods(context.Background(), &v1alpha.ListPodsRequest{})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}

		if len(resp.Pods) != 0 {
			t.Errorf("Unexpected result: %v, should see zero pods", resp.Pods)
		}

		patches := []string{"--exec=/inspect --print-msg=HELLO_API --exit-code=0"}
		imageHash, err := patchImportAndFetchHash("rkt-inspect-print.aci", patches, t, ctx)
		if err != nil {
			t.Fatalf("%v", err)
		}
		imgID, err := types.NewHash(imageHash)
		if err != nil {
			t.Fatalf("Cannot generate types.Hash from %v: %v", imageHash, err)
		}

		podManifests := []struct {
			mfst             schema.PodManifest
			net              string
			expectedExitCode int
		}{
			{
				// 1, Good pod.
				schema.PodManifest{
					ACKind:    schema.PodManifestKind,
					ACVersion: schema.AppContainerVersion,
					Apps: []schema.RuntimeApp{
						{
							Name: types.ACName("rkt-inspect"),
							Image: schema.RuntimeImage{
								Name: types.MustACIdentifier("coreos.com/rkt-inspect"),
								ID:   *imgID,
							},
							Annotations: []types.Annotation{{Name: types.ACIdentifier("app-test"), Value: "app-test"}},
						},
					},
					Annotations: []types.Annotation{
						{Name: types.ACIdentifier("test"), Value: "test"},
					},
				},
				"default",
				0,
			},
			{
				// 2, Bad pod, won't be launched correctly.
				schema.PodManifest{
					ACKind:    schema.PodManifestKind,
					ACVersion: schema.AppContainerVersion,
					Apps: []schema.RuntimeApp{
						{
							Name: types.ACName("rkt-inspect"),
							Image: schema.RuntimeImage{
								Name: types.MustACIdentifier("coreos.com/rkt-inspect"),
								ID:   *imgID,
							},
						},
					},
				},
				"non-existent-network",
				254,
			},
		}

		// Launch the pods.
		for _, entry := range podManifests {
			manifestFile := generatePodManifestFile(t, &entry.mfst)
			defer os.Remove(manifestFile)

			runCmd := fmt.Sprintf("%s run --net=%s --pod-manifest=%s", ctx.Cmd(), entry.net, manifestFile)
			waitOrFail(t, spawnOrFail(t, runCmd), entry.expectedExitCode)
		}

		time.Sleep(delta)

		gcCmd := fmt.Sprintf("%s gc --mark-only=true", ctx.Cmd())
		waitOrFail(t, spawnOrFail(t, gcCmd), 0)

		gcTime := time.Now()

		// ListPods(detail=false).
		resp, err = c.ListPods(context.Background(), &v1alpha.ListPodsRequest{})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}

		if len(resp.Pods) != len(podManifests) {
			t.Errorf("Unexpected result: %v, should see %v pods", len(resp.Pods), len(podManifests))
		}

		for _, p := range resp.Pods {
			checkPodBasicsWithGCTime(t, ctx, p, gcTime)

			// Test InspectPod().
			inspectResp, err := c.InspectPod(context.Background(), &v1alpha.InspectPodRequest{Id: p.Id})
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			checkPodDetails(t, ctx, inspectResp.Pod)
		}

		// ListPods(detail=true).
		resp, err = c.ListPods(context.Background(), &v1alpha.ListPodsRequest{Detail: true})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}

		if len(resp.Pods) != len(podManifests) {
			t.Errorf("Unexpected result: %v, should see %v pods", len(resp.Pods), len(podManifests))
		}

		for _, p := range resp.Pods {
			checkPodDetails(t, ctx, p)
		}

		// ListPods with corrupt pod directory
		// Note that we don't checkPodDetails here; the failure this is testing
		// is the API server panicking, which results in a list call hanging for
		// ages and then failing.
		// TODO: do further validation on the partial pods returned
		for _, p := range resp.Pods {
			numRemoved := 0
			podDir := getPodDir(t, ctx, p.Id)
			filepath.Walk(filepath.Join(podDir, "appsinfo"), func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				if info.Name() == "manifest" {
					os.Remove(path)
					numRemoved++
				}
				return nil
			})
			if numRemoved == 0 {
				t.Fatalf("Expected to remove at least one app manifest for pod %v", p)
			}
		}

		// ListPods(detail=true).
		resp, err = c.ListPods(context.Background(), &v1alpha.ListPodsRequest{Detail: true})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if len(resp.Pods) != len(podManifests) {
			t.Fatalf("Expected %v pods, got %v pods", len(podManifests), len(resp.Pods))
		}
	})
}
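
The testutils.TestFunc wrapper used above is the standard function-adapter pattern; a sketch of the shape it implies, with names inferred from the call site rather than verified against the rkt source:

// Inferred shape of the adapter (an assumption, not verified source): a
// single-method interface plus a function type that satisfies it, so a
// plain closure can be returned as a Test.
type Test interface {
	Execute(t *testing.T)
}

type TestFunc func(t *testing.T)

// Execute lets a bare function satisfy the Test interface.
func (f TestFunc) Execute(t *testing.T) { f(t) }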
func TestAPIServiceListInspectPods(t *testing.T) {
	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	svc := startAPIService(t, ctx)
	defer stopAPIService(t, svc)

	c, conn := newAPIClientOrFail(t, "localhost:15441")
	defer conn.Close()

	resp, err := c.ListPods(context.Background(), &v1alpha.ListPodsRequest{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if len(resp.Pods) != 0 {
		t.Errorf("Unexpected result: %v, should see zero pods", resp.Pods)
	}

	patches := []string{"--exec=/inspect --print-msg=HELLO_API --exit-code=0"}
	imageHash := patchImportAndFetchHash("rkt-inspect-print.aci", patches, t, ctx)
	imgID, err := types.NewHash(imageHash)
	if err != nil {
		t.Fatalf("Cannot generate types.Hash from %v: %v", imageHash, err)
	}
	pm := schema.BlankPodManifest()
	pm.Apps = []schema.RuntimeApp{
		{
			Name: types.ACName("rkt-inspect"),
			Image: schema.RuntimeImage{
				Name: types.MustACIdentifier("coreos.com/rkt-inspect"),
				ID:   *imgID,
			},
			Annotations: []types.Annotation{{Name: types.ACIdentifier("app-test"), Value: "app-test"}},
		},
	}
	pm.Annotations = []types.Annotation{{Name: types.ACIdentifier("test"), Value: "test"}}
	manifestFile := generatePodManifestFile(t, pm)
	defer os.Remove(manifestFile)

	runCmd := fmt.Sprintf("%s run --pod-manifest=%s", ctx.Cmd(), manifestFile)
	waitOrFail(t, spawnOrFail(t, runCmd), 0)

	gcCmd := fmt.Sprintf("%s gc --mark-only=true", ctx.Cmd())
	waitOrFail(t, spawnOrFail(t, gcCmd), 0)

	gcTime := time.Now()

	// ListPods(detail=false).
	resp, err = c.ListPods(context.Background(), &v1alpha.ListPodsRequest{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if len(resp.Pods) == 0 {
		t.Errorf("Unexpected result: %v, should see non-zero pods", resp.Pods)
	}

	for _, p := range resp.Pods {
		checkPodBasicsWithGCTime(t, ctx, p, gcTime)

		// Test InspectPod().
		inspectResp, err := c.InspectPod(context.Background(), &v1alpha.InspectPodRequest{Id: p.Id})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		checkPodDetails(t, ctx, inspectResp.Pod)

		// Test Apps.
		for i, app := range p.Apps {
			checkAnnotations(t, pm.Apps[i].Annotations, app.Annotations)
		}
	}

	// ListPods(detail=true).
	resp, err = c.ListPods(context.Background(), &v1alpha.ListPodsRequest{Detail: true})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if len(resp.Pods) == 0 {
		t.Errorf("Unexpected result: %v, should see non-zero pods", resp.Pods)
	}

	for _, p := range resp.Pods {
		checkPodDetails(t, ctx, p)
	}
}
Example #7
// Start the initd. This doesn't actually configure it; it just starts it so
// that we have a process and namespaces for the networking side of things to
// work with.
func (c *Container) launchStage2() error {
	c.log.Debug("Starting stage 2.")

	// Open a log file that all output from the container will be written to
	flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_TRUNC
	stage2Stdout, err := os.OpenFile(c.stage2LogPath(), flags, os.FileMode(0666))
	if err != nil {
		return err
	}
	defer stage2Stdout.Close()

	// Initialize the stage2 launcher
	launcher := &client.Launcher{
		SocketPath: c.socketPath(),
		Directory:  c.stage3Path(),
		Chroot:     true,
		Cgroup:     c.cgroup,
		Stdout:     stage2Stdout,
		Stderr:     stage2Stdout,
	}

	// Configure which linux namespaces to create
	nsisolators := false
	if iso := c.image.App.Isolators.GetByName(kschema.LinuxNamespacesName); iso != nil {
		if niso, ok := iso.Value().(*kschema.LinuxNamespaces); ok {
			launcher.NewIPCNamespace = niso.IPC()
			launcher.NewMountNamespace = niso.Mount()
			launcher.NewNetworkNamespace = niso.Net()
			launcher.NewPIDNamespace = niso.PID()
			launcher.NewUserNamespace = niso.User()
			launcher.NewUTSNamespace = niso.UTS()
			nsisolators = true
		}
	}
	if !nsisolators {
		// set some defaults if no namespace isolator was given
		launcher.NewIPCNamespace = true
		launcher.NewMountNamespace = true
		launcher.NewPIDNamespace = true
		launcher.NewUTSNamespace = true
	}

	// Check for a privileged isolator
	if iso := c.image.App.Isolators.GetByName(kschema.HostPrivilegedName); iso != nil {
		if piso, ok := iso.Value().(*kschema.HostPrivileged); ok {
			if *piso {
				launcher.HostPrivileged = true

				// create the mount point
				podsDest, err := c.ensureContainerPathExists("host/pods")
				if err != nil {
					return err
				}
				procDest, err := c.ensureContainerPathExists("host/proc")
				if err != nil {
					return err
				}

				podsMount := strings.Replace(podsDest, c.stage3Path(), client.DefaultChrootPath, 1)
				procMount := strings.Replace(procDest, c.stage3Path(), client.DefaultChrootPath, 1)

				// create the mount point definitions for host access
				launcher.MountPoints = []*client.MountPoint{
					// Add the pods mount
					{
						Source:      c.manager.containerDirectory,
						Destination: podsMount,
						Flags:       syscall.MS_BIND,
					},
					// Make the pods mount read only. This cannot be done all in one, and
					// needs MS_BIND included to avoid "resource busy" and to ensure we're
					// only making the bind location read-only, not the parent.
					{
						Source:      podsMount,
						Destination: podsMount,
						Flags:       syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY,
					},

					// Add the host's proc filesystem under host/proc. This can be done
					// for diagnostics of the host's state, and can also be used to get
					// access to the host's filesystem (via /host/proc/1/root/...). This
					// is not read-only because making it read only isn't effective. You
					// can still traverse into .../root/... partitions due to the magic
					// that is proc and namespaces. Using proc is more useful than root
					// because it ensures more consistent access to process's actual
					// filesystem state as it crosses namespaces. Direct bind mounts tend
					// to miss some child mounts, even when trying to ensure everything is
					// shared.
					{
						Source:      "/proc",
						Destination: procMount,
						Flags:       syscall.MS_BIND,
					},
				}

				// If a volume directory is defined, then map it in as well.
				if c.manager.volumeDirectory != "" {
					volumesDest, err := c.ensureContainerPathExists("host/volumes")
					if err != nil {
						return err
					}
					volumesMount := strings.Replace(volumesDest, c.stage3Path(), client.DefaultChrootPath, 1)
					launcher.MountPoints = append(launcher.MountPoints,
						&client.MountPoint{
							Source:      c.manager.volumeDirectory,
							Destination: volumesMount,
							Flags:       syscall.MS_BIND,
						})
				}
			}
		}
	}

	// Apply any volumes that are needed as mount points on the launcher
	if c.manager.volumeDirectory != "" {
		podApp := c.pod.Apps.Get(types.ACName(c.image.Name.String()))
		if podApp == nil {
			return fmt.Errorf("pod manifest is missing app %q", c.image.Name)
		}
		for _, mp := range c.image.App.MountPoints {
			hostPath, err := c.manager.getVolumePath(mp.Name.String())
			if err != nil {
				return err
			}

			podPath, err := c.ensureContainerPathExists(mp.Path)
			if err != nil {
				return err
			}
			podMount := strings.Replace(podPath, c.stage3Path(), client.DefaultChrootPath, 1)

			launcher.MountPoints = append(launcher.MountPoints, &client.MountPoint{
				Source:      hostPath,
				Destination: podMount,
				Flags:       syscall.MS_BIND,
			})

			// If the mount point should be read only, then add a second mount handler
			// to trigger it to be read-only.
			if mp.ReadOnly {
				launcher.MountPoints = append(launcher.MountPoints, &client.MountPoint{
					Source:      hostPath,
					Destination: podMount,
					Flags:       syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY,
				})
			}

			// Add to the PodManifest
			ro := mp.ReadOnly
			podApp.Mounts = append(podApp.Mounts, schema.Mount{
				Volume:     mp.Name,
				MountPoint: mp.Name,
			})
			c.pod.Volumes = append(c.pod.Volumes, types.Volume{
				Name:     mp.Name,
				Kind:     "host",
				Source:   hostPath,
				ReadOnly: &ro,
			})
		}
	}

	initdClient, err := launcher.Run()
	if err != nil {
		return err
	}
	c.mutex.Lock()
	c.initdClient = initdClient
	c.mutex.Unlock()

	c.log.Trace("Done starting stage 2.")
	return nil
}
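
The two-step read-only bind mount described in the comments above generalizes beyond this launcher; a minimal standalone sketch using syscall directly, assuming a Linux host:

// remountReadOnly bind-mounts src at dst, then flips the bind to read-only.
// MS_RDONLY on the initial call is not honored for bind mounts; a second
// MS_REMOUNT pass is required, and keeping MS_BIND ensures only this mount,
// not the parent filesystem, becomes read-only.
func remountReadOnly(src, dst string) error {
	if err := syscall.Mount(src, dst, "", syscall.MS_BIND, ""); err != nil {
		return err
	}
	return syscall.Mount(src, dst, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}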