// SetUpAt creates a new directory and clones a git repo into it.
func (b *gitRepoVolumeBuilder) SetUpAt(dir string) error {
	if volumeutil.IsReady(b.getMetaDir()) {
		return nil
	}

	// Wrap EmptyDir, let it do the setup.
	wrapped, err := b.plugin.host.NewWrapperBuilder(wrappedVolumeSpec, &b.pod, b.opts)
	if err != nil {
		return err
	}
	if err := wrapped.SetUpAt(dir); err != nil {
		return err
	}

	args := []string{"clone", b.source}
	if len(b.target) != 0 {
		args = append(args, b.target)
	}
	if output, err := b.execCommand("git", args, dir); err != nil {
		return fmt.Errorf("failed to exec 'git %s': %s: %v", strings.Join(args, " "), output, err)
	}

	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	if len(b.revision) == 0 {
		// Done!
		volumeutil.SetReady(b.getMetaDir())
		return nil
	}

	var subdir string
	switch {
	case b.target == ".":
		// If the target dir is '.', use the volume dir itself.
		subdir = path.Join(dir)
	case len(files) == 1:
		// Otherwise, use the single directory generated by the clone.
		subdir = path.Join(dir, files[0].Name())
	default:
		// The target is not '.' but the clone generated multiple entries,
		// which is unexpected.
		return fmt.Errorf("unexpected directory contents: %v", files)
	}

	if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
	}
	if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
	}

	volumeutil.SetReady(b.getMetaDir())
	return nil
}
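// The readiness gate used above (and throughout these plugins) is a small
// marker-file protocol. Below is a minimal sketch of what
// volumeutil.IsReady/SetReady plausibly look like, assuming the convention of
// a regular file named "ready" in the volume's per-pod metadata directory;
// the file name and the error handling are assumptions, not verified API.
package volumeutil

import (
	"os"
	"path"
)

// IsReady reports whether the "ready" marker file exists in dir.
func IsReady(dir string) bool {
	s, err := os.Stat(path.Join(dir, "ready"))
	return err == nil && s.Mode().IsRegular()
}

// SetReady creates the "ready" marker file in dir, creating dir first if
// necessary. Errors are deliberately swallowed here: readiness is an
// optimization, and a failed marker write only means SetUpAt re-runs next time.
func SetReady(dir string) {
	if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
		return
	}
	if f, err := os.Create(path.Join(dir, "ready")); err == nil {
		f.Close()
	}
}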
func (b *secretVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	// Getting an os.IsNotExist err here is a contingency; the directory
	// may not exist yet, in which case setup should run.
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	// If the plugin readiness file is present for this volume and
	// the setup dir is a mountpoint, this volume is already ready.
	if volumeutil.IsReady(b.getMetaDir()) && !notMnt {
		return nil
	}

	glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)

	// Wrap EmptyDir, let it do the setup.
	wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
	if err != nil {
		return err
	}
	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		return err
	}

	kubeClient := b.plugin.host.GetKubeClient()
	if kubeClient == nil {
		return fmt.Errorf("cannot setup secret volume %v because kube client is not configured", b.volName)
	}

	secret, err := kubeClient.Core().Secrets(b.pod.Namespace).Get(b.secretName)
	if err != nil {
		glog.Errorf("Couldn't get secret %v/%v", b.pod.Namespace, b.secretName)
		return err
	}

	totalBytes := totalSecretBytes(secret)
	glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes",
		b.pod.Namespace, b.secretName, len(secret.Data), totalBytes)

	for name, data := range secret.Data {
		hostFilePath := path.Join(dir, name)
		glog.V(3).Infof("Writing secret data %v/%v/%v (%v bytes) to host file %v",
			b.pod.Namespace, b.secretName, name, len(data), hostFilePath)
		err := b.writer.WriteFile(hostFilePath, data, 0444)
		if err != nil {
			glog.Errorf("Error writing secret data to host path: %v, %v", hostFilePath, err)
			return err
		}
	}

	volume.SetVolumeOwnership(b, fsGroup)

	volumeutil.SetReady(b.getMetaDir())

	return nil
}
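// The totalSecretBytes helper logged above is plausibly just a sum over the
// secret's data map. A minimal sketch (api.Secret is the Kubernetes API type
// already in scope in this file's context):
func totalSecretBytes(secret *api.Secret) int {
	totalSize := 0
	for _, bytes := range secret.Data {
		totalSize += len(bytes)
	}
	return totalSize
}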
// Test the case where the 'ready' file has been created and the pod volume dir
// is a mountpoint. Mount should not be called.
func TestPluginIdempotent(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid2")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = testclient.NewSimpleFake(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	mounter := host.GetMounter().(*mount.FakeMounter)
	mounter.MountPoints = []mount.MountPoint{
		{
			Path: podVolumeDir,
		},
	}
	util.SetReady(podMetadataDir)

	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	if len(mounter.Log) != 0 {
		t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
	}

	if _, err := os.Stat(volumePath); err != nil {
		if !os.IsNotExist(err) {
			t.Errorf("SetUp() failed unexpectedly: %v", err)
		}
	} else {
		t.Errorf("volume path should not exist: %v", volumePath)
	}
}
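// newTestHost is the shared scaffolding for these test cases. A plausible
// sketch, assuming it creates a temp root dir and wraps it in the stock fake
// VolumeHost; the volumetest package name, NewFakeVolumeHost wiring, and the
// client parameter type are assumptions modeled on common Kubernetes test
// scaffolding, not verified signatures:
func newTestHost(t *testing.T, client clientset.Interface) (string, volume.VolumeHost) {
	tempDir, err := ioutil.TempDir("", "secret_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	// Register the emptyDir plugin too, since the secret plugin wraps it.
	return tempDir, volumetest.NewFakeVolumeHost(tempDir, client, empty_dir.ProbeVolumePlugins())
}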
// SetUpAt creates a new directory and clones a git repo into it.
func (b *gitRepoVolumeBuilder) SetUpAt(dir string) error {
	if volumeutil.IsReady(b.getMetaDir()) {
		return nil
	}

	// Wrap EmptyDir, let it do the setup.
	wrapped, err := b.plugin.host.NewWrapperBuilder(wrappedVolumeSpec, &b.pod, b.opts, b.mounter)
	if err != nil {
		return err
	}
	if err := wrapped.SetUpAt(dir); err != nil {
		return err
	}

	if output, err := b.execCommand("git", []string{"clone", b.source}, dir); err != nil {
		return fmt.Errorf("failed to exec 'git clone %s': %s: %v", b.source, output, err)
	}

	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	if len(files) != 1 {
		return fmt.Errorf("unexpected directory contents: %v", files)
	}
	if len(b.revision) == 0 {
		// Done!
		volumeutil.SetReady(b.getMetaDir())
		return nil
	}

	subdir := path.Join(dir, files[0].Name())
	if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
	}
	if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
		return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
	}

	volumeutil.SetReady(b.getMetaDir())
	return nil
}
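// The execCommand helper used by both gitRepo variants runs a command in a
// working directory and returns its combined output. A minimal sketch,
// assuming the builder carries an exec interface with a Command(...) factory
// (the b.exec field name is an assumption for illustration):
func (b *gitRepoVolumeBuilder) execCommand(command string, args []string, dir string) ([]byte, error) {
	cmd := b.exec.Command(command, args...)
	cmd.SetDir(dir)
	return cmd.CombinedOutput()
}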
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the configMap data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_configmap_namespace"
		testName       = "test_configmap_name"

		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
		configMap     = configMap(testNamespace, testName)
		client        = fake.NewSimpleClientset(&configMap)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)
	defer os.RemoveAll(rootDir)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := mounter.GetPath()
	if !strings.HasSuffix(volumePath, "pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name") {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	fsGroup := int64(1001)
	err = mounter.SetUp(&fsGroup)
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestConfigMapDataInVolume(volumePath, configMap, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
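// doTestConfigMapDataInVolume verifies that every key in the configMap was
// written to the volume dir with the expected contents. A plausible sketch of
// that helper; the exact assertions are assumptions inferred from how the
// tests call it:
func doTestConfigMapDataInVolume(volumePath string, configMap api.ConfigMap, t *testing.T) {
	for key, value := range configMap.Data {
		configMapDataHostPath := path.Join(volumePath, key)
		data, err := ioutil.ReadFile(configMapDataHostPath)
		if err != nil {
			t.Fatalf("couldn't read configMap data from %v: %v", configMapDataHostPath, err)
		}
		if value != string(data) {
			t.Errorf("unexpected value for key %q: expected %q, got %q", key, value, string(data))
		}
	}
}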
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the secret data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = testclient.NewSimpleFake(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := builder.GetPath()
	if !strings.HasSuffix(volumePath, "pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name") {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestSecretDataInVolume(volumePath, secret, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
// SetUpAt creates a new directory.
func (ed *emptyDir) SetUpAt(dir string) error {
	isMnt, err := ed.mounter.IsMountPoint(dir)
	// Getting an os.IsNotExist err here is a contingency; the directory
	// may not exist yet, in which case setup should run.
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the plugin readiness file is present for this volume, and the
	// storage medium is the default, then the volume is ready. If the
	// medium is memory, and a mountpoint is present, then the volume is
	// ready.
	if volumeutil.IsReady(ed.getMetaDir()) {
		if ed.medium == api.StorageMediumMemory && isMnt {
			return nil
		} else if ed.medium == api.StorageMediumDefault {
			return nil
		}
	}

	// Determine the effective SELinuxOptions to use for this volume.
	securityContext := ""
	if selinuxEnabled() {
		securityContext, err = ed.determineEffectiveSELinuxOptions()
		if err != nil {
			return err
		}
	}

	switch ed.medium {
	case api.StorageMediumDefault:
		err = ed.setupDir(dir, securityContext)
	case api.StorageMediumMemory:
		err = ed.setupTmpfs(dir, securityContext)
	default:
		err = fmt.Errorf("unknown storage medium %q", ed.medium)
	}

	if err == nil {
		volumeutil.SetReady(ed.getMetaDir())
	}

	return err
}
/*
SetUpAt will set up a Flocker volume following this flow of calls to the
Flocker control service:

 1. Get the dataset id for the given volume name/dir.
 2. The dataset should already exist; if it doesn't, the user needs to
    create it manually first.
 3. Check the current Primary UUID.
 4. If the dataset's Primary UUID from step 2 doesn't match the one from
    step 3, update the Primary UUID for this volume.
 5. Wait until the Primary UUID has been updated, or time out.
*/
func (b flockerBuilder) SetUpAt(dir string, fsGroup *int64) error {
	if volumeutil.IsReady(b.getMetaDir()) {
		return nil
	}

	if b.client == nil {
		c, err := b.newFlockerClient()
		if err != nil {
			return err
		}
		b.client = c
	}

	datasetID, err := b.client.GetDatasetID(dir)
	if err != nil {
		return err
	}

	s, err := b.client.GetDatasetState(datasetID)
	if err != nil {
		return fmt.Errorf("the volume '%s' is not available in Flocker; you need to create it manually with the Flocker CLI before using it", dir)
	}

	primaryUUID, err := b.client.GetPrimaryUUID()
	if err != nil {
		return err
	}

	if s.Primary != primaryUUID {
		if err := b.updateDatasetPrimary(datasetID, primaryUUID); err != nil {
			return err
		}
		newState, err := b.client.GetDatasetState(datasetID)
		if err != nil {
			return fmt.Errorf("the volume '%s' failed to migrate: %v", datasetID, err)
		}
		b.flocker.path = newState.Path
	} else {
		b.flocker.path = s.Path
	}

	volumeutil.SetReady(b.getMetaDir())
	return nil
}
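// updateDatasetPrimary implements steps 4-5 of the flow above: request the
// primary change, then poll until the control service reports the new primary
// or a timeout elapses. A hedged sketch; the UpdatePrimaryForDataset method
// name and the timeout/tick constants are assumptions for illustration:
func (b flockerBuilder) updateDatasetPrimary(datasetID, primaryUUID string) error {
	// Request the migration.
	if _, err := b.client.UpdatePrimaryForDataset(primaryUUID, datasetID); err != nil {
		return err
	}

	timeout := time.NewTimer(timeoutWaitingForVolume) // assumed constant
	defer timeout.Stop()
	tick := time.NewTicker(tickerWaitingForVolume) // assumed constant
	defer tick.Stop()

	for {
		if s, err := b.client.GetDatasetState(datasetID); err == nil && s.Primary == primaryUUID {
			return nil
		}
		select {
		case <-timeout.C:
			return fmt.Errorf("timed out waiting for dataset '%s' to move to primary '%s'", datasetID, primaryUUID)
		case <-tick.C:
			// Poll again.
		}
	}
}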
// SetUpAt creates a new directory.
func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := ed.mounter.IsLikelyNotMountPoint(dir)
	// Getting an os.IsNotExist err here is a contingency; the directory
	// may not exist yet, in which case setup should run.
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the plugin readiness file is present for this volume, and the
	// storage medium is the default, then the volume is ready. If the
	// medium is memory, and a mountpoint is present, then the volume is
	// ready.
	if volumeutil.IsReady(ed.getMetaDir()) {
		if ed.medium == api.StorageMediumMemory && !notMnt {
			return nil
		} else if ed.medium == api.StorageMediumDefault {
			return nil
		}
	}

	switch ed.medium {
	case api.StorageMediumDefault:
		err = ed.setupDir(dir)
	case api.StorageMediumMemory:
		err = ed.setupTmpfs(dir)
	default:
		err = fmt.Errorf("unknown storage medium %q", ed.medium)
	}

	volume.SetVolumeOwnership(ed, fsGroup)

	if err == nil {
		volumeutil.SetReady(ed.getMetaDir())
	}

	return err
}
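// setupTmpfs is the memory-medium branch of SetUpAt above. A plausible
// sketch: create the directory, then mount tmpfs over it unless it is already
// a memory-backed mountpoint. The ed.mountDetector field and its
// GetMountMedium call are assumptions modeled on the test doubles used in the
// tests below, not verified signatures:
func (ed *emptyDir) setupTmpfs(dir string) error {
	if ed.mounter == nil {
		return fmt.Errorf("memory storage requested, but mounter is nil")
	}
	if err := ed.setupDir(dir); err != nil {
		return err
	}
	// Make SetUp idempotent: if the dir is already a memory-backed
	// mountpoint, there is nothing to do.
	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
	if err != nil {
		return err
	}
	if isMnt && medium == mediumMemory {
		return nil
	}
	return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */)
}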
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := ioutil.TempDir("/tmp", "emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
		volumeName = "test-volume"
		spec       = &api.Volume{
			Name:         volumeName,
			VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
		}

		mounter       = mount.FakeMounter{}
		mountDetector = fakeMountDetector{}
		pod           = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
		fakeChconRnr  = &fakeChconRunner{}
	)

	// Set up the SELinux options on the pod
	if config.SELinuxOptions != nil {
		pod.Spec = api.PodSpec{
			Containers: []api.Container{
				{
					SecurityContext: &api.SecurityContext{
						SELinuxOptions: config.SELinuxOptions,
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name: volumeName,
						},
					},
				},
			},
		}
	}

	if config.idempotent {
		mounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec),
		pod,
		&mounter,
		&mountDetector,
		volume.VolumeOptions{config.rootContext},
		fakeChconRnr)
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volPath := builder.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}
		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able to stat the
		// volume path, it's an error: the directory should not have been
		// created.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of chcons during setup
	if e, a := config.expectedChcons, len(fakeChconRnr.requests); e != a {
		t.Errorf("Expected %v chcon calls, got %v", e, a)
	}
	if config.expectedChcons == 1 {
		if e, a := config.expectedSELinuxContext, fakeChconRnr.requests[0].context; e != a {
			t.Errorf("Unexpected chcon context argument; expected: %v, got: %v", e, a)
		}
		if e, a := volPath, fakeChconRnr.requests[0].dir; e != a {
			t.Errorf("Unexpected chcon path argument: expected: %v, got: %v", e, a)
		}
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(mounter.Log); e != a {
		t.Errorf("Expected %v mounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(mounter.Log[0].Action != mount.FakeActionMount || mounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected mounter action during setup: %#v", mounter.Log[0])
	}
	mounter.ResetLog()

	// Make a cleaner for the volume
	teardownMedium := mediumUnknown
	if config.medium == api.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	cleanerMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal(volumeName, types.UID("poduid"), &mounter, cleanerMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}

	// Tear down the volume
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of mounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(mounter.Log); e != a {
		t.Errorf("Expected %v mounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && mounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected mounter action during teardown: %#v", mounter.Log[0])
	}
	mounter.ResetLog()
}
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
		volumeName = "test-volume"
		spec       = &v1.Volume{
			Name:         volumeName,
			VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
	)

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}
		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able to stat the
		// volume path, it's an error: the directory should not have been
		// created.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := mediumUnknown
	if config.medium == v1.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}