// ProbeVolumePlugins collects all volume plugins into an easy to use list.
// PluginDir specifies the directory to search for additional third party
// volume plugins.
func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}

	// The list of plugins to probe is decided by the kubelet binary, not
	// by dynamic linking or other "magic". Plugins will be analyzed and
	// initialized later.
	//
	// The kubelet does not currently need to configure volume plugins.
	// If/when it does, see kube-controller-manager/app/plugins.go for an
	// example of using volume.VolumeConfig.
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(pluginDir)...)
	return allPlugins
}
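// For context, the kubelet typically hands this probed list to a
// volume.VolumePluginMgr, which validates each plugin and wires it to a
// volume.VolumeHost — the same pattern the test at the end of this section
// uses via plugMgr.InitPlugins. A minimal sketch, assuming an
// already-constructed VolumeHost; initKubeletVolumePlugins is a hypothetical
// helper name, not part of the original source.
func initKubeletVolumePlugins(host volume.VolumeHost, pluginDir string) (*volume.VolumePluginMgr, error) {
	plugMgr := &volume.VolumePluginMgr{}
	// InitPlugins rejects invalid or duplicate plugins; a plugin that
	// fails initialization here is unusable by the kubelet.
	if err := plugMgr.InitPlugins(ProbeVolumePlugins(pluginDir), host); err != nil {
		return nil, err
	}
	return plugMgr, nil
}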
func testProbeVolumePlugins() []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
	allPlugins = append(allPlugins, ProbeVolumePlugins()...)
	return allPlugins
}
func (c *MasterConfig) RunPersistentVolumeProvisioner(client *client.Client) {
	provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, c.ControllerManager.VolumeConfiguration)
	if err != nil {
		// A provisioner was expected but encountered an error.
		glog.Fatal(err)
	}

	// Not all cloud providers have a provisioner.
	if provisioner != nil {
		allPlugins := []volume.VolumePlugin{}
		allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
		controllerClient := volumeclaimbinder.NewControllerClient(clientadapter.FromUnversionedClient(client))
		provisionerController, err := volumeclaimbinder.NewPersistentVolumeProvisionerController(
			controllerClient,
			c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
			c.ControllerManager.ClusterName,
			allPlugins,
			provisioner,
			c.CloudProvider,
		)
		if err != nil {
			glog.Fatalf("Unable to start persistent volume provisioner: %+v", err)
		}
		provisionerController.Run()
	}
}
// ProbeControllerVolumePlugins collects all persistent volume plugins into an
// easy to use list. Only volume plugins that implement any of the
// provisioner/recycler/deleter interfaces should be returned.
func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}

	// The list of plugins to probe is decided by this binary, not
	// by dynamic linking or other "magic". Plugins will be analyzed and
	// initialized later.

	// Each plugin can make use of VolumeConfig. The single arg to this func
	// contains *all* enumerated options meant to configure volume plugins.
	// From that single config, create an instance of volume.VolumeConfig for
	// a specific plugin and pass that instance to the plugin's
	// ProbeVolumePlugins(config) func.

	// HostPath recycling is for testing and development purposes only!
	hostPathConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath),
		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath),
		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
		ProvisioningEnabled:      config.EnableHostPathProvisioning,
	}
	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
		glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
	}
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)

	nfsConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
	}
	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
		glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
	}
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
	// Add the RBD provisioner.
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)

	if cloud != nil {
		switch {
		case aws.ProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
		case gce.ProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
		case openstack.ProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
		case vsphere.ProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
		case azure.CloudProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
		case photon.ProviderName == cloud.ProviderName():
			allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
		}
	}

	return allPlugins
}
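// AttemptToLoadRecycler is called above but not shown in this section. A
// minimal sketch of its likely behavior, inferred from the call sites: when a
// pod template file path is configured, load it and override the default
// recycler pod template. loadPodFromFile is a hypothetical decoding helper,
// not a confirmed API; substitute whatever manifest-loading utility the
// codebase provides.
func AttemptToLoadRecycler(path string, config *volume.VolumeConfig) error {
	if path == "" {
		// No override requested; keep the default template already set
		// on the config.
		return nil
	}
	recyclerPod, err := loadPodFromFile(path) // hypothetical helper
	if err != nil {
		return err
	}
	config.RecyclerPodTemplate = recyclerPod
	return nil
}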
// ProbeAttachableVolumePlugins collects all volume plugins for the attach/
// detach controller. VolumeConfiguration is used to get FlexVolumePluginDir,
// which specifies the directory to search for additional third party volume
// plugins.
// The list of plugins is manually compiled. This code and the plugin
// initialization code for kubelet really, really need a thorough refactor.
func ProbeAttachableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(config.FlexVolumePluginDir)...)
	return allPlugins
}
func (c *MasterConfig) RunPersistentVolumeClaimRecycler(recyclerImageName string, client *client.Client, namespace string) {
	uid := int64(0)
	defaultScrubPod := volume.NewPersistentVolumeRecyclerPodTemplate()
	defaultScrubPod.Namespace = namespace
	defaultScrubPod.Spec.Containers[0].Image = recyclerImageName
	defaultScrubPod.Spec.Containers[0].Command = []string{"/usr/bin/recycle"}
	defaultScrubPod.Spec.Containers[0].Args = []string{"/scrub"}
	defaultScrubPod.Spec.Containers[0].SecurityContext = &kapi.SecurityContext{RunAsUser: &uid}
	defaultScrubPod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent

	volumeConfig := c.ControllerManager.VolumeConfiguration
	hostPathConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath),
		RecyclerTimeoutIncrement: int(volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath),
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath) != 0 {
		if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
			glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
		}
	}

	nfsConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
		RecyclerTimeoutIncrement: int(volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS) != 0 {
		if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
			glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
		}
	}

	allPlugins := []volume.VolumePlugin{}
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

	// Dynamic provisioning allows deletion of volumes as a recycling
	// operation after a claim is deleted.
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)

	recycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(
		clientadapter.FromUnversionedClient(client),
		c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
		int(volumeConfig.PersistentVolumeRecyclerConfiguration.MaximumRetry),
		allPlugins,
		c.CloudProvider,
	)
	if err != nil {
		glog.Fatalf("Could not start Persistent Volume Recycler: %+v", err)
	}
	recycler.Run()
}
// probeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName string) []volume.VolumePlugin {
	uid := int64(0)
	defaultScrubPod := volume.NewPersistentVolumeRecyclerPodTemplate()
	defaultScrubPod.Namespace = namespace
	defaultScrubPod.Spec.ServiceAccountName = recyclerServiceAccountName
	defaultScrubPod.Spec.Containers[0].Image = recyclerImageName
	defaultScrubPod.Spec.Containers[0].Command = []string{"/usr/bin/openshift-recycle"}
	defaultScrubPod.Spec.Containers[0].Args = []string{"/scrub"}
	defaultScrubPod.Spec.Containers[0].SecurityContext = &kapi.SecurityContext{RunAsUser: &uid}
	defaultScrubPod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent

	allPlugins := []volume.VolumePlugin{}

	// The list of plugins to probe is decided by this binary, not
	// by dynamic linking or other "magic". Plugins will be analyzed and
	// initialized later.

	// Each plugin can make use of VolumeConfig. The single arg to this func
	// contains *all* enumerated options meant to configure volume plugins.
	// From that single config, create an instance of volume.VolumeConfig for
	// a specific plugin and pass that instance to the plugin's
	// ProbeVolumePlugins(config) func.

	// HostPath recycling is for testing and development purposes only!
	hostPathConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath),
		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath),
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if err := kctrlmgr.AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
		glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
	}
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)

	nfsConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS),
		RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS),
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if err := kctrlmgr.AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
		glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
	}
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(config.FlexVolumePluginDir)...)
	allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)

	return allPlugins
}
// NewVolumeProvisioner returns a volume provisioner to use when running in a cloud or development environment.
// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many.
// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues.
// Not all cloud providers have provisioning capability, which is why the
// returned plugin may be nil; callers must check for nil before using it.
func NewVolumeProvisioner(cloud cloudprovider.Interface, flags VolumeConfigFlags) (volume.ProvisionableVolumePlugin, error) {
	switch {
	case cloud == nil && flags.EnableHostPathProvisioning:
		return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
	case cloud != nil && aws_cloud.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
	case cloud != nil && gce_cloud.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
	case cloud != nil && openstack.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
	}
	return nil, nil
}
// NewVolumeProvisioner returns a volume provisioner to use when running in a cloud or development environment.
// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many.
// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues.
// Not all cloud providers have provisioning capability, which is why the
// returned plugin may be nil; callers must check for nil before using it.
func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
	switch {
	case cloud == nil && config.EnableHostPathProvisioning:
		return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
	case cloud != nil && aws.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
	case cloud != nil && gce.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
	case cloud != nil && openstack.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
	case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
	}
	return nil, nil
}
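// getProvisionablePluginFromVolumePlugins is used by both NewVolumeProvisioner
// variants above but is not shown in this section. A plausible sketch,
// inferred from the call sites rather than copied from the source: scan the
// probed plugins and return the first one that satisfies
// volume.ProvisionableVolumePlugin.
func getProvisionablePluginFromVolumePlugins(plugins []volume.VolumePlugin) (volume.ProvisionableVolumePlugin, error) {
	for _, plugin := range plugins {
		if provisionablePlugin, ok := plugin.(volume.ProvisionableVolumePlugin); ok {
			return provisionablePlugin, nil
		}
	}
	// A provisioner was expected from this plugin set but none was found.
	return nil, fmt.Errorf("ProvisionableVolumePlugin expected but not found in %#v", plugins)
}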
// ProbeVolumePlugins collects all volume plugins into an easy to use list.
func ProbeVolumePlugins() []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}

	// The list of plugins to probe is decided by the kubelet binary, not
	// by dynamic linking or other "magic". Plugins will be analyzed and
	// initialized later.
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
	return allPlugins
}
func (c *MasterConfig) RunPersistentVolumeProvisioner(client *client.Client) {
	provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, c.ControllerManager.VolumeConfigFlags)
	if err != nil {
		// A provisioner was expected but encountered an error.
		glog.Fatal(err)
	}

	// Not all cloud providers have a provisioner.
	if provisioner != nil {
		allPlugins := []volume.VolumePlugin{}
		allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
		controllerClient := volumeclaimbinder.NewControllerClient(client)
		provisionerController, err := volumeclaimbinder.NewPersistentVolumeProvisionerController(controllerClient, c.ControllerManager.PVClaimBinderSyncPeriod, allPlugins, provisioner, c.CloudProvider)
		if err != nil {
			glog.Fatalf("Could not start Persistent Volume Provisioner: %+v", err)
		}
		provisionerController.Run()
	}
}
// NewAlphaVolumeProvisioner returns a volume provisioner to use when running in
// a cloud or development environment. The alpha implementation of provisioning
// allows 1 implied provisioner per cloud and is here only for compatibility
// with Kubernetes 1.3.
// TODO: remove in Kubernetes 1.5.
func NewAlphaVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
	switch {
	case !utilconfig.DefaultFeatureGate.DynamicVolumeProvisioning():
		return nil, nil
	case cloud == nil && config.EnableHostPathProvisioning:
		return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(
			volume.VolumeConfig{
				ProvisioningEnabled: true,
			}))
	case cloud != nil && aws.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
	case cloud != nil && gce.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
	case cloud != nil && openstack.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
	case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
	case cloud != nil && azure.CloudProviderName == cloud.ProviderName():
		return getProvisionablePluginFromVolumePlugins(azure_dd.ProbeVolumePlugins())
	}
	return nil, nil
}
// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
func ProbeRecyclableVolumePlugins(flags VolumeConfigFlags) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}

	// The list of plugins to probe is decided by this binary, not
	// by dynamic linking or other "magic". Plugins will be analyzed and
	// initialized later.

	// Each plugin can make use of VolumeConfig. The single arg to this func
	// contains *all* enumerated CLI flags meant to configure volume plugins.
	// From that single config, create an instance of volume.VolumeConfig for
	// a specific plugin and pass that instance to the plugin's
	// ProbeVolumePlugins(config) func.

	// HostPath recycling is for testing and development purposes only!
	hostPathConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   flags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
		RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
	}
	if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, &hostPathConfig); err != nil {
		glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, err)
	}
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)

	nfsConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   flags.PersistentVolumeRecyclerMinimumTimeoutNFS,
		RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutNFS,
		RecyclerPodTemplate:      volume.NewPersistentVolumeRecyclerPodTemplate(),
	}
	if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, &nfsConfig); err != nil {
		glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, err)
	}
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	return allPlugins
}
func TestNewBuilder(t *testing.T) {
	tests := []struct {
		pv              *api.PersistentVolume
		claim           *api.PersistentVolumeClaim
		plugin          volume.VolumePlugin
		podVolume       api.VolumeSource
		testFunc        func(builder volume.Builder, plugin volume.VolumePlugin) error
		expectedFailure bool
	}{
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimA",
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if !strings.Contains(builder.GetPath(), utilstrings.EscapeQualifiedNameForDisk(plugin.Name())) {
					return fmt.Errorf("builder path expected to contain plugin name. Got: %s", builder.GetPath())
				}
				return nil
			},
			expectedFailure: false,
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvB",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						HostPath: &api.HostPathVolumeSource{Path: "/somepath"},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimB",
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimB",
					Namespace: "nsB",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimB",
				},
			},
			plugin: host_path.ProbeVolumePlugins(volume.VolumeConfig{})[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder.GetPath() != "/somepath" {
					return fmt.Errorf("Expected HostPath.Path /somepath, got: %s", builder.GetPath())
				}
				return nil
			},
			expectedFailure: false,
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder != nil {
					return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
				}
				return nil
			},
			expectedFailure: true, // missing pv.Spec.ClaimRef
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimB",
						UID:  types.UID("abc123"),
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
					UID:       types.UID("def456"),
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder != nil {
					return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
				}
				return nil
			},
			expectedFailure: true, // mismatched pv.Spec.ClaimRef and pvc
		},
	}

	for _, item := range tests {
		client := fake.NewSimpleClientset(item.pv, item.claim)

		plugMgr := volume.VolumePluginMgr{}
		tempDir, vh := newTestHost(t, client)
		defer os.RemoveAll(tempDir)
		plugMgr.InitPlugins(testProbeVolumePlugins(), vh)

		plug, err := plugMgr.FindPluginByName("kubernetes.io/persistent-claim")
		if err != nil {
			t.Errorf("Can't find the plugin by name")
		}
		spec := &volume.Spec{Volume: &api.Volume{VolumeSource: item.podVolume}}
		pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
		builder, err := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

		if !item.expectedFailure {
			if err != nil {
				t.Errorf("Failed to make a new Builder: %v", err)
			}
			if builder == nil {
				t.Errorf("Got a nil Builder: %v", builder)
			}
		}

		if err := item.testFunc(builder, item.plugin); err != nil {
			t.Errorf("Unexpected error %+v", err)
		}
	}
}
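// newTestHost is referenced by TestNewBuilder but not shown in this section. A
// plausible sketch, assuming the fake VolumeHost constructor from the volume
// testing package (volumetest.NewFakeVolumeHost and its argument order are
// assumptions, not confirmed by this source):
func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
	tempDir, err := ioutil.TempDir("", "persistent_claim_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	// The fake host ties the temp root dir, the fake client, and the probed
	// plugins together for use by the plugin manager in tests.
	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, testProbeVolumePlugins())
}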