// Provision for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	capacity := r.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes: r.options.PVC.Spec.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}
	if len(r.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
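// A hedged usage sketch (not from the source; the provisioner is taken as an argument rather
// than constructed, since its internal fields are only known from how the method above uses them):
// provision a PV for a claim and verify the backing directory exists.
func exampleProvisionHostPath(r *hostPathProvisioner) error {
	pv, err := r.Provision()
	if err != nil {
		return err
	}
	// The new PV's capacity is copied verbatim from the claim's storage request, and the
	// backing directory should already exist because Provision ran os.MkdirAll on it.
	if _, statErr := os.Stat(pv.Spec.HostPath.Path); statErr != nil {
		return statErr
	}
	return nil
}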
// Usage knows how to measure usage associated with item.
func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
	result := api.ResourceList{}
	pvc, err := toInternalPersistentVolumeClaimOrError(item)
	if err != nil {
		return result, err
	}
	storageClassRef := util.GetClaimStorageClass(pvc)

	// charge for claim
	result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
	if len(storageClassRef) > 0 {
		storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims))
		result[storageClassClaim] = resource.MustParse("1")
	}

	// charge for storage
	if request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found {
		result[api.ResourceRequestsStorage] = request
		// charge usage to the storage class (if present)
		if len(storageClassRef) > 0 {
			storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage))
			result[storageClassStorage] = request
		}
	}
	return result, nil
}
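// Illustration only (not from the source): the ResourceList Usage would return for a claim in
// storage class "gold" that requests 10Gi. The class name and request size are made-up values;
// the keys mirror exactly how Usage builds them above.
func exampleExpectedPVCUsage() api.ResourceList {
	return api.ResourceList{
		api.ResourcePersistentVolumeClaims: resource.MustParse("1"),
		api.ResourceName("gold" + storageClassSuffix + string(api.ResourcePersistentVolumeClaims)): resource.MustParse("1"),
		api.ResourceRequestsStorage: resource.MustParse("10Gi"),
		api.ResourceName("gold" + storageClassSuffix + string(api.ResourceRequestsStorage)): resource.MustParse("10Gi"),
	}
}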
func mockCustomBuild(forcePull, emptySource bool) *buildapi.Build {
	timeout := int64(60)
	src := buildapi.BuildSource{}
	if !emptySource {
		src = buildapi.BuildSource{
			Git: &buildapi.GitBuildSource{
				URI: "http://my.build.com/the/dockerbuild/Dockerfile",
				Ref: "master",
			},
			ContextDir: "foo",
			SourceSecret: &kapi.LocalObjectReference{Name: "secretFoo"},
		}
	}
	return &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{
			Name: "customBuild",
			Labels: map[string]string{
				"name": "customBuild",
			},
		},
		Spec: buildapi.BuildSpec{
			CommonSpec: buildapi.CommonSpec{
				Revision: &buildapi.SourceRevision{
					Git: &buildapi.GitSourceRevision{},
				},
				Source: src,
				Strategy: buildapi.BuildStrategy{
					CustomStrategy: &buildapi.CustomBuildStrategy{
						From: kapi.ObjectReference{
							Kind: "DockerImage",
							Name: "builder-image",
						},
						Env: []kapi.EnvVar{
							{Name: "FOO", Value: "BAR"},
						},
						ExposeDockerSocket: true,
						ForcePull: forcePull,
					},
				},
				Output: buildapi.BuildOutput{
					To: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: "docker-registry/repository/customBuild",
					},
					PushSecret: &kapi.LocalObjectReference{Name: "foo"},
				},
				Resources: kapi.ResourceRequirements{
					Limits: kapi.ResourceList{
						kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
						kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
					},
				},
				CompletionDeadlineSeconds: &timeout,
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}
}
// Provision returns a fake hostPath-backed PersistentVolume built from the provisioner's
// options; unlike the real hostPath provisioner it does not create the backing directory.
func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: fc.Options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "fakeplugin-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
			AccessModes: fc.Options.PVC.Spec.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, nil
}
func TestPersistentVolumeDeleter(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()

	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
	binder.Run()
	defer binder.Stop()

	recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
	recycler.Run()
	defer recycler.Stop()

	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
		},
	}

	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)

	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}
}
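// waitForPersistentVolumePhase is referenced above but not shown in this collection. A minimal
// sketch, assuming it simply drains the watch until a PersistentVolume reports the desired phase
// (the real helper may differ), could look like this:
func waitForPersistentVolumePhaseSketch(w watch.Interface, phase api.PersistentVolumePhase) {
	for {
		event := <-w.ResultChan()
		// Ignore events for other object types; stop once the PV reaches the wanted phase.
		if volume, ok := event.Object.(*api.PersistentVolume); ok && volume.Status.Phase == phase {
			return
		}
	}
}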
func mockSTIBuild() *buildapi.Build {
	timeout := int64(60)
	return &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{
			Name: "stiBuild",
			Labels: map[string]string{
				"name": "stiBuild",
			},
		},
		Spec: buildapi.BuildSpec{
			CommonSpec: buildapi.CommonSpec{
				Revision: &buildapi.SourceRevision{
					Git: &buildapi.GitSourceRevision{},
				},
				Source: buildapi.BuildSource{
					Git: &buildapi.GitBuildSource{
						URI: "http://my.build.com/the/stibuild/Dockerfile",
						Ref: "master",
					},
					ContextDir: "foo",
					SourceSecret: &kapi.LocalObjectReference{Name: "fooSecret"},
				},
				Strategy: buildapi.BuildStrategy{
					SourceStrategy: &buildapi.SourceBuildStrategy{
						From: kapi.ObjectReference{
							Kind: "DockerImage",
							Name: "repository/sti-builder",
						},
						PullSecret: &kapi.LocalObjectReference{Name: "bar"},
						Scripts: "http://my.build.com/the/sti/scripts",
						Env: []kapi.EnvVar{
							{Name: "BUILD_LOGLEVEL", Value: "bar"},
							{Name: "ILLEGAL", Value: "foo"},
						},
					},
				},
				Output: buildapi.BuildOutput{
					To: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: "docker-registry/repository/stiBuild",
					},
					PushSecret: &kapi.LocalObjectReference{Name: "foo"},
				},
				Resources: kapi.ResourceRequirements{
					Limits: kapi.ResourceList{
						kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
						kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
					},
				},
				CompletionDeadlineSeconds: &timeout,
				NodeSelector: nodeSelector,
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}
}
// findByClaim returns the nearest PV from the ordered list or nil if a match is not found
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVolumeClaim, matchPredicate matchPredicate) (*api.PersistentVolume, error) {
	// PVs are indexed by their access modes to allow easier searching. Each index is the string representation of a set of access modes.
	// There is a finite number of possible sets and PVs will only be indexed in one of them (whichever index matches the PV's modes).
	//
	// A request for resources will always specify its desired access modes. Any matching PV must have at least that number
	// of access modes, but it can have more. For example, a user asks for ReadWriteOnce but a GCEPD is available, which is ReadWriteOnce+ReadOnlyMany.
	//
	// Searches are performed against a set of access modes, so we can attempt not only the exact matching modes but also
	// potential matches (the GCEPD example above).
	allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)

	for _, modes := range allPossibleModes {
		volumes, err := pvIndex.ListByAccessModes(modes)
		if err != nil {
			return nil, err
		}

		// volumes are sorted by size but some may be bound or earmarked for a specific claim.
		// filter those volumes for easy binary search by size
		// return the exact pre-binding match, if found
		unboundVolumes := []*api.PersistentVolume{}
		for _, volume := range volumes {
			// volume isn't currently bound or pre-bound.
			if volume.Spec.ClaimRef == nil {
				unboundVolumes = append(unboundVolumes, volume)
				continue
			}

			if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace {
				// exact match! No search required.
				return volume, nil
			}
		}

		// a claim requesting provisioning will have an exact match pre-bound to the claim.
		// no need to search through unbound volumes. The matching volume will be created by the provisioner
		// and will match above when the claim is re-processed by the binder.
		if keyExists(qosProvisioningKey, claim.Annotations) {
			return nil, nil
		}

		searchPV := &api.PersistentVolume{
			Spec: api.PersistentVolumeSpec{
				AccessModes: claim.Spec.AccessModes,
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
				},
			},
		}

		i := sort.Search(len(unboundVolumes), func(i int) bool { return matchPredicate(searchPV, unboundVolumes[i]) })
		if i < len(unboundVolumes) {
			return unboundVolumes[i], nil
		}
	}
	return nil, nil
}
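// The matchPredicate passed to findByClaim is not shown here. A minimal sketch, assuming matching
// is done purely on storage capacity (the searchPV built above only carries the requested size and
// access modes), could look like the function below. Because sort.Search expects the predicate to
// flip from false to true, the first volume at least as large as the request is the one returned.
func matchStorageCapacitySketch(searchPV, volume *api.PersistentVolume) bool {
	searchQty := searchPV.Spec.Capacity[api.ResourceStorage]
	volumeQty := volume.Spec.Capacity[api.ResourceStorage]
	return volumeQty.Value() >= searchQty.Value()
}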
func (a *azureDiskProvisioner) Provision() (*api.PersistentVolume, error) {
	var sku, location, account string

	name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 255)
	capacity := a.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range a.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			sku = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		default:
			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
		}
	}
	// TODO: implement c.options.ProvisionerSelector parsing
	if a.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
	}

	diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
	if err != nil {
		return nil, err
	}

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: a.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
			AccessModes: a.options.PVC.Spec.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				AzureDisk: &api.AzureDiskVolumeSource{
					DiskName:    diskName,
					DataDiskURI: diskUri,
				},
			},
		},
	}
	return pv, nil
}
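// Illustration only: the Parameters consumed by the switch above typically originate from a
// StorageClass. The keys are exactly the ones the provisioner accepts; the values below are
// made-up examples, not defaults taken from the source.
var exampleAzureDiskParameters = map[string]string{
	"skuname":        "Standard_LRS",
	"location":       "eastus",
	"storageaccount": "exampledisks001",
}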
func OkStrategy() deployapi.DeploymentStrategy {
	return deployapi.DeploymentStrategy{
		Type: deployapi.DeploymentStrategyTypeRecreate,
		Resources: kapi.ResourceRequirements{
			Limits: kapi.ResourceList{
				kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
				kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
func okContainer() *kapi.Container {
	return &kapi.Container{
		Image:   "openshift/origin-deployer",
		Command: []string{"/bin/echo", "hello", "world"},
		Env:     env,
		Resources: kapi.ResourceRequirements{
			Limits: kapi.ResourceList{
				kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
				kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
func newNode(name string) *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.NodeSpec{
			ExternalID: name,
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
				api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
func OkStrategy() deployapi.DeploymentStrategy {
	return deployapi.DeploymentStrategy{
		Type: deployapi.DeploymentStrategyTypeRecreate,
		Resources: kapi.ResourceRequirements{
			Limits: kapi.ResourceList{
				kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
				kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
			},
		},
		RecreateParams: &deployapi.RecreateDeploymentStrategyParams{
			TimeoutSeconds: mkintp(20),
		},
	}
}
func mockDockerBuild() *buildapi.Build {
	return &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{
			Name: "dockerBuild",
			Labels: map[string]string{
				"name": "dockerBuild",
			},
		},
		Spec: buildapi.BuildSpec{
			Revision: &buildapi.SourceRevision{
				Git: &buildapi.GitSourceRevision{},
			},
			Source: buildapi.BuildSource{
				Git: &buildapi.GitBuildSource{
					URI: "http://my.build.com/the/dockerbuild/Dockerfile",
					Ref: "master",
				},
				ContextDir: "my/test/dir",
				SourceSecret: &kapi.LocalObjectReference{Name: "secretFoo"},
			},
			Strategy: buildapi.BuildStrategy{
				Type: buildapi.DockerBuildStrategyType,
				DockerStrategy: &buildapi.DockerBuildStrategy{
					PullSecret: &kapi.LocalObjectReference{Name: "bar"},
					Env: []kapi.EnvVar{
						{Name: "ILLEGAL", Value: "foo"},
						{Name: "BUILD_LOGLEVEL", Value: "bar"},
					},
				},
			},
			Output: buildapi.BuildOutput{
				To: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: "docker-registry/repository/dockerBuild",
				},
				PushSecret: &kapi.LocalObjectReference{Name: "foo"},
			},
			Resources: kapi.ResourceRequirements{
				Limits: kapi.ResourceList{
					kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
					kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
				},
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}
}
func TestPersistentVolumeGet(t *testing.T) {
	persistentVolume := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:      "abc",
			Namespace: "foo",
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
		},
	}

	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"),
			Query:  buildQueryValues(nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: persistentVolume},
	}

	response, err := c.Setup(t).PersistentVolumes().Get("abc")
	c.Validate(t, response, err)
}
func TestPersistentVolumeStatusUpdate(t *testing.T) {
	persistentVolume := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:            "abc",
			ResourceVersion: "1",
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
		},
		Status: api.PersistentVolumeStatus{
			Phase:   api.VolumeBound,
			Message: "foo",
		},
	}

	c := &testClient{
		Request: testRequest{
			Method: "PUT",
			Path:   testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc") + "/status",
			Query:  buildQueryValues(nil)},
		Response: Response{StatusCode: 200, Body: persistentVolume},
	}

	response, err := c.Setup(t).PersistentVolumes().UpdateStatus(persistentVolume)
	c.Validate(t, response, err)
}
func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
	vmDiskPath, sizeKB, err := v.manager.CreateVolume(v)
	if err != nil {
		return nil, err
	}

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:   v.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "vsphere-volume-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
			AccessModes: v.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
					VolumePath: vmDiskPath,
					FSType:     "ext4",
				},
			},
		},
	}
	return pv, nil
}
func TestPersistentVolumeClaimGet(t *testing.T) {
	ns := api.NamespaceDefault
	persistentVolumeClaim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "abc",
			Namespace: "foo",
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
			},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
				},
			},
		},
	}

	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"),
			Query:  buildQueryValues(nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: persistentVolumeClaim},
	}

	response, err := c.Setup(t).PersistentVolumeClaims(ns).Get("abc")
	c.Validate(t, response, err)
}
func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) {
	if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
		return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
	}

	var pv *api.PersistentVolume
	err := plugin.provisionCalls[plugin.provisionCallCounter]
	if err == nil {
		// Create a fake PV with known GCE volume (to match expected volume)
		pv = &api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{
				Name: plugin.provisionOptions.PVName,
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity,
				},
				AccessModes: plugin.provisionOptions.AccessModes,
				PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy,
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
			},
		}
	}

	plugin.provisionCallCounter++
	glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err)
	return pv, err
}
func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
	volumeID, sizeGB, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "cinder-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes: c.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				Cinder: &api.CinderVolumeSource{
					VolumeID: volumeID,
					FSType:   "ext4",
					ReadOnly: false,
				},
			},
		},
	}
	return pv, nil
}
// makePersistentVolume returns a recyclable NFS-backed PersistentVolume template pointing at the
// given server IP.
func makePersistentVolume(serverIP string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "nfs-",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				NFS: &api.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/",
					ReadOnly: false,
				},
			},
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
				api.ReadWriteMany,
			},
		},
	}
}
func TestPersistentVolumeCreate(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "abc",
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
		},
	}

	c := &simple.Client{
		Request: simple.Request{
			Method: "POST",
			Path:   testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", ""),
			Query:  simple.BuildQueryValues(nil),
			Body:   pv,
		},
		Response: simple.Response{StatusCode: 200, Body: pv},
	}

	response, err := c.Setup(t).PersistentVolumes().Create(pv)
	defer c.Close()
	c.Validate(t, response, err)
}
// withExpectedCapacity sets the claim.Status.Capacity of the first claim in the
// array to given value and returns the array. Meant to be used to compose
// claims specified inline in a test.
func withExpectedCapacity(capacity string, claims []*api.PersistentVolumeClaim) []*api.PersistentVolumeClaim {
	claims[0].Status.Capacity = api.ResourceList{
		api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
	}

	return claims
}
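// A hedged usage sketch: composing a bound 1Gi claim inline and recording the capacity a test
// expects to see reported in its status. newClaim here is the (name, claimUID, capacity,
// boundToVolume, phase, annotations...) helper that appears later in this collection; the
// concrete argument values are illustrative only.
func exampleExpectedClaims() []*api.PersistentVolumeClaim {
	return withExpectedCapacity("1Gi", []*api.PersistentVolumeClaim{
		newClaim("claim01", "uid01", "1Gi", "volume01", api.ClaimBound),
	})
}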
// addQuota enqueues a ResourceQuota, prioritizing quotas whose status has not caught up with
// their spec or whose declared constraints are missing usage this controller can calculate.
func (rq *ResourceQuotaController) addQuota(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}

	resourceQuota := obj.(*v1.ResourceQuota)

	// if we declared an intent that is not yet captured in status (prioritize it)
	if !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
		rq.missingUsageQueue.Add(key)
		return
	}

	// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
	for constraint := range resourceQuota.Status.Hard {
		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
			matchedResources := []api.ResourceName{api.ResourceName(constraint)}
			for _, evaluator := range rq.registry.Evaluators() {
				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
					rq.missingUsageQueue.Add(key)
					return
				}
			}
		}
	}

	// no special priority, go in normal recalc queue
	rq.queue.Add(key)
}
func createMissingPVs(c *k8sclient.Client, ns string) {
	found, pvcs, pendingClaimNames := findPendingPVs(c, ns)
	if found {
		sshCommand := ""
		createPV(c, ns, pendingClaimNames, sshCommand)
		items := pvcs.Items
		for _, item := range items {
			status := item.Status.Phase
			if status == api.ClaimPending || status == api.ClaimLost {
				err := c.PersistentVolumeClaims(ns).Delete(item.ObjectMeta.Name)
				if err != nil {
					util.Infof("Error deleting PVC %s\n", item.ObjectMeta.Name)
				} else {
					util.Infof("Recreating PVC %s\n", item.ObjectMeta.Name)

					c.PersistentVolumeClaims(ns).Create(&api.PersistentVolumeClaim{
						ObjectMeta: api.ObjectMeta{
							Name:      item.ObjectMeta.Name,
							Namespace: ns,
						},
						Spec: api.PersistentVolumeClaimSpec{
							VolumeName:  ns + "-" + item.ObjectMeta.Name,
							AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
							Resources: api.ResourceRequirements{
								Requests: api.ResourceList{
									api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
								},
							},
						},
					})
				}
			}
		}
	}
}
func TestCalculateTimeoutForVolume(t *testing.T) {
	pv := &api.PersistentVolume{
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("500M"),
			},
		},
	}

	timeout := CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 50 {
		t.Errorf("Expected 50 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("2Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 60 {
		t.Errorf("Expected 60 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("150Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 4500 {
		t.Errorf("Expected 4500 for timeout but got %v", timeout)
	}
}
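// CalculateTimeoutForVolume itself is not included in this collection. A minimal sketch that is
// consistent with the expectations above (a floor of minimumTimeout, plus timeoutIncrement per
// whole Gi of capacity) could look like this; the real signature and rounding may differ.
func calculateTimeoutForVolumeSketch(minimumTimeout, timeoutIncrement int, pv *api.PersistentVolume) int64 {
	giQty := resource.MustParse("1Gi")
	pvQty := pv.Spec.Capacity[api.ResourceStorage]
	timeout := (pvQty.Value() / giQty.Value()) * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}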
// newClaim returns a new claim with given attributes
func newClaim(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) *api.PersistentVolumeClaim {
	claim := api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:            name,
			Namespace:       testNamespace,
			UID:             types.UID(claimUID),
			ResourceVersion: "1",
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
				},
			},
			VolumeName: boundToVolume,
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: phase,
		},
	}
	// Make sure api.GetReference(claim) works
	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name)

	if len(annotations) > 0 {
		claim.Annotations = make(map[string]string)
		for _, a := range annotations {
			claim.Annotations[a] = "yes"
		}
	}
	return &claim
}
func (c *awsElasticBlockStoreProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
	// Provide dummy api.PersistentVolume.Spec, it will be filled in
	// awsElasticBlockStoreProvisioner.Provision()
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-aws-",
			Labels:       map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes: c.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): c.options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
					VolumeID:  "dummy",
					FSType:    "ext4",
					Partition: 0,
					ReadOnly:  false,
				},
			},
		},
	}, nil
}
func OkRollingStrategy() deployapi.DeploymentStrategy {
	return deployapi.DeploymentStrategy{
		Type: deployapi.DeploymentStrategyTypeRolling,
		RollingParams: &deployapi.RollingDeploymentStrategyParams{
			UpdatePeriodSeconds: mkintp(1),
			IntervalSeconds:     mkintp(1),
			TimeoutSeconds:      mkintp(20),
		},
		Resources: kapi.ResourceRequirements{
			Limits: kapi.ResourceList{
				kapi.ResourceName(kapi.ResourceCPU):    resource.MustParse("10"),
				kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
	err := os.MkdirAll(fullpath, 0750)
	if err != nil {
		return nil, err
	}

	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-hostpath-",
			Labels: map[string]string{
				"createdby": "hostpath dynamic provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes: r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}, nil
}
func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
	claim := api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
			},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse(requestedSize),
				},
			},
		},
	}

	if alpha {
		claim.Annotations = map[string]string{
			storageutil.AlphaStorageClassAnnotation: "",
		}
	} else {
		claim.Annotations = map[string]string{
			storageutil.StorageClassAnnotation: "fast",
		}
	}

	return &claim
}