Example #1
// Provision for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): capacity,
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}
	if len(r.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
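Every provisioner in this collection reads the requested capacity the same way: index the claim's Requests ResourceList with the storage resource name and get a resource.Quantity back by value. A minimal, self-contained sketch of that pattern; the import paths and field names follow a modern client-go layout and may differ from the Kubernetes release these snippets were taken from:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: v1.PersistentVolumeClaimSpec{
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					// v1.ResourceStorage is already a v1.ResourceName, so the
					// v1.ResourceName(...) conversion seen in the examples is optional.
					v1.ResourceStorage: resource.MustParse("8Gi"),
				},
			},
		},
	}

	// Indexing a ResourceList returns a resource.Quantity by value;
	// Value() reports the request in bytes.
	capacity := pvc.Spec.Resources.Requests[v1.ResourceStorage]
	fmt.Println(capacity.Value()) // 8589934592
}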
Example #2
func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: fc.Options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "fakeplugin-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
			AccessModes:                   fc.Options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, nil
}
Example #3
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
	var sku, location, account string

	// maxLength = 79 - (4 for ".vhd") = 75
	name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75)
	capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range a.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			sku = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		default:
			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
		}
	}
	// TODO: implement a.options.ProvisionerSelector parsing
	if a.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
	}

	diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   a.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   a.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					DiskName:    diskName,
					DataDiskURI: diskUri,
				},
			},
		},
	}
	return pv, nil
}
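volume.RoundUpSize, used here and in several later examples, is plain integer ceiling division: it rounds a byte count up to the next whole allocation unit and returns the number of units. A sketch consistent with how the examples call it, not necessarily the exact upstream helper:

// RoundUpSize rounds volumeSizeBytes up to a multiple of allocationUnitBytes
// and returns the number of units, e.g. a 1.5GiB request with a 1GiB unit
// yields 2.
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}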
Example #4
// NewNode is a helper function for creating Nodes for testing.
func NewNode(name string) *v1.Node {
	return &v1.Node{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.NodeSpec{
			ExternalID: name,
		},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10"),
				v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
Example #5
func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(expectedSize)
	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(requestedSize)
	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test/data (/mnt/test is the volume).
	// - The second runs grep 'hello world' on /mnt/test/data.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	By("deleting the claim")
	framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))

	// Wait for the PV to get deleted. Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up a pod, however it should succeed in a
	// couple of minutes. Wait 20 minutes to recover from random cloud hiccups.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
Example #6
func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	var err error
	if r.options.PVC.Spec.Selector != nil {
		glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
		return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
	}
	glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)

	cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient())
	if err != nil {
		return nil, err
	}
	r.provisioningConfig = *cfg

	glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisioningConfig)
	glusterfs, sizeGB, err := r.CreateVolume()
	if err != nil {
		glog.Errorf("glusterfs: create volume err: %v.", err)
		return nil, fmt.Errorf("glusterfs: create volume err: %v.", err)
	}
	pv := new(v1.PersistentVolume)
	pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
	pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
	pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
	if len(pv.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}
	pv.Spec.Capacity = v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
	}
	return pv, nil
}
Example #7
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *v1.QuobyteVolumeSource, size int, err error) {
	capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
	// Quobyte volumes don't have a fixed size (they can grow without limit);
	// to simulate a size constraint we could set a quota here.
	volumeRequest := &quobyte_api.CreateVolumeRequest{
		Name:              provisioner.volume,
		RootUserID:        provisioner.user,
		RootGroupID:       provisioner.group,
		TenantID:          provisioner.tenant,
		ConfigurationName: provisioner.config,
	}

	if _, err := manager.createQuobyteClient().CreateVolume(volumeRequest); err != nil {
		return &v1.QuobyteVolumeSource{}, volumeSize, err
	}

	glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume)
	return &v1.QuobyteVolumeSource{
		Registry: provisioner.registry,
		Volume:   provisioner.volume,
		User:     provisioner.user,
		Group:    provisioner.group,
	}, volumeSize, nil
}
Example #8
func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	vmDiskPath, sizeKB, err := v.manager.CreateVolume(v)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name:   v.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "vsphere-volume-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   v.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
					VolumePath: vmDiskPath,
					FSType:     "ext4",
				},
			},
		},
	}
	if len(v.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = v.plugin.GetAccessModes()
	}

	return pv, nil
}
Example #9
func TestCalculateTimeoutForVolume(t *testing.T) {
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
			},
		},
	}

	timeout := CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 50 {
		t.Errorf("Expected 50 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 60 {
		t.Errorf("Expected 60 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 4500 {
		t.Errorf("Expected 4500 for timeout but got %v", timeout)
	}
}
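The three cases above pin the formula down: timeoutIncrement seconds per whole Gi of capacity, floored at minimumTimeout (so anything under 1Gi gets the minimum). A sketch of an implementation consistent with this test, not necessarily the exact upstream code:

// CalculateTimeoutForVolume scales the recycler timeout with volume size:
// timeoutIncrement seconds per Gi, never less than minimumTimeout.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
	giQty := resource.MustParse("1Gi")
	pvQty := pv.Spec.Capacity[v1.ResourceStorage]

	timeout := (pvQty.Value() / giQty.Value()) * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}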
Example #10
func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
	pdID, sizeGB, err := p.manager.CreateVolume(p)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name:   p.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "photon-volume-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   p.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
					PdID:   pdID,
					FSType: "ext4",
				},
			},
		},
	}
	if len(p.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = p.plugin.GetAccessModes()
	}

	return pv, nil
}
Example #11
func newClaim(ns string, alpha bool) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(requestedSize),
				},
			},
		},
	}

	if alpha {
		claim.Annotations = map[string]string{
			storageutil.AlphaStorageClassAnnotation: "",
		}
	} else {
		claim.Annotations = map[string]string{
			storageutil.StorageClassAnnotation: "fast",
		}
	}

	return &claim
}
Example #12
// Returns a PVC definition based on the namespace.
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not
//   known until the PV is instantiated, then the func createPVPVC will add
//   pvc.Spec.VolumeName to this claim.
func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
	// Specs are expected to match this test's PersistentVolume

	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
			Annotations: map[string]string{
				"volume.beta.kubernetes.io/storage-class": "",
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
				v1.ReadWriteMany,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}
}
Example #13
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, size int, err error) {
	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// convert to MiB, which rbd uses by default
	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
	volSz := fmt.Sprintf("%d", sz)
	// rbd create
	l := len(p.rbdMounter.Mon)
	// pick a mon randomly
	start := rand.Int() % l
	// iterate all monitors until create succeeds.
	for i := start; i < start+l; i++ {
		mon := p.rbdMounter.Mon[i%l]
		glog.V(4).Infof("rbd: create %s size %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
		var output []byte
		output, err = p.rbdMounter.plugin.execCommand("rbd",
			[]string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", "1"})
		if err == nil {
			break
		} else {
			glog.Warningf("failed to create rbd image, output %v", string(output))
		}
	}

	if err != nil {
		glog.Errorf("rbd: Error creating rbd image: %v", err)
		return nil, 0, err
	}

	return &v1.RBDVolumeSource{
		CephMonitors: p.rbdMounter.Mon,
		RBDImage:     p.rbdMounter.Image,
		RBDPool:      p.rbdMounter.Pool,
	}, sz, nil
}
Example #14
func componentResources(cpu string) api.ResourceRequirements {
	return api.ResourceRequirements{
		Requests: api.ResourceList{
			api.ResourceName(api.ResourceCPU): resource.MustParse(cpu),
		},
	}
}
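Hypothetical usage: the returned requirements carry a single CPU request, and callers read it back with the same ResourceList indexing used throughout these examples (Quantity.String() round-trips the original notation):

rr := componentResources("250m")
cpu := rr.Requests[api.ResourceCPU]
fmt.Println(cpu.String()) // "250m"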
Example #15
// withExpectedCapacity sets claim.Status.Capacity of the first claim in the
// array to the given value and returns the array. Meant to be used to compose
// claims specified inline in a test.
func withExpectedCapacity(capacity string, claims []*v1.PersistentVolumeClaim) []*v1.PersistentVolumeClaim {
	claims[0].Status.Capacity = v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
	}

	return claims
}
Example #16
func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume {
	// Specs are expected to match this test's PersistentVolumeClaim

	var claimRef *v1.ObjectReference

	if pvConfig.prebind != nil {
		claimRef = &v1.ObjectReference{
			Name:      pvConfig.prebind.Name,
			Namespace: pvConfig.prebind.Namespace,
		}
	}

	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: pvConfig.namePrefix,
			Annotations: map[string]string{
				volumehelper.VolumeGidAnnotationKey: "777",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
			},
			PersistentVolumeSource: pvConfig.pvSource,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
				v1.ReadWriteMany,
			},
			ClaimRef: claimRef,
		},
	}
}
Example #17
func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	volumeID, sizeGB, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "cinder-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Cinder: &v1.CinderVolumeSource{
					VolumeID: volumeID,
					FSType:   "ext4",
					ReadOnly: false,
				},
			},
		},
	}
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	return pv, nil
}
Example #18
func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	if provisioner.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim Selector is not supported")
	}
	provisioner.config = "BASE"
	provisioner.tenant = "DEFAULT"

	cfg, err := parseAPIConfig(provisioner.plugin, provisioner.options.Parameters)
	if err != nil {
		return nil, err
	}
	for k, v := range provisioner.options.Parameters {
		switch gostrings.ToLower(k) {
		case "registry":
			provisioner.registry = v
		case "user":
			provisioner.user = v
		case "group":
			provisioner.group = v
		case "quobytetenant":
			provisioner.tenant = v
		case "quobyteconfig":
			provisioner.config = v
		case "adminsecretname",
			"adminsecretnamespace",
			"quobyteapiserver":
			continue
		default:
			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, provisioner.plugin.GetPluginName())
		}
	}

	if !validateRegistry(provisioner.registry) {
		return nil, fmt.Errorf("Quoybte registry missing or malformed: must be a host:port pair or multiple pairs separated by commas")
	}

	// create a random volume name
	provisioner.volume = fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())

	manager := &quobyteVolumeManager{
		config: cfg,
	}

	vol, sizeGB, err := manager.createVolume(provisioner)
	if err != nil {
		return nil, err
	}
	pv := new(v1.PersistentVolume)
	pv.Spec.PersistentVolumeSource.Quobyte = vol
	pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy
	pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes
	if len(pv.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = provisioner.plugin.GetAccessModes()
	}
	pv.Spec.Capacity = v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
	}
	return pv, nil
}
Example #19
func autoConvert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*federation.ClusterStatus))(in)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]ClusterCondition, len(*in))
		for i := range *in {
			if err := Convert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Conditions = nil
	}
	if in.Capacity != nil {
		in, out := &in.Capacity, &out.Capacity
		*out = make(v1.ResourceList, len(*in))
		for key, val := range *in {
			newVal := new(resource.Quantity)
			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
				return err
			}
			(*out)[v1.ResourceName(key)] = *newVal
		}
	} else {
		out.Capacity = nil
	}
	if in.Allocatable != nil {
		in, out := &in.Allocatable, &out.Allocatable
		*out = make(v1.ResourceList, len(*in))
		for key, val := range *in {
			newVal := new(resource.Quantity)
			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
				return err
			}
			(*out)[v1.ResourceName(key)] = *newVal
		}
	} else {
		out.Allocatable = nil
	}
	if err := Convert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(&in.ClusterMeta, &out.ClusterMeta, s); err != nil {
		return err
	}
	return nil
}
Example #20
// CreateVolume creates a GCE PD.
// Returns: volumeID, volumeSizeGB, labels, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, error) {
	cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
	if err != nil {
		return "", 0, nil, err
	}

	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// Apply Parameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	diskType := ""
	zone := ""
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			diskType = v
		case "zone":
			zone = v
		default:
			return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}

	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
	}

	if zone == "" {
		// No zone specified, choose one randomly in the same region as the
		// node is running.
		zones, err := cloud.GetAllZones()
		if err != nil {
			glog.V(2).Infof("error getting zone information from GCE: %v", err)
			return "", 0, nil, err
		}
		zone = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
	}

	err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, nil, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)

	labels, err := cloud.GetAutoLabelsForPD(name, zone)
	if err != nil {
		// We don't really want to leak the volume here...
		glog.Errorf("error getting labels for volume %q: %v", name, err)
	}

	return name, int(requestGB), labels, nil
}
Example #21
func autoConvert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]ClusterCondition, len(*in))
		for i := range *in {
			if err := Convert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Conditions = nil
	}
	if in.Capacity != nil {
		in, out := &in.Capacity, &out.Capacity
		*out = make(v1.ResourceList, len(*in))
		for key, val := range *in {
			newVal := new(resource.Quantity)
			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
				return err
			}
			(*out)[v1.ResourceName(key)] = *newVal
		}
	} else {
		out.Capacity = nil
	}
	if in.Allocatable != nil {
		in, out := &in.Allocatable, &out.Allocatable
		*out = make(v1.ResourceList, len(*in))
		for key, val := range *in {
			newVal := new(resource.Quantity)
			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil {
				return err
			}
			(*out)[v1.ResourceName(key)] = *newVal
		}
	} else {
		out.Allocatable = nil
	}
	if err := Convert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(&in.ClusterMeta, &out.ClusterMeta, s); err != nil {
		return err
	}
	out.Zones = in.Zones
	out.Region = in.Region
	return nil
}
Example #22
func convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(in *expapi.ResourceConsumption, out *ResourceConsumption, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*expapi.ResourceConsumption))(in)
	}
	out.Resource = v1.ResourceName(in.Resource)
	if err := s.Convert(&in.Quantity, &out.Quantity, 0); err != nil {
		return err
	}
	return nil
}
Example #23
func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource:        v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: path}},
			Capacity:                      v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)},
			AccessModes:                   mode,
			PersistentVolumeReclaimPolicy: reclaim,
		},
	}
}
Example #24
func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	var err error
	if r.options.PVC.Spec.Selector != nil {
		glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
		return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
	}
	glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)
	scName := storageutil.GetClaimStorageClass(r.options.PVC)
	cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient())
	if err != nil {
		return nil, err
	}
	r.provisioningConfig = *cfg

	glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisioningConfig)

	gidTable, err := r.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
	if err != nil {
		return nil, fmt.Errorf("glusterfs: failed to get gidTable: %v", err)
	}

	gid, _, err := gidTable.AllocateNext()
	if err != nil {
		glog.Errorf("glusterfs: failed to reserve gid from table: %v", err)
		return nil, fmt.Errorf("glusterfs: failed to reserve gid from table: %v", err)
	}

	glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, r.options.PVC.Name)

	glusterfs, sizeGB, err := r.CreateVolume(gid)
	if err != nil {
		if release_err := gidTable.Release(gid); release_err != nil {
			glog.Errorf("glusterfs:  error when releasing gid in storageclass: %s", scName)
		}

		glog.Errorf("glusterfs: create volume err: %v.", err)
		return nil, fmt.Errorf("glusterfs: create volume err: %v.", err)
	}
	pv := new(v1.PersistentVolume)
	pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
	pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
	pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
	if len(pv.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}

	gidStr := strconv.FormatInt(int64(gid), 10)
	pv.Annotations = map[string]string{volumehelper.VolumeGidAnnotationKey: gidStr}

	pv.Spec.Capacity = v1.ResourceList{
		v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
	}
	return pv, nil
}
Example #25
func createPVC(name, namespace, cap string, mode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			Resources:   v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)}},
			AccessModes: mode,
		},
	}
}
Example #26
func testVolume(name, size string) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{},
		},
		Spec: v1.PersistentVolumeSpec{
			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(size)},
			PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
			AccessModes:            []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		},
	}
}
Example #27
File: qos.go Project: alex-mohr/kubernetes
// allResources returns a map of every supported QoS compute resource; a resource
// maps to true if the container sets it in its requests or limits.
func allResources(container *v1.Container) map[v1.ResourceName]bool {
	resources := map[v1.ResourceName]bool{}
	for _, resource := range supportedQoSComputeResources.List() {
		resources[v1.ResourceName(resource)] = false
	}
	for resource := range container.Resources.Requests {
		resources[resource] = true
	}
	for resource := range container.Resources.Limits {
		resources[resource] = true
	}
	return resources
}
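Hypothetical usage, assuming supportedQoSComputeResources contains cpu and memory: a container that requests only CPU comes back with cpu mapped to true and memory to false, which is how QoS-class logic spots partially specified resources:

container := &v1.Container{
	Resources: v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU: resource.MustParse("100m"),
		},
	},
}
got := allResources(container)
fmt.Println(got[v1.ResourceCPU], got[v1.ResourceMemory]) // true false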
Example #28
func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
	if len(c.options.Parameters) > 0 {
		return nil, fmt.Errorf("Provisioning failed: Specified at least one unsupported parameter")
	}

	if c.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("Provisioning failed: Specified unsupported selector")
	}

	datasetUUID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "flocker-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Flocker: &v1.FlockerVolumeSource{
					DatasetUUID: datasetUUID,
				},
			},
		},
	}
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}

	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}

	return pv, nil
}
Example #29
func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
	metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
	params := map[string]string{"labelSelector": selector.String()}

	resultRaw, err := h.services.
		ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
		DoRaw()
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
	}

	glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

	metrics := metrics_api.PodMetricsList{}
	err = json.Unmarshal(resultRaw, &metrics)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
	}

	if len(metrics.Items) == 0 {
		return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
	}

	res := make(PodResourceInfo, len(metrics.Items))

	for _, m := range metrics.Items {
		podSum := int64(0)
		missing := len(m.Containers) == 0
		for _, c := range m.Containers {
			resValue, found := c.Usage[v1.ResourceName(resource)]
			if !found {
				missing = true
				glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
				continue
			}
			podSum += resValue.MilliValue()
		}

		if !missing {
			res[m.Name] = podSum
		}
	}

	timestamp := time.Time{}
	if len(metrics.Items) > 0 {
		timestamp = metrics.Items[0].Timestamp.Time
	}

	return res, timestamp, nil
}
Example #30
func (plugin *mockVolumePlugin) Provision() (*v1.PersistentVolume, error) {
	if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
		return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
	}

	var pv *v1.PersistentVolume
	call := plugin.provisionCalls[plugin.provisionCallCounter]
	if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
		glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
		return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
	}
	if call.ret == nil {
		// Create a fake PV with known GCE volume (to match expected volume)
		capacity := plugin.provisionOptions.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
		accessModes := plugin.provisionOptions.PVC.Spec.AccessModes
		pv = &v1.PersistentVolume{
			ObjectMeta: v1.ObjectMeta{
				Name: plugin.provisionOptions.PVName,
			},
			Spec: v1.PersistentVolumeSpec{
				Capacity: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): capacity,
				},
				AccessModes:                   accessModes,
				PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy,
				PersistentVolumeSource: v1.PersistentVolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
				},
			},
		}
	}

	plugin.provisionCallCounter++
	glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
	return pv, call.ret
}