Example #1
File: rbd_util.go Project: ncdc/kubernetes
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSource, size int, err error) {
	volSizeBytes := p.options.Capacity.Value()
	// convert to MiB, which is the unit rbd defaults to
	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
	volSz := fmt.Sprintf("%d", sz)
	// rbd create
	l := len(p.rbdMounter.Mon)
	// pick a mon randomly
	start := rand.Int() % l
	// iterate all monitors until create succeeds.
	for i := start; i < start+l; i++ {
		mon := p.Mon[i%l]
		glog.V(4).Infof("rbd: create %s size %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
		var output []byte
		output, err = p.rbdMounter.plugin.execCommand("rbd",
			[]string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", "1"})
		if err == nil {
			break
		} else {
			glog.Warningf("failed to create rbd image, output %v", string(output))
		}
	}

	if err != nil {
		glog.Errorf("rbd: Error creating rbd image: %v", err)
		return nil, 0, err
	}

	return &api.RBDVolumeSource{
		CephMonitors: p.rbdMounter.Mon,
		RBDImage:     p.rbdMounter.Image,
		RBDPool:      p.rbdMounter.Pool,
	}, sz, nil
}
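Most of the examples on this page convert a requested capacity in bytes into whole allocation units with volume.RoundUpSize. A minimal sketch of that ceiling division, under the assumption that it takes the byte count and the allocation unit as int64 and returns the number of units (the function name below is illustrative, not the library function itself):

// roundUpSizeSketch returns how many allocationUnitBytes-sized units are needed
// to hold volumeSizeBytes, rounding up. For example, 1.5 GiB with a 1 GiB unit
// yields 2, matching the "convert ... with rounding up" comments above.
func roundUpSizeSketch(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}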
Example #2
func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) {
	volSizeBytes := p.options.Capacity.Value()
	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	glog.V(2).Infof("glusterfs: create volume of size:%d bytes", volSizeBytes)
	if p.glusterfsClusterConf.glusterRestUrl == "" {
		glog.Errorf("glusterfs : rest server endpoint is empty")
		return nil, 0, fmt.Errorf("failed to create gluster REST client, REST URL is empty")
	}
	cli := gcli.NewClient(p.glusterRestUrl, p.glusterRestUser, p.glusterRestUserKey)
	if cli == nil {
		glog.Errorf("glusterfs: failed to create gluster rest client")
		return nil, 0, fmt.Errorf("failed to create gluster REST client, REST server authentication failed")
	}
	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Durability: gapi.VolumeDurabilityInfo{Type: durabilitytype, Replicate: gapi.ReplicaDurability{Replica: replicacount}}}
	volume, err := cli.VolumeCreate(volumeReq)
	if err != nil {
		glog.Errorf("glusterfs: error creating volume %s ", err)
		return nil, 0, fmt.Errorf("error creating volume %v", err)
	}
	glog.V(1).Infof("glusterfs: volume with size :%d and name:%s created", volume.Size, volume.Name)
	return &api.GlusterfsVolumeSource{
		EndpointsName: p.glusterfsClusterConf.glusterep,
		Path:          volume.Name,
		ReadOnly:      false,
	}, sz, nil
}
Example #3
// CreateVolume creates a GCE PD.
// Returns: volumeID, volumeSizeGB, labels, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, error) {
	cloud, err := getCloudProvider(c.gcePersistentDisk.plugin)
	if err != nil {
		return "", 0, nil, err
	}

	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
	requestBytes := c.options.Capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// The disk will be created in the zone in which this code is currently running
	// TODO: We should support auto-provisioning volumes in multiple/specified zones
	zone, err := cloud.GetZone()
	if err != nil {
		glog.V(2).Infof("error getting zone information from GCE: %v", err)
		return "", 0, nil, err
	}

	err = cloud.CreateDisk(name, zone.FailureDomain, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, nil, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)

	labels, err := cloud.GetAutoLabelsForPD(name)
	if err != nil {
		// We don't really want to leak the volume here...
		glog.Errorf("error getting labels for volume %q: %v", name, err)
	}

	return name, int(requestGB), labels, nil
}
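Several provisioners above derive the disk name with volume.GenerateVolumeName(clusterName, pvName, maxLength), truncating to the backend's limit (63 characters for GCE PD names, 255 for AWS tags). A minimal sketch of that behavior, assuming a "<clusterName>-dynamic-<pvName>" scheme in which only the prefix is shortened to respect the limit (the helper below is illustrative, not the library function itself):

// generateVolumeNameSketch joins a cluster-derived prefix with the PV name and
// trims the prefix when the combined string would exceed maxLength.
func generateVolumeNameSketch(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	// Shorten the prefix, never the PV name, if the result is too long.
	if over := len(prefix) + 1 + len(pvName) - maxLength; over > 0 && over <= len(prefix) {
		prefix = prefix[:len(prefix)-over]
	}
	return prefix + "-" + pvName
}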
Example #4
File: gce_util.go Project: richm/origin
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
	requestBytes := c.options.Capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// The disk will be created in the zone in which this code is currently running
	// TODO: We should support auto-provisioning volumes in multiple/specified zones
	zone, err := cloud.GetZone()
	if err != nil {
		glog.V(2).Infof("error getting zone information from GCE: %v", err)
		return "", 0, err
	}

	err = cloud.CreateDisk(name, zone.FailureDomain, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)
	return name, int(requestGB), nil
}
Example #5
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *api.QuobyteVolumeSource, size int, err error) {
	capacity := provisioner.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
	// Quobyte volumes don't have a specific size (they can grow without limit);
	// to simulate a size constraint we could set a quota here.
	volumeRequest := &quobyte_api.CreateVolumeRequest{
		Name:              provisioner.volume,
		RootUserID:        provisioner.user,
		RootGroupID:       provisioner.group,
		TenantID:          provisioner.tenant,
		ConfigurationName: provisioner.config,
	}

	if _, err := manager.createQuobyteClient().CreateVolume(volumeRequest); err != nil {
		return &api.QuobyteVolumeSource{}, volumeSize, err
	}

	glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume)
	return &api.QuobyteVolumeSource{
		Registry: provisioner.registry,
		Volume:   provisioner.volume,
		User:     provisioner.user,
		Group:    provisioner.group,
	}, volumeSize, nil
}
Example #6
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	// AWS volumes don't have a Name field; store the name in the Name tag
	var tags map[string]string
	if c.options.CloudTags == nil {
		tags = make(map[string]string)
	} else {
		tags = *c.options.CloudTags
	}
	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

	requestBytes := c.options.Capacity.Value()
	// AWS works with gigabytes, convert to GiB with rounding up
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	volumeOptions := &aws.VolumeOptions{
		CapacityGB: requestGB,
		Tags:       &tags,
	}

	name, err := cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created EBS Disk volume %s", name)
	return name, int(requestGB), nil
}
Example #7
// CreateVolume creates a GCE PD.
// Returns: volumeID, volumeSizeGB, labels, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, error) {
	cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
	if err != nil {
		return "", 0, nil, err
	}

	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	requestBytes := capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// Apply Parameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	diskType := ""
	zone := ""
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			diskType = v
		case "zone":
			zone = v
		default:
			return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}

	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
	}

	if zone == "" {
		// No zone specified, choose one randomly in the same region as the
		// node is running.
		zones, err := cloud.GetAllZones()
		if err != nil {
			glog.V(2).Infof("error getting zone information from GCE: %v", err)
			return "", 0, nil, err
		}
		zone = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
	}

	err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, nil, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)

	labels, err := cloud.GetAutoLabelsForPD(name, zone)
	if err != nil {
		// We don't really want to leak the volume here...
		glog.Errorf("error getting labels for volume %q: %v", name, err)
	}

	return name, int(requestGB), labels, nil
}
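The c.options.Parameters map consumed by the loop above is supplied by the provisioner's configuration; keys are matched case-insensitively and unknown keys are rejected. A hypothetical input for this GCE example (the values are illustrative; validating them is left to the cloud provider, as the comment notes):

// Hypothetical parameters for the GCE provisioner above: "type" selects the
// disk type and "zone" pins the zone instead of the random in-region choice.
var gceParameters = map[string]string{
	"type": "pd-ssd",
	"zone": "us-central1-a",
}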
Example #8
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
	var sku, location, account string

	// maxLength = 79 - (4 for ".vhd") = 75
	name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75)
	capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range a.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			sku = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		default:
			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
		}
	}
	// TODO: implement c.options.ProvisionerSelector parsing
	if a.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
	}

	diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
	if err != nil {
		return nil, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   a.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   a.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					DiskName:    diskName,
					DataDiskURI: diskUri,
				},
			},
		},
	}
	return pv, nil
}
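The Azure provisioner rounds the requested bytes up to whole GiB and later writes the size back into the PV capacity as a "<n>Gi" quantity string. A minimal sketch of that round trip, assuming the resource and fmt packages already imported by the example (the helper name is illustrative):

// roundUpToGiBQuantitySketch rounds a requested quantity up to whole GiB and
// returns the "<n>Gi" string stored as the PV's capacity above.
func roundUpToGiBQuantitySketch(requested resource.Quantity) string {
	const gib = int64(1024 * 1024 * 1024)
	requestGB := (requested.Value() + gib - 1) / gib
	return fmt.Sprintf("%dGi", requestGB)
}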
Example #9
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := c.plugin.getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	volSizeBytes := c.options.Capacity.Value()
	// Cinder works with gigabytes, convert to GiB with rounding up
	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	name, err := cloud.CreateVolume(volSizeGB)
	if err != nil {
		glog.V(2).Infof("Error creating cinder volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created cinder volume %s", name)
	return name, volSizeGB, nil
}
Example #10
func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {

	if c.flockerClient == nil {
		c.flockerClient, err = c.plugin.newFlockerClient("")
		if err != nil {
			return
		}
	}

	nodes, err := c.flockerClient.ListNodes()
	if err != nil {
		return
	}
	if len(nodes) < 1 {
		err = fmt.Errorf("no nodes found inside the flocker cluster to provision a dataset")
		return
	}

	// select random node
	rand.Seed(time.Now().UTC().UnixNano())
	node := nodes[rand.Intn(len(nodes))]
	glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))

	createOptions := &flockerApi.CreateDatasetOptions{
		MaximumSize: requestBytes,
		Metadata: map[string]string{
			"type": "k8s-dynamic-prov",
			"pvc":  c.options.PVC.Name,
		},
		Primary: node.UUID,
	}

	datasetState, err := c.flockerClient.CreateDataset(createOptions)
	if err != nil {
		return
	}
	datasetUUID = datasetState.DatasetID

	glog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID)

	return
}
Example #11
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
	cloud, err := v.plugin.getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	volSizeBytes := v.options.Capacity.Value()
	// vSphere works with kilobytes, convert to KiB with rounding up
	volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
	name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
	vmDiskPath, err = cloud.CreateVolume(name, volSizeKB, v.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating vsphere volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created vsphere volume %s", name)
	return vmDiskPath, volSizeKB, nil
}
Example #12
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
	requestBytes := c.options.Capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
	err = cloud.CreateDisk(name, int64(requestGB))
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)
	return name, int(requestGB), nil
}
Example #13
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
	cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider())
	if err != nil {
		return "", 0, err
	}

	capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// vSphere works with kilobytes, convert to KiB with rounding up
	volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
	name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
	volumeOptions := &vsphere.VolumeOptions{
		CapacityKB: volSizeKB,
		Tags:       *v.options.CloudTags,
		Name:       name,
	}

	// Apply Parameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for parameter, value := range v.options.Parameters {
		switch strings.ToLower(parameter) {
		case "diskformat":
			volumeOptions.DiskFormat = value
		default:
			return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
		}
	}

	// TODO: implement PVC.Selector parsing
	if v.options.PVC.Spec.Selector != nil {
		return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
	}

	vmDiskPath, err = cloud.CreateVolume(volumeOptions)
	if err != nil {
		glog.V(2).Infof("Error creating vsphere volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created vsphere volume %s", name)
	return vmDiskPath, volSizeKB, nil
}
Example #14
File: aws_util.go Project: richm/origin
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	requestBytes := c.options.Capacity.Value()
	// The cloud provider works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	volumeOptions := &aws.VolumeOptions{}
	volumeOptions.CapacityGB = int(requestGB)

	name, err := cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created EBS Disk volume %s", name)
	return name, int(requestGB), nil
}
Example #15
File: aws_util.go Project: Cloven/minikube
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (string, int, map[string]string, error) {
	cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
	if err != nil {
		return "", 0, nil, err
	}

	// AWS volumes don't have a Name field; store the name in the Name tag
	var tags map[string]string
	if c.options.CloudTags == nil {
		tags = make(map[string]string)
	} else {
		tags = *c.options.CloudTags
	}
	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

	requestBytes := c.options.Capacity.Value()
	// AWS works with gigabytes, convert to GiB with rounding up
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	volumeOptions := &aws.VolumeOptions{
		CapacityGB: requestGB,
		Tags:       tags,
		PVCName:    c.options.PVCName,
	}

	name, err := cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
		return "", 0, nil, err
	}
	glog.V(2).Infof("Successfully created EBS Disk volume %s", name)

	labels, err := cloud.GetVolumeLabels(name)
	if err != nil {
		// We don't really want to leak the volume here...
		glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
	}

	return name, int(requestGB), labels, nil
}
Example #16
func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := c.plugin.getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// Cinder works with gigabytes, convert to GiB with rounding up
	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
	vtype := ""
	availability := ""
	// Apply ProvisionerParameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			vtype = v
		case "availability":
			availability = v
		default:
			return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}
	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
	}

	name, err = cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating cinder volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created cinder volume %s", name)
	return name, volSizeGB, nil
}
Example #17
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (volumeID string, volumeSizeGB int, err error) {
	volumes, err := c.getVolumeProvider()
	if err != nil {
		glog.V(2).Info("Error getting volume provider: ", err)
		return "", 0, err
	}

	requestBytes := c.options.Capacity.Value()
	// AWS works with gigabytes, convert to GiB with rounding up
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	volSpec := &aws_cloud.VolumeOptions{
		CapacityGB: requestGB,
		Tags:       c.options.CloudTags,
	}

	name, err := volumes.CreateVolume(volSpec)
	if err != nil {
		glog.V(2).Infof("Error creating AWS EBS volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created AWS EBS volume %s", name)
	return name, requestGB, nil
}
Example #18
// CreateVolume creates a PhotonController persistent disk.
func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, err error) {
	cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
	if err != nil {
		glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err)
		return "", 0, err
	}

	capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// Photon Controller works with GiB, convert to GiB with rounding up
	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
	volumeOptions := &photon.VolumeOptions{
		CapacityGB: volSizeGB,
		Tags:       *p.options.CloudTags,
		Name:       name,
	}

	for parameter, value := range p.options.Parameters {
		switch strings.ToLower(parameter) {
		case "flavor":
			volumeOptions.Flavor = value
		default:
			glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
			return "", 0, fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName())
		}
	}

	pdID, err = cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err)
		return "", 0, err
	}

	glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name)
	return pdID, volSizeGB, nil
}
Example #19
// CreateVolume creates an AWS EBS volume.
// Returns: volumeID, volumeSizeGB, labels, error
func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.KubernetesVolumeID, int, map[string]string, error) {
	cloud, err := getCloudProvider(c.awsElasticBlockStore.plugin.host.GetCloudProvider())
	if err != nil {
		return "", 0, nil, err
	}

	// AWS volumes don't have a Name field; store the name in the Name tag
	var tags map[string]string
	if c.options.CloudTags == nil {
		tags = make(map[string]string)
	} else {
		tags = *c.options.CloudTags
	}
	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestBytes := capacity.Value()
	// AWS works with gigabytes, convert to GiB with rounding up
	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
	volumeOptions := &aws.VolumeOptions{
		CapacityGB: requestGB,
		Tags:       tags,
		PVCName:    c.options.PVC.Name,
	}
	// Apply Parameters (case-insensitive). We leave validation of
	// the values to the cloud provider.
	for k, v := range c.options.Parameters {
		switch strings.ToLower(k) {
		case "type":
			volumeOptions.VolumeType = v
		case "zone":
			volumeOptions.AvailabilityZone = v
		case "iopspergb":
			volumeOptions.IOPSPerGB, err = strconv.Atoi(v)
			if err != nil {
				return "", 0, nil, fmt.Errorf("invalid iopsPerGB value %q, must be integer between 1 and 30: %v", v, err)
			}
		case "encrypted":
			volumeOptions.Encrypted, err = strconv.ParseBool(v)
			if err != nil {
				return "", 0, nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err)
			}
		case "kmskeyid":
			volumeOptions.KmsKeyId = v
		default:
			return "", 0, nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
		}
	}

	// TODO: implement PVC.Selector parsing
	if c.options.PVC.Spec.Selector != nil {
		return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
	}

	name, err := cloud.CreateDisk(volumeOptions)
	if err != nil {
		glog.V(2).Infof("Error creating EBS Disk volume: %v", err)
		return "", 0, nil, err
	}
	glog.V(2).Infof("Successfully created EBS Disk volume %s", name)

	labels, err := cloud.GetVolumeLabels(name)
	if err != nil {
		// We don't really want to leak the volume here...
		glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
	}

	return name, int(requestGB), labels, nil
}
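The parameter loop in this example accepts five case-insensitive keys and rejects anything else. A hypothetical parameter set that would pass those checks ("iopspergb" must parse as an integer and "encrypted" as a boolean; the values are illustrative only):

// Hypothetical c.options.Parameters for the EBS provisioner above.
var ebsParameters = map[string]string{
	"type":      "io1",
	"zone":      "us-east-1a",
	"iopspergb": "10",
	"encrypted": "true",
}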
Example #20
func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
	var clusterIds []string
	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
	glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig)
	if p.url == "" {
		glog.Errorf("glusterfs : rest server endpoint is empty")
		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
	}
	cli := gcli.NewClient(p.url, p.user, p.secretValue)
	if cli == nil {
		glog.Errorf("glusterfs: failed to create glusterfs rest client")
		return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
	}
	if p.provisioningConfig.clusterId != "" {
		clusterIds = dstrings.Split(p.clusterId, ",")
		glog.V(4).Infof("glusterfs: provided clusterids: %v", clusterIds)
	}
	gid64 := int64(gid)
	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIds, Gid: gid64, Durability: gapi.VolumeDurabilityInfo{Type: durabilityType, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}}
	volume, err := cli.VolumeCreate(volumeReq)
	if err != nil {
		glog.Errorf("glusterfs: error creating volume %v ", err)
		return nil, 0, fmt.Errorf("error creating volume %v", err)
	}
	glog.V(1).Infof("glusterfs: volume with size: %d and name: %s created", volume.Size, volume.Name)
	clusterinfo, err := cli.ClusterInfo(volume.Cluster)
	if err != nil {
		glog.Errorf("glusterfs: failed to get cluster details: %v", err)
		return nil, 0, fmt.Errorf("failed to get cluster details: %v", err)
	}
	// For the dynamically provisioned volume above, gather the node IPs of the
	// cluster the volume belongs to, since there can be multiple clusters.
	var dynamicHostIps []string
	for _, node := range clusterinfo.Nodes {
		nodei, err := cli.NodeInfo(string(node))
		if err != nil {
			glog.Errorf("glusterfs: failed to get hostip: %v", err)
			return nil, 0, fmt.Errorf("failed to get hostip: %v", err)
		}
		ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
		dynamicHostIps = append(dynamicHostIps, ipaddr)
	}
	glog.V(3).Infof("glusterfs: hostlist :%v", dynamicHostIps)
	if len(dynamicHostIps) == 0 {
		glog.Errorf("glusterfs: no hosts found: %v", err)
		return nil, 0, fmt.Errorf("no hosts found: %v", err)
	}

	// The endpoint name is created in the form 'gluster-dynamic-<claimname>'.
	// createEndpointService() checks whether this endpoint already exists in the PVC's
	// namespace and, if not, creates an endpoint and service using the IPs we
	// dynamically picked at the time of volume creation.
	epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name
	epNamespace := p.options.PVC.Namespace
	endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name)
	if err != nil {
		glog.Errorf("glusterfs: failed to create endpoint/service: %v", err)
		err = cli.VolumeDelete(volume.Id)
		if err != nil {
			glog.Errorf("glusterfs: error when deleting the volume :%v , manual deletion required", err)
		}
		return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err)
	}
	glog.V(3).Infof("glusterfs: dynamic ep %v and svc : %v ", endpoint, service)
	return &v1.GlusterfsVolumeSource{
		EndpointsName: endpoint.Name,
		Path:          volume.Name,
		ReadOnly:      false,
	}, sz, nil
}