func (c *VolumeDeleteCommand) Exec(args []string) error { //parse args c.flags.Parse(args) s := c.flags.Args() //ensure proper number of args if len(s) < 1 { return errors.New("Volume id missing") } //set volumeId volumeId := c.flags.Arg(0) // Create a client heketi := client.NewClient(c.options.Url, c.options.User, c.options.Key) //set url err := heketi.VolumeDelete(volumeId) if err == nil { fmt.Fprintf(stdout, "Volume %v deleted\n", volumeId) } return err }
func (c *ClusterCreateCommand) Exec(args []string) error { //parse args c.flags.Parse(args) // Create a client to talk to Heketi heketi := client.NewClient(c.options.Url, c.options.User, c.options.Key) // Create cluster cluster, err := heketi.ClusterCreate() if err != nil { return err } // Check if JSON should be printed if c.options.Json { data, err := json.Marshal(cluster) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "Cluster id: %v\n", cluster.Id) } return nil }
func (d *DeviceAddCommand) Exec(args []string) error { // Parse args d.flags.Parse(args) // Check arguments if d.name == "" { return errors.New("Missing device name") } if d.nodeId == "" { return errors.New("Missing node id") } // Create request blob req := &glusterfs.DeviceAddRequest{} req.Name = d.device req.NodeId = d.nodeId req.Weight = 100 // Create a client heketi := client.NewClient(d.options.Url, d.options.User, d.options.Key) // Add node err := heketi.DeviceAdd(req) if err != nil { return err } else { fmt.Fprintf(stdout, "Device added successfully\n") } return nil }
func (n *NodeDestroyCommand) Exec(args []string) error { //parse args n.flags.Parse(args) s := n.flags.Args() //ensure proper number of args if len(s) < 1 { return errors.New("Node id missing") } //set clusterId nodeId := n.flags.Arg(0) // Create a client heketi := client.NewClient(n.options.Url, n.options.User, n.options.Key) //set url err := heketi.NodeDelete(nodeId) if err == nil { fmt.Fprintf(stdout, "Node %v deleted\n", nodeId) } return err }
func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) { volSizeBytes := p.options.Capacity.Value() sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) glog.V(2).Infof("glusterfs: create volume of size:%d bytes", volSizeBytes) if p.glusterfsClusterConf.glusterRestUrl == "" { glog.Errorf("glusterfs : rest server endpoint is empty") return nil, 0, fmt.Errorf("failed to create gluster REST client, REST URL is empty") } cli := gcli.NewClient(p.glusterRestUrl, p.glusterRestUser, p.glusterRestUserKey) if cli == nil { glog.Errorf("glusterfs: failed to create gluster rest client") return nil, 0, fmt.Errorf("failed to create gluster REST client, REST server authentication failed") } volumeReq := &gapi.VolumeCreateRequest{Size: sz, Durability: gapi.VolumeDurabilityInfo{Type: durabilitytype, Replicate: gapi.ReplicaDurability{Replica: replicacount}}} volume, err := cli.VolumeCreate(volumeReq) if err != nil { glog.Errorf("glusterfs: error creating volume %s ", err) return nil, 0, fmt.Errorf("error creating volume %v", err) } glog.V(1).Infof("glusterfs: volume with size :%d and name:%s created", volume.Size, volume.Name) return &api.GlusterfsVolumeSource{ EndpointsName: p.glusterfsClusterConf.glusterep, Path: volume.Name, ReadOnly: false, }, sz, nil }
func (n *VolumeInfoCommand) Exec(args []string) error { n.flags.Parse(args) //ensure proper number of args s := n.flags.Args() if len(s) < 1 { return errors.New("Volume id missing") } // Set volume id volumeId := n.flags.Arg(0) // Create a client to talk to Heketi heketi := client.NewClient(n.options.Url, n.options.User, n.options.Key) // Create cluster info, err := heketi.VolumeInfo(volumeId) if err != nil { return err } if n.options.Json { data, err := json.Marshal(info) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "%v", info) } return nil }
func (c *VolumeListCommand) Exec(args []string) error { //parse args c.flags.Parse(args) // Create a client heketi := client.NewClient(c.options.Url, c.options.User, c.options.Key) // List volumes list, err := heketi.VolumeList() if err != nil { return err } if c.options.Json { data, err := json.Marshal(list) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { output := strings.Join(list.Volumes, "\n") fmt.Fprintf(stdout, "Volumes:\n%v\n", output) } return nil }
func (n *NodeAddCommand) Exec(args []string) error { // Parse args n.flags.Parse(args) // Check arguments if n.zone == -1 { return errors.New("Missing zone") } if n.managmentHostNames == "" { return errors.New("Missing management hostname") } if n.storageHostNames == "" { return errors.New("Missing storage hostname") } if n.clusterId == "" { return errors.New("Missing cluster id") } // Create request blob req := &glusterfs.NodeAddRequest{} req.ClusterId = n.clusterId req.Hostnames.Manage = []string{n.managmentHostNames} req.Hostnames.Storage = []string{n.storageHostNames} req.Zone = n.zone // Create a client heketi := client.NewClient(n.options.Url, n.options.User, n.options.Key) // Add node node, err := heketi.NodeAdd(req) if err != nil { return err } if n.options.Json { data, err := json.Marshal(node) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "Node information:\n"+ "Id: %v\n"+ "Cluster Id: %v\n"+ "Zone: %v\n"+ "Management Hostname %v\n"+ "Storage Hostname %v\n", node.Id, node.ClusterId, node.Zone, node.Hostnames.Manage[0], node.Hostnames.Storage[0]) } return nil }
func (n *NodeInfoCommand) Exec(args []string) error { n.flags.Parse(args) //ensure proper number of args s := n.flags.Args() if len(s) < 1 { return errors.New("Node id missing") } // Set node id nodeId := n.flags.Arg(0) // Create a client to talk to Heketi heketi := client.NewClient(n.options.Url, n.options.User, n.options.Key) // Create cluster info, err := heketi.NodeInfo(nodeId) if err != nil { return err } if n.options.Json { data, err := json.Marshal(info) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "Node Id: %v\n"+ "Cluster Id: %v\n"+ "Zone: %v\n"+ "Management Hostname: %v\n"+ "Storage Hostname: %v\n", info.Id, info.ClusterId, info.Zone, info.Hostnames.Manage[0], info.Hostnames.Storage[0]) fmt.Fprintf(stdout, "Devices:\n") for _, d := range info.DevicesInfo { fmt.Fprintf(stdout, "Id:%-35v"+ "Name:%-20v"+ "Size (GiB):%-8v"+ "Used (GiB):%-8v"+ "Free (GiB):%-8v\n", d.Id, d.Name, d.Storage.Total/(1024*1024), d.Storage.Used/(1024*1024), d.Storage.Free/(1024*1024)) } } return nil }
func (v *VolumeCreateCommand) Exec(args []string) error { // Parse args v.flags.Parse(args) // Check volume size if v.size == -1 { return errors.New("Missing volume size") } // Check clusters var clusters []string if v.clusters != "" { clusters = strings.Split(v.clusters, ",") } // Create request blob req := &glusterfs.VolumeCreateRequest{} req.Size = v.size req.Clusters = clusters req.Durability.Type = v.durability req.Durability.Replicate.Replica = v.replica req.Durability.Disperse.Data = v.disperse_data req.Durability.Disperse.Redundancy = v.redundancy if v.volname != "" { req.Name = v.volname } if v.snapshot_factor > 1.0 { req.Snapshot.Factor = float32(v.snapshot_factor) req.Snapshot.Enable = true } // Create a client heketi := client.NewClient(v.options.Url, v.options.User, v.options.Key) // Add volume volume, err := heketi.VolumeCreate(req) if err != nil { return err } if v.options.Json { data, err := json.Marshal(volume) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "%v", volume) } return nil }
// Delete removes the dynamically provisioned GlusterFS volume backing this
// PV via the gluster REST (heketi) API, then best-effort deletes the
// endpoint/service objects that were created for it.
func (d *glusterfsVolumeDeleter) Delete() error {
	var err error
	glog.V(2).Infof("glusterfs: delete volume: %s ", d.glusterfsMounter.path)
	volumeName := d.glusterfsMounter.path
	// The heketi volume id is the mount path with the volume prefix stripped.
	volumeId := dstrings.TrimPrefix(volumeName, volPrefix)
	// Recover the provisioning configuration from the storage class so the
	// REST endpoint and credentials match the ones used at creation time.
	class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
	if err != nil {
		return err
	}
	cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient())
	if err != nil {
		return err
	}
	d.provisioningConfig = *cfg
	glog.V(4).Infof("glusterfs: deleting volume %q with configuration %+v", volumeId, d.provisioningConfig)
	cli := gcli.NewClient(d.url, d.user, d.secretValue)
	if cli == nil {
		glog.Errorf("glusterfs: failed to create glusterfs rest client")
		return fmt.Errorf("glusterfs: failed to create glusterfs rest client, REST server authentication failed")
	}
	err = cli.VolumeDelete(volumeId)
	if err != nil {
		glog.Errorf("glusterfs: error when deleting the volume :%v", err)
		return err
	}
	glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumeName)

	//Deleter takes endpoint and endpointnamespace from pv spec.
	pvSpec := d.spec.Spec
	var dynamicEndpoint, dynamicNamespace string
	if pvSpec.ClaimRef == nil {
		glog.Errorf("glusterfs: ClaimRef is nil")
		return fmt.Errorf("glusterfs: ClaimRef is nil")
	}
	if pvSpec.ClaimRef.Namespace == "" {
		glog.Errorf("glusterfs: namespace is nil")
		return fmt.Errorf("glusterfs: namespace is nil")
	}
	dynamicNamespace = pvSpec.ClaimRef.Namespace
	if pvSpec.Glusterfs.EndpointsName != "" {
		dynamicEndpoint = pvSpec.Glusterfs.EndpointsName
	}
	glog.V(3).Infof("glusterfs: dynamic namespace and endpoint : [%v/%v]", dynamicNamespace, dynamicEndpoint)
	// A failure to clean up the endpoint/service is logged but not
	// returned: the volume itself is already gone at this point.
	err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint)
	if err != nil {
		glog.Errorf("glusterfs: error when deleting endpoint/service :%v", err)
	} else {
		glog.V(1).Infof("glusterfs: [%v/%v] deleted successfully ", dynamicNamespace, dynamicEndpoint)
	}
	return nil
}
func (d *DeviceInfoCommand) Exec(args []string) error { d.flags.Parse(args) //ensure proper number of args s := d.flags.Args() if len(s) < 1 { return errors.New("Device id missing") } // Set node id deviceId := d.flags.Arg(0) // Create a client to talk to Heketi heketi := client.NewClient(d.options.Url, d.options.User, d.options.Key) // Create cluster info, err := heketi.DeviceInfo(deviceId) if err != nil { return err } if d.options.Json { data, err := json.Marshal(info) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "Device Id: %v\n"+ "Name: %v\n"+ "Size (GiB): %v\n"+ "Used (GiB): %v\n"+ "Free (GiB): %v\n", info.Id, info.Name, info.Storage.Total/(1024*1024), info.Storage.Used/(1024*1024), info.Storage.Free/(1024*1024)) fmt.Fprintf(stdout, "Bricks:\n") for _, d := range info.Bricks { fmt.Fprintf(stdout, "Id:%-35v"+ "Size (GiB):%-8v"+ "Path: %v\n", d.Id, d.Size/(1024*1024), d.Path) } } return nil }
func (d *glusterfsVolumeDeleter) Delete() error { var err error glog.V(2).Infof("glusterfs: delete volume :%s ", d.glusterfsMounter.path) volumetodel := d.glusterfsMounter.path d.glusterfsClusterConf = d.plugin.clusterconf newvolumetodel := dstrings.TrimPrefix(volumetodel, volprefix) cli := gcli.NewClient(d.glusterRestUrl, d.glusterRestUser, d.glusterRestUserKey) if cli == nil { glog.Errorf("glusterfs: failed to create gluster rest client") return fmt.Errorf("glusterfs: failed to create gluster rest client, REST server authentication failed") } err = cli.VolumeDelete(newvolumetodel) if err != nil { glog.V(4).Infof("glusterfs: error when deleting the volume :%s", err) return err } glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumetodel) return nil }
// Delete removes the dynamically provisioned GlusterFS volume behind this
// PV. REST credentials are reconstructed from the PV's annotations before
// the delete request is issued.
func (d *glusterfsVolumeDeleter) Delete() error {
	var err error
	glog.V(2).Infof("glusterfs: delete volume: %s ", d.glusterfsMounter.path)
	volumeName := d.glusterfsMounter.path
	// The heketi volume id is the mount path with the volume prefix stripped.
	volumeId := dstrings.TrimPrefix(volumeName, volprefix)
	// Recover REST endpoint/user/secret settings from the PV annotations.
	err = d.annotationsToParam(d.spec)
	if err != nil {
		return err
	}
	// Resolve the REST secret: a referenced Kubernetes secret wins, then an
	// inline user key, otherwise no credential at all.
	if len(d.secretName) > 0 {
		d.secretValue, err = parseSecret(d.secretNamespace, d.secretName, d.plugin.host.GetKubeClient())
		if err != nil {
			glog.Errorf("glusterfs: failed to read secret: %v", err)
			return err
		}
	} else if len(d.userKey) > 0 {
		d.secretValue = d.userKey
	} else {
		d.secretValue = ""
	}
	glog.V(4).Infof("glusterfs: deleting volume %q with configuration %+v", volumeId, d.provisioningConfig)
	cli := gcli.NewClient(d.url, d.user, d.secretValue)
	if cli == nil {
		glog.Errorf("glusterfs: failed to create gluster rest client")
		return fmt.Errorf("glusterfs: failed to create gluster rest client, REST server authentication failed")
	}
	err = cli.VolumeDelete(volumeId)
	if err != nil {
		glog.V(4).Infof("glusterfs: error when deleting the volume :%s", err)
		return err
	}
	glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumeName)
	return nil
}
func (c *ClusterInfoCommand) Exec(args []string) error { //parse args c.flags.Parse(args) //ensure proper number of args s := c.flags.Args() if len(s) < 1 { return errors.New("Cluster id missing") } //set clusterId clusterId := c.flags.Arg(0) // Create a client to talk to Heketi heketi := client.NewClient(c.options.Url, c.options.User, c.options.Key) // Create cluster info, err := heketi.ClusterInfo(clusterId) if err != nil { return err } // Check if JSON should be printed if c.options.Json { data, err := json.Marshal(info) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "Cluster id: %v\n", info.Id) fmt.Fprintf(stdout, "Nodes:\n%v", strings.Join(info.Nodes, "\n")) fmt.Fprintf(stdout, "\nVolumes:\n%v", strings.Join(info.Volumes, "\n")) } return nil }
func (v *VolumeExpandCommand) Exec(args []string) error { // Parse args v.flags.Parse(args) // Check volume size if v.expand_size == -1 { return errors.New("Missing volume amount to expand") } if v.id == "" { return errors.New("Missing volume id") } // Create request req := &glusterfs.VolumeExpandRequest{} req.Size = v.expand_size // Create client heketi := client.NewClient(v.options.Url, v.options.User, v.options.Key) // Expand volume volume, err := heketi.VolumeExpand(v.id, req) if err != nil { return err } if v.options.Json { data, err := json.Marshal(volume) if err != nil { return err } fmt.Fprintf(stdout, string(data)) } else { fmt.Fprintf(stdout, "%v", volume) } return nil }
)

// These are the settings for the vagrant file
const (
	// The heketi server must be running on the host
	heketiUrl = "http://127.0.0.1:8080"

	// VMs
	DISKS = 24
	NODES = 30
)

var (
	// Heketi client
	heketi = client.NewClient(heketiUrl, "admin", "adminkey")
	logger = utils.NewLogger("[test]", utils.LEVEL_DEBUG)
)

// getdisks returns one device path per attached disk, /dev/vdb upward,
// for the DISKS disks configured on each vagrant VM.
func getdisks() []string {
	diskletters := make([]string, DISKS)
	// Start at letter 'b' -- /dev/vda is assumed taken -- and count upward
	// one letter per disk.
	for index, i := 0, []byte("b")[0]; index < DISKS; index, i = index+1, i+1 {
		diskletters[index] = "/dev/vd" + string(i)
	}
	return diskletters
}

// getnodes builds the list of addresses for the NODES VMs.
// (Definition continues beyond this chunk.)
func getnodes() []string {
	nodelist := make([]string, NODES)
func (l *LoadCommand) Exec(args []string) error { // Parse args l.flags.Parse(args) // Check arguments if l.jsonConfigFile == "" { return errors.New("Missing configuration file") } // Load config file fp, err := os.Open(l.jsonConfigFile) if err != nil { return errors.New("Unable to open config file") } defer fp.Close() configParser := json.NewDecoder(fp) var topology ConfigFile if err = configParser.Decode(&topology); err != nil { return errors.New("Unable to parse config file") } heketi := client.NewClient(l.options.Url, l.options.User, l.options.Key) for _, cluster := range topology.Clusters { fmt.Fprintf(stdout, "Creating cluster ... ") clusterInfo, err := heketi.ClusterCreate() if err != nil { return err } fmt.Fprintf(stdout, "ID: %v\n", clusterInfo.Id) for _, node := range cluster.Nodes { fmt.Fprintf(stdout, "\tCreating node %v ... ", node.Node.Hostnames.Manage[0]) node.Node.ClusterId = clusterInfo.Id nodeInfo, err := heketi.NodeAdd(&node.Node) if err != nil { return err } fmt.Fprintf(stdout, "ID: %v\n", nodeInfo.Id) for _, device := range node.Devices { fmt.Fprintf(stdout, "\t\tAdding device %v ... ", device) req := &glusterfs.DeviceAddRequest{} req.Name = device req.NodeId = nodeInfo.Id req.Weight = 100 err := heketi.DeviceAdd(req) if err != nil { return nil } fmt.Fprintf(stdout, "OK\n") } } } return nil }
if jsonConfigFile == "" { return errors.New("Missing configuration file") } // Load config file fp, err := os.Open(jsonConfigFile) if err != nil { return errors.New("Unable to open config file") } defer fp.Close() configParser := json.NewDecoder(fp) var topology ConfigFile if err = configParser.Decode(&topology); err != nil { return errors.New("Unable to parse config file") } heketi := client.NewClient(options.Url, options.User, options.Key) for _, cluster := range topology.Clusters { fmt.Fprintf(stdout, "Creating cluster ... ") clusterInfo, err := heketi.ClusterCreate() if err != nil { return err } fmt.Fprintf(stdout, "ID: %v\n", clusterInfo.Id) for _, node := range cluster.Nodes { fmt.Fprintf(stdout, "\tCreating node %v ... ", node.Node.Hostnames.Manage[0]) node.Node.ClusterId = clusterInfo.Id nodeInfo, err := heketi.NodeAdd(&node.Node) if err != nil { return err }
func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) { var clusterIds []string capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig) if p.url == "" { glog.Errorf("glusterfs : rest server endpoint is empty") return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") } cli := gcli.NewClient(p.url, p.user, p.secretValue) if cli == nil { glog.Errorf("glusterfs: failed to create glusterfs rest client") return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } if p.provisioningConfig.clusterId != "" { clusterIds = dstrings.Split(p.clusterId, ",") glog.V(4).Infof("glusterfs: provided clusterids: %v", clusterIds) } gid64 := int64(gid) volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIds, Gid: gid64, Durability: gapi.VolumeDurabilityInfo{Type: durabilityType, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}} volume, err := cli.VolumeCreate(volumeReq) if err != nil { glog.Errorf("glusterfs: error creating volume %v ", err) return nil, 0, fmt.Errorf("error creating volume %v", err) } glog.V(1).Infof("glusterfs: volume with size: %d and name: %s created", volume.Size, volume.Name) clusterinfo, err := cli.ClusterInfo(volume.Cluster) if err != nil { glog.Errorf("glusterfs: failed to get cluster details: %v", err) return nil, 0, fmt.Errorf("failed to get cluster details: %v", err) } // For the above dynamically provisioned volume, we gather the list of node IPs // of the cluster on which provisioned volume belongs to, as there can be multiple // clusters. 
var dynamicHostIps []string for _, node := range clusterinfo.Nodes { nodei, err := cli.NodeInfo(string(node)) if err != nil { glog.Errorf("glusterfs: failed to get hostip: %v", err) return nil, 0, fmt.Errorf("failed to get hostip: %v", err) } ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "") dynamicHostIps = append(dynamicHostIps, ipaddr) } glog.V(3).Infof("glusterfs: hostlist :%v", dynamicHostIps) if len(dynamicHostIps) == 0 { glog.Errorf("glusterfs: no hosts found: %v", err) return nil, 0, fmt.Errorf("no hosts found: %v", err) } // The 'endpointname' is created in form of 'gluster-dynamic-<claimname>'. // createEndpointService() checks for this 'endpoint' existence in PVC's namespace and // If not found, it create an endpoint and svc using the IPs we dynamically picked at time // of volume creation. epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name epNamespace := p.options.PVC.Namespace endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name) if err != nil { glog.Errorf("glusterfs: failed to create endpoint/service: %v", err) err = cli.VolumeDelete(volume.Id) if err != nil { glog.Errorf("glusterfs: error when deleting the volume :%v , manual deletion required", err) } return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err) } glog.V(3).Infof("glusterfs: dynamic ep %v and svc : %v ", endpoint, service) return &v1.GlusterfsVolumeSource{ EndpointsName: endpoint.Name, Path: volume.Name, ReadOnly: false, }, sz, nil }