func createDestroyConcurrently(db *bolt.DB,
	executor executors.Executor,
	brick_entries []*BrickEntry,
	create_type CreateType) error {

	sg := utils.NewStatusGroup()

	// Create a goroutine for each brick
	for _, brick := range brick_entries {
		sg.Add(1)
		go func(b *BrickEntry) {
			defer sg.Done()

			if create_type == CREATOR_CREATE {
				sg.Err(b.Create(db, executor))
			} else {
				sg.Err(b.Destroy(db, executor))
			}
		}(brick)
	}

	// Wait here until all goroutines have returned.  If
	// any of them errored, the error is caught here.
	err := sg.Result()
	if err != nil {
		logger.Err(err)

		// Destroy all bricks and clean up
		if create_type == CREATOR_CREATE {
			createDestroyConcurrently(db, executor, brick_entries, CREATOR_DESTROY)
		}
	}

	return err
}
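All of these functions fan work out to goroutines through utils.NewStatusGroup() and collect a single error with Result(). Heketi ships its own StatusGroup implementation; the sketch below is only an illustration of the pattern, assuming nothing beyond the Add/Done/Err/Result interface visible in the calls above. Its internals (a sync.WaitGroup plus a mutex-guarded first error) are an assumption, not the actual heketi code.

package utils

import "sync"

// StatusGroup pairs a sync.WaitGroup with first-error collection so a caller
// can launch goroutines, let each report failures, and wait for all of them
// with a single Result() call.
type StatusGroup struct {
	wg   sync.WaitGroup
	lock sync.Mutex
	err  error
}

// NewStatusGroup returns an empty group ready for Add/Done/Err/Result.
func NewStatusGroup() *StatusGroup { return &StatusGroup{} }

// Add registers delta pending goroutines, mirroring sync.WaitGroup.Add.
func (sg *StatusGroup) Add(delta int) { sg.wg.Add(delta) }

// Done marks one goroutine as finished.
func (sg *StatusGroup) Done() { sg.wg.Done() }

// Err records a non-nil error; the first one reported is kept.
func (sg *StatusGroup) Err(err error) {
	if err == nil {
		return
	}
	sg.lock.Lock()
	defer sg.lock.Unlock()
	if sg.err == nil {
		sg.err = err
	}
}

// Result blocks until every registered goroutine has called Done, then
// returns the first recorded error, or nil if all succeeded.
func (sg *StatusGroup) Result() error {
	sg.wg.Wait()
	sg.lock.Lock()
	defer sg.lock.Unlock()
	return sg.err
}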
func setupCluster(t *testing.T) {
	tests.Assert(t, heketi != nil)

	nodespercluster := NODES / CLUSTERS
	nodes := getnodes()
	sg := utils.NewStatusGroup()
	for cluster := 0; cluster < CLUSTERS; cluster++ {
		sg.Add(1)
		go func(nodes_in_cluster []string) {
			defer sg.Done()

			// Create a cluster
			cluster, err := heketi.ClusterCreate()
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Add nodes sequentially due to probes
			for index, hostname := range nodes_in_cluster {
				nodeReq := &glusterfs.NodeAddRequest{}
				nodeReq.ClusterId = cluster.Id
				nodeReq.Hostnames.Manage = []string{hostname}
				nodeReq.Hostnames.Storage = []string{hostname}
				nodeReq.Zone = index%ZONES + 1

				node, err := heketi.NodeAdd(nodeReq)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}

				// Add devices all concurrently
				for _, disk := range getdisks() {
					sg.Add(1)
					go func(d string) {
						defer sg.Done()

						driveReq := &glusterfs.DeviceAddRequest{}
						driveReq.Name = d
						driveReq.Weight = 100
						driveReq.NodeId = node.Id

						err := heketi.DeviceAdd(driveReq)
						if err != nil {
							logger.Err(err)
							sg.Err(err)
						}
					}(disk)
				}
			}
		}(nodes[cluster*nodespercluster : (cluster+1)*nodespercluster])
	}

	// Wait here for results
	err := sg.Result()
	tests.Assert(t, err == nil)
}
func teardownCluster(t *testing.T) {
	clusters, err := heketi.ClusterList()
	tests.Assert(t, err == nil)

	for _, cluster := range clusters.Clusters {

		clusterInfo, err := heketi.ClusterInfo(cluster)
		tests.Assert(t, err == nil)

		// Delete volumes in this cluster
		for _, volume := range clusterInfo.Volumes {
			err := heketi.VolumeDelete(volume)
			tests.Assert(t, err == nil)
		}

		// Delete nodes
		for _, node := range clusterInfo.Nodes {

			// Get node information
			nodeInfo, err := heketi.NodeInfo(node)
			tests.Assert(t, err == nil)

			// Delete each device
			sg := utils.NewStatusGroup()
			for _, device := range nodeInfo.DevicesInfo {
				sg.Add(1)
				go func(id string) {
					defer sg.Done()

					err := heketi.DeviceDelete(id)
					sg.Err(err)
				}(device.Id)
			}
			err = sg.Result()
			tests.Assert(t, err == nil)

			// Delete node
			err = heketi.NodeDelete(node)
			tests.Assert(t, err == nil)
		}

		// Delete cluster
		err = heketi.ClusterDelete(cluster)
		tests.Assert(t, err == nil)
	}
}
func setupCluster(t *testing.T) {
	tests.Assert(t, heketi != nil)

	// Create a cluster
	cluster, err := heketi.ClusterCreate()
	tests.Assert(t, err == nil)

	// Add nodes
	for index, hostname := range storagevms {
		nodeReq := &glusterfs.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{hostname}
		nodeReq.Hostnames.Storage = []string{hostname}
		nodeReq.Zone = index%2 + 1

		node, err := heketi.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Add devices
		sg := utils.NewStatusGroup()
		for _, disk := range disks {
			sg.Add(1)
			go func(d string) {
				defer sg.Done()

				driveReq := &glusterfs.DeviceAddRequest{}
				driveReq.Name = d
				driveReq.Weight = 100
				driveReq.NodeId = node.Id

				err := heketi.DeviceAdd(driveReq)
				sg.Err(err)
			}(disk)
		}
		err = sg.Result()
		tests.Assert(t, err == nil)
	}
}
func teardownCluster(t *testing.T) {
	clusters, err := heketi.ClusterList()
	tests.Assert(t, err == nil)

	sg := utils.NewStatusGroup()
	for _, cluster := range clusters.Clusters {

		sg.Add(1)
		go func(clusterId string) {
			defer sg.Done()

			clusterInfo, err := heketi.ClusterInfo(clusterId)
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Delete volumes in this cluster
			for _, volume := range clusterInfo.Volumes {
				err := heketi.VolumeDelete(volume)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}
			}

			// Delete all devices in the cluster concurrently
			deviceSg := utils.NewStatusGroup()
			for _, node := range clusterInfo.Nodes {

				// Get node information.  Report failures to the outer
				// status group so they are not lost when we return early.
				nodeInfo, err := heketi.NodeInfo(node)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}

				// Delete each device
				for _, device := range nodeInfo.DevicesInfo {
					deviceSg.Add(1)
					go func(id string) {
						defer deviceSg.Done()

						err := heketi.DeviceDelete(id)
						if err != nil {
							logger.Err(err)
							deviceSg.Err(err)
						}
					}(device.Id)
				}
			}
			err = deviceSg.Result()
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Delete nodes
			for _, node := range clusterInfo.Nodes {
				err = heketi.NodeDelete(node)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}
			}

			// Delete cluster
			err = heketi.ClusterDelete(clusterId)
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}
		}(cluster)
	}

	err = sg.Result()
	tests.Assert(t, err == nil)
}
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	for n := 0; n < 4; n++ {
		nodeReq := &glusterfs.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)}
		nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &glusterfs.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.Weight = 100
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)
			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &glusterfs.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id
	expandReq := &glusterfs.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	clusterInfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	for _, nodeid := range clusterInfo.Nodes {
		// Get node information
		nodeInfo, err := c.NodeInfo(nodeid)
		tests.Assert(t, err == nil)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeid)
		tests.Assert(t, err == nil)
	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)
}