func createDestroyConcurrently(db *bolt.DB, executor executors.Executor, brick_entries []*BrickEntry, create_type CreateType) error { sg := utils.NewStatusGroup() // Create a goroutine for each brick for _, brick := range brick_entries { sg.Add(1) go func(b *BrickEntry) { defer sg.Done() if create_type == CREATOR_CREATE { sg.Err(b.Create(db, executor)) } else { sg.Err(b.Destroy(db, executor)) } }(brick) } // Wait here until all goroutines have returned. If // any of errored, it would be cought here err := sg.Result() if err != nil { logger.Err(err) // Destroy all bricks and cleanup if create_type == CREATOR_CREATE { createDestroyConcurrently(db, executor, brick_entries, CREATOR_DESTROY) } } return err }
func teardownCluster(t *testing.T) { clusters, err := heketi.ClusterList() tests.Assert(t, err == nil) for _, cluster := range clusters.Clusters { clusterInfo, err := heketi.ClusterInfo(cluster) tests.Assert(t, err == nil) // Delete volumes in this cluster for _, volume := range clusterInfo.Volumes { err := heketi.VolumeDelete(volume) tests.Assert(t, err == nil) } // Delete nodes for _, node := range clusterInfo.Nodes { // Get node information nodeInfo, err := heketi.NodeInfo(node) tests.Assert(t, err == nil) // Delete each device sg := utils.NewStatusGroup() for _, device := range nodeInfo.DevicesInfo { sg.Add(1) go func(id string) { defer sg.Done() err := heketi.DeviceDelete(id) sg.Err(err) }(device.Id) } err = sg.Result() tests.Assert(t, err == nil) // Delete node err = heketi.NodeDelete(node) tests.Assert(t, err == nil) } // Delete cluster err = heketi.ClusterDelete(cluster) tests.Assert(t, err == nil) } }
func setupCluster(t *testing.T) { tests.Assert(t, heketi != nil) // Create a cluster cluster, err := heketi.ClusterCreate() tests.Assert(t, err == nil) // Add nodes for index, hostname := range storagevms { nodeReq := &glusterfs.NodeAddRequest{} nodeReq.ClusterId = cluster.Id nodeReq.Hostnames.Manage = []string{hostname} nodeReq.Hostnames.Storage = []string{hostname} nodeReq.Zone = index % 2 node, err := heketi.NodeAdd(nodeReq) tests.Assert(t, err == nil) // Add devices sg := utils.NewStatusGroup() for _, disk := range disks { sg.Add(1) go func(d string) { defer sg.Done() driveReq := &glusterfs.DeviceAddRequest{} driveReq.Name = d driveReq.Weight = 100 driveReq.NodeId = node.Id err := heketi.DeviceAdd(driveReq) sg.Err(err) }(disk) } err = sg.Result() tests.Assert(t, err == nil) } }
// TestClientVolume exercises the volume REST client end to end against
// an in-memory test app: it creates a cluster, a node, and 50 devices,
// then covers volume create, list, info, and expand (including the
// bad-id error paths), and finally tears everything back down.
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	nodeReq := &glusterfs.NodeAddRequest{}
	nodeReq.ClusterId = cluster.Id
	nodeReq.Hostnames.Manage = []string{"manage"}
	nodeReq.Hostnames.Storage = []string{"storage"}
	nodeReq.Zone = 10

	// Create node
	node, err := c.NodeAdd(nodeReq)
	tests.Assert(t, err == nil)

	// Create 50 devices concurrently so the volume has storage to draw on
	sg := utils.NewStatusGroup()
	for i := 0; i < 50; i++ {
		sg.Add(1)
		go func() {
			defer sg.Done()

			deviceReq := &glusterfs.DeviceAddRequest{}
			deviceReq.Name = "sd" + utils.GenUUID()[:8]
			deviceReq.Weight = 100
			deviceReq.NodeId = node.Id

			// Create device
			err := c.DeviceAdd(deviceReq)
			sg.Err(err)
		}()
	}
	tests.Assert(t, sg.Result() == nil)

	// Get list of volumes; none exist yet
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &glusterfs.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes; now exactly the one just created
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id must fail
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info on the real volume matches what create returned
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id must fail
	expandReq := &glusterfs.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume: 10 original + 10 expansion = 20
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id must fail
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	// Get node information to enumerate devices for cleanup
	nodeInfo, err := c.NodeInfo(node.Id)
	tests.Assert(t, err == nil)

	// Delete all devices concurrently
	sg = utils.NewStatusGroup()
	for index := range nodeInfo.DevicesInfo {
		sg.Add(1)
		go func(i int) {
			defer sg.Done()
			sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
		}(index)
	}
	err = sg.Result()
	tests.Assert(t, err == nil, err)

	// Delete node
	err = c.NodeDelete(node.Id)
	tests.Assert(t, err == nil)

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)
}
func (a *App) DeviceAdd(w http.ResponseWriter, r *http.Request) { var msg DeviceAddRequest err := utils.GetJsonFromRequest(r, &msg) if err != nil { http.Error(w, "request unable to be parsed", 422) return } // Check the message has devices if len(msg.Devices) <= 0 { http.Error(w, "no devices added", http.StatusBadRequest) return } // Check the node is in the db err = a.db.View(func(tx *bolt.Tx) error { _, err := NewNodeEntryFromId(tx, msg.NodeId) if err == ErrNotFound { http.Error(w, "Node id does not exist", http.StatusNotFound) return err } else if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return err } return nil }) if err != nil { return } // Log the devices are being added logger.Info("Adding devices %+v to node %v", msg.Devices, msg.NodeId) // Add device in an asynchronous function a.asyncManager.AsyncHttpRedirectFunc(w, r, func() (string, error) { sg := utils.NewStatusGroup() for index := range msg.Devices { sg.Add(1) // Add each drive go func(dev *Device) { defer sg.Done() // Pretend work time.Sleep(1 * time.Second) device := NewDeviceEntryFromRequest(dev, msg.NodeId) err := a.db.Update(func(tx *bolt.Tx) error { node, err := NewNodeEntryFromId(tx, msg.NodeId) if err != nil { return err } // Add device to node node.DeviceAdd(device.Info.Id) // Commit err = node.Save(tx) if err != nil { return err } // Save drive err = device.Save(tx) if err != nil { return err } return nil }) if err != nil { sg.Err(err) } logger.Info("Added device %v", dev.Name) }(&msg.Devices[index]) } // Done // Returning a null string instructs the async manager // to return http status of 204 (No Content) return "", sg.Result() }) }
func setupCluster(t *testing.T) { tests.Assert(t, heketi != nil) numclusters := 5 nodespercluster := NODES / numclusters nodes := getnodes() sg := utils.NewStatusGroup() for cluster := 0; cluster < numclusters; cluster++ { sg.Add(1) go func(nodes_in_cluster []string) { defer sg.Done() // Create a cluster cluster, err := heketi.ClusterCreate() if err != nil { logger.Err(err) sg.Err(err) return } // Add nodes sequentially due to probes for index, hostname := range nodes_in_cluster { nodeReq := &glusterfs.NodeAddRequest{} nodeReq.ClusterId = cluster.Id nodeReq.Hostnames.Manage = []string{hostname} nodeReq.Hostnames.Storage = []string{hostname} nodeReq.Zone = index % 3 node, err := heketi.NodeAdd(nodeReq) if err != nil { logger.Err(err) sg.Err(err) return } // Add devices all concurrently for _, disk := range getdisks() { sg.Add(1) go func(d string) { defer sg.Done() driveReq := &glusterfs.DeviceAddRequest{} driveReq.Name = d driveReq.Weight = 100 driveReq.NodeId = node.Id err := heketi.DeviceAdd(driveReq) if err != nil { logger.Err(err) sg.Err(err) } }(disk) } } }(nodes[cluster*nodespercluster : (cluster+1)*nodespercluster]) } // Wait here for results err := sg.Result() tests.Assert(t, err == nil) }
func teardownCluster(t *testing.T) { clusters, err := heketi.ClusterList() tests.Assert(t, err == nil) sg := utils.NewStatusGroup() for _, cluster := range clusters.Clusters { sg.Add(1) go func(clusterId string) { defer sg.Done() clusterInfo, err := heketi.ClusterInfo(clusterId) if err != nil { logger.Err(err) sg.Err(err) return } // Delete volumes in this cluster for _, volume := range clusterInfo.Volumes { err := heketi.VolumeDelete(volume) if err != nil { logger.Err(err) sg.Err(err) return } } // Delete all devices in the cluster concurrently deviceSg := utils.NewStatusGroup() for _, node := range clusterInfo.Nodes { // Get node information nodeInfo, err := heketi.NodeInfo(node) if err != nil { logger.Err(err) deviceSg.Err(err) return } // Delete each device for _, device := range nodeInfo.DevicesInfo { deviceSg.Add(1) go func(id string) { defer deviceSg.Done() err := heketi.DeviceDelete(id) if err != nil { logger.Err(err) deviceSg.Err(err) return } }(device.Id) } } err = deviceSg.Result() if err != nil { logger.Err(err) sg.Err(err) return } // Delete nodes for _, node := range clusterInfo.Nodes { err = heketi.NodeDelete(node) if err != nil { logger.Err(err) sg.Err(err) return } } // Delete cluster err = heketi.ClusterDelete(clusterId) if err != nil { logger.Err(err) sg.Err(err) return } }(cluster) } err = sg.Result() tests.Assert(t, err == nil) }