Code Example #1
File: brick_create.go  Project: yepengxj/heketi
func createDestroyConcurrently(db *bolt.DB,
	executor executors.Executor,
	brick_entries []*BrickEntry,
	create_type CreateType) error {

	sg := utils.NewStatusGroup()

	// Create a goroutine for each brick
	for _, brick := range brick_entries {
		sg.Add(1)
		go func(b *BrickEntry) {
			defer sg.Done()
			if create_type == CREATOR_CREATE {
				sg.Err(b.Create(db, executor))
			} else {
				sg.Err(b.Destroy(db, executor))
			}
		}(brick)
	}

	// Wait here until all goroutines have returned. If
	// any of them errored, it is caught here.
	err := sg.Result()
	if err != nil {
		logger.Err(err)

		// Destroy all bricks and cleanup
		if create_type == CREATOR_CREATE {
			createDestroyConcurrently(db, executor, brick_entries, CREATOR_DESTROY)
		}
	}
	return err
}
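
Every snippet on this page follows the same pattern around utils.StatusGroup: call Add(1) before spawning a goroutine, defer Done() inside it, record failures with Err(), and block on Result() until all goroutines have finished. The StatusGroup type itself never appears in these examples, so the following is a minimal sketch of how such a type could be built on sync.WaitGroup. It only illustrates the interface the examples rely on; it is not heketi's actual utils implementation, whose internals may differ.

package utils

import "sync"

// StatusGroup pairs a sync.WaitGroup with error collection so a caller
// can wait on many goroutines and receive a single error at the end.
type StatusGroup struct {
	wg  sync.WaitGroup
	mu  sync.Mutex
	err error
}

func NewStatusGroup() *StatusGroup {
	return &StatusGroup{}
}

// Add increments the count of outstanding goroutines.
func (sg *StatusGroup) Add(delta int) { sg.wg.Add(delta) }

// Done marks one goroutine as finished.
func (sg *StatusGroup) Done() { sg.wg.Done() }

// Err records a non-nil error; only the first error is kept.
func (sg *StatusGroup) Err(err error) {
	if err == nil {
		return
	}
	sg.mu.Lock()
	defer sg.mu.Unlock()
	if sg.err == nil {
		sg.err = err
	}
}

// Result blocks until every Add has been matched by a Done, then
// returns the first recorded error, or nil if none occurred.
func (sg *StatusGroup) Result() error {
	sg.wg.Wait()
	sg.mu.Lock()
	defer sg.mu.Unlock()
	return sg.err
}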
Code Example #2
File: heketi_test.go  Project: yepengxj/heketi
func setupCluster(t *testing.T) {
	tests.Assert(t, heketi != nil)

	nodespercluster := NODES / CLUSTERS
	nodes := getnodes()
	sg := utils.NewStatusGroup()
	for cluster := 0; cluster < CLUSTERS; cluster++ {
		sg.Add(1)
		go func(nodes_in_cluster []string) {
			defer sg.Done()
			// Create a cluster
			cluster, err := heketi.ClusterCreate()
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Add nodes sequentially due to probes
			for index, hostname := range nodes_in_cluster {
				nodeReq := &api.NodeAddRequest{}
				nodeReq.ClusterId = cluster.Id
				nodeReq.Hostnames.Manage = []string{hostname}
				nodeReq.Hostnames.Storage = []string{hostname}
				nodeReq.Zone = index%ZONES + 1

				node, err := heketi.NodeAdd(nodeReq)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}

				// Add devices all concurrently
				for _, disk := range getdisks() {
					sg.Add(1)
					go func(d string) {
						defer sg.Done()

						driveReq := &api.DeviceAddRequest{}
						driveReq.Name = d
						driveReq.NodeId = node.Id

						err := heketi.DeviceAdd(driveReq)
						if err != nil {
							logger.Err(err)
							sg.Err(err)
						}
					}(disk)
				}
			}
		}(nodes[cluster*nodespercluster : (cluster+1)*nodespercluster])
	}

	// Wait here for results
	err := sg.Result()
	tests.Assert(t, err == nil)

}
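
Note the nested use of the group in setupCluster: the device goroutines call sg.Add(1) on the same StatusGroup from inside the node goroutine. Assuming StatusGroup counts the way sync.WaitGroup does (as its Add/Done names suggest), this is safe because each inner Add(1) executes before the outer goroutine's deferred Done(), so the counter can never reach zero while inner work is still pending. Here is that pattern distilled to a runnable sketch with a plain sync.WaitGroup:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 3; i++ {
			wg.Add(1) // runs before this goroutine's own deferred Done
			go func(n int) {
				defer wg.Done()
				fmt.Println("inner work", n)
			}(i)
		}
	}()

	// Wait returns only after the outer goroutine and all inner
	// goroutines have called Done.
	wg.Wait()
}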
Code Example #3
File: heketi_test.go  Project: yepengxj/heketi
func teardownCluster(t *testing.T) {
	clusters, err := heketi.ClusterList()
	tests.Assert(t, err == nil)

	for _, cluster := range clusters.Clusters {

		clusterInfo, err := heketi.ClusterInfo(cluster)
		tests.Assert(t, err == nil)

		// Delete volumes in this cluster
		for _, volume := range clusterInfo.Volumes {
			err := heketi.VolumeDelete(volume)
			tests.Assert(t, err == nil)
		}

		// Delete nodes
		for _, node := range clusterInfo.Nodes {

			// Get node information
			nodeInfo, err := heketi.NodeInfo(node)
			tests.Assert(t, err == nil)

			// Delete each device
			sg := utils.NewStatusGroup()
			for _, device := range nodeInfo.DevicesInfo {
				sg.Add(1)
				go func(id string) {
					defer sg.Done()

					err := heketi.DeviceDelete(id)
					sg.Err(err)

				}(device.Id)
			}
			err = sg.Result()
			tests.Assert(t, err == nil)

			// Delete node
			err = heketi.NodeDelete(node)
			tests.Assert(t, err == nil)
		}

		// Delete cluster
		err = heketi.ClusterDelete(cluster)
		tests.Assert(t, err == nil)
	}
}
Code Example #4
File: heketi_test.go  Project: yepengxj/heketi
func setupCluster(t *testing.T) {
	tests.Assert(t, heketi != nil)

	// Create a cluster
	cluster, err := heketi.ClusterCreate()
	tests.Assert(t, err == nil)

	// Add nodes
	for index, hostname := range storagevms {
		nodeReq := &api.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{hostname}
		nodeReq.Hostnames.Storage = []string{hostname}
		nodeReq.Zone = index%2 + 1

		node, err := heketi.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Add devices
		sg := utils.NewStatusGroup()
		for _, disk := range disks {
			sg.Add(1)
			go func(d string) {
				defer sg.Done()

				driveReq := &api.DeviceAddRequest{}
				driveReq.Name = d
				driveReq.NodeId = node.Id

				err := heketi.DeviceAdd(driveReq)
				sg.Err(err)
			}(disk)
		}

		err = sg.Result()
		tests.Assert(t, err == nil)
	}
}
Code Example #5
File: volume_entry.go  Project: yepengxj/heketi
func (v *VolumeEntry) checkBricksCanBeDestroyed(db *bolt.DB,
	executor executors.Executor,
	brick_entries []*BrickEntry) error {

	sg := utils.NewStatusGroup()

	// Create a goroutine for each brick
	for _, brick := range brick_entries {
		sg.Add(1)
		go func(b *BrickEntry) {
			defer sg.Done()
			sg.Err(b.DestroyCheck(db, executor))
		}(brick)
	}

	// Wait here until all goroutines have returned. If
	// any of them errored, it is caught here.
	err := sg.Result()
	if err != nil {
		logger.Err(err)
	}
	return err
}
Code Example #6
File: heketi_test.go  Project: yepengxj/heketi
func teardownCluster(t *testing.T) {
	clusters, err := heketi.ClusterList()
	tests.Assert(t, err == nil)

	sg := utils.NewStatusGroup()
	for _, cluster := range clusters.Clusters {

		sg.Add(1)
		go func(clusterId string) {
			defer sg.Done()

			clusterInfo, err := heketi.ClusterInfo(clusterId)
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Delete volumes in this cluster
			for _, volume := range clusterInfo.Volumes {
				err := heketi.VolumeDelete(volume)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}
			}

			// Delete all devices in the cluster concurrently
			deviceSg := utils.NewStatusGroup()
			for _, node := range clusterInfo.Nodes {

				// Get node information
				nodeInfo, err := heketi.NodeInfo(node)
				if err != nil {
					logger.Err(err)
					// Report to the outer group: this early return skips
					// deviceSg.Result() below, so deviceSg.Err would be lost
					sg.Err(err)
					return
				}

				// Delete each device
				for _, device := range nodeInfo.DevicesInfo {
					deviceSg.Add(1)
					go func(id string) {
						defer deviceSg.Done()

						err := heketi.DeviceDelete(id)
						if err != nil {
							logger.Err(err)
							deviceSg.Err(err)
							return
						}

					}(device.Id)
				}
			}
			err = deviceSg.Result()
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Delete nodes
			for _, node := range clusterInfo.Nodes {
				err = heketi.NodeDelete(node)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}
			}

			// Delete cluster
			err = heketi.ClusterDelete(clusterId)
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

		}(cluster)

	}

	err = sg.Result()
	tests.Assert(t, err == nil)
}
Code Example #7
File: client_test.go  Project: yepengxj/heketi
func TestTopology(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster correctly
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)

	// Create multiple clusters
	clusteridlist := make([]api.ClusterInfoResponse, 0)
	for m := 0; m < 4; m++ {
		cluster, err := c.ClusterCreate()
		tests.Assert(t, err == nil)
		tests.Assert(t, cluster.Id != "")
		clusteridlist = append(clusteridlist, *cluster)
	}
	tests.Assert(t, len(clusteridlist) == 4)

	// Verify the topology info and then delete the clusters
	topology, err := c.TopologyInfo()
	tests.Assert(t, err == nil)
	for _, cid := range topology.ClusterList {
		clusterid := cid.Id
		err = c.ClusterDelete(clusterid)
		tests.Assert(t, err == nil)
	}

	// Create a cluster and add multiple nodes, devices, and volumes
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)
	tests.Assert(t, cluster.Id != "")
	tests.Assert(t, len(cluster.Nodes) == 0)
	tests.Assert(t, len(cluster.Volumes) == 0)

	// Get information about the cluster
	clusterinfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(clusterinfo, cluster))

	// Get information about the Topology and verify the cluster creation
	topology, err = c.TopologyInfo()
	tests.Assert(t, err == nil)
	tests.Assert(t, topology.ClusterList[0].Id == cluster.Id)

	// Create multiple nodes and add devices to the nodes
	nodeinfos := make([]api.NodeInfoResponse, 0)
	for n := 0; n < 4; n++ {
		nodeReq := &api.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)}
		nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		nodeinfos = append(nodeinfos, *node)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &api.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)
			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}
	tests.Assert(t, len(nodeinfos) != 0)

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create multiple volumes in the cluster
	volumeinfos := make([]api.VolumeInfoResponse, 0)
	for n := 0; n < 4; n++ {
		volumeReq := &api.VolumeCreateRequest{}
		volumeReq.Size = 10
		volume, err := c.VolumeCreate(volumeReq)
		tests.Assert(t, err == nil)
		tests.Assert(t, volume.Id != "")
		tests.Assert(t, volume.Size == volumeReq.Size)
		volumeinfos = append(volumeinfos, *volume)
	}
	topology, err = c.TopologyInfo()
	tests.Assert(t, err == nil)

	// Check that the topology contains all the existing volumes
	var volumefound int
	for _, volumeInfo := range topology.ClusterList[0].Volumes {
		for _, singlevolumei := range volumeinfos {
			if singlevolumei.Id == volumeInfo.Id {
				volumefound++
				break
			}
		}
	}
	tests.Assert(t, volumefound == 4)

	// Delete all the volumes
	for _, volumeInfo := range topology.ClusterList[0].Volumes {
		err = c.VolumeDelete(volumeInfo.Id)
		tests.Assert(t, err == nil)
	}

	// Verify the nodes and devices info from topology info and delete the entries
	for _, nodeid := range topology.ClusterList[0].Nodes {
		nodeInfo := nodeid
		found := false
		for _, singlenodei := range nodeinfos {
			if singlenodei.Id == nodeInfo.Id {
				found = true
				break
			}
		}
		tests.Assert(t, found == true)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeInfo.Id)
		tests.Assert(t, err == nil)

	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}
Code Example #8
File: client_test.go  Project: yepengxj/heketi
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	for n := 0; n < 4; n++ {
		nodeReq := &api.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)}
		nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &api.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)

			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &api.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id
	expandReq := &api.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	clusterInfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	for _, nodeid := range clusterInfo.Nodes {
		// Get node information
		nodeInfo, err := c.NodeInfo(nodeid)
		tests.Assert(t, err == nil)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeid)
		tests.Assert(t, err == nil)

	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}