Example #1
func createSampleNodeEntry() *NodeEntry {
	req := &api.NodeAddRequest{
		ClusterId: "123",
		Hostnames: api.HostAddresses{
			Manage:  []string{"manage" + utils.GenUUID()[:8]},
			Storage: []string{"storage" + utils.GenUUID()[:8]},
		},
		Zone: 99,
	}

	return NewNodeEntryFromRequest(req)
}
Example #2
func TestSimpleAllocatorAddRemoveDevice(t *testing.T) {
	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	cluster := createSampleClusterEntry()
	node := createSampleNodeEntry()
	node.Info.ClusterId = cluster.Info.Id
	device := createSampleDeviceEntry(node.Info.Id, 10000)

	tests.Assert(t, len(a.rings) == 0)
	err := a.AddDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(a.rings) == 1)
	tests.Assert(t, a.rings[cluster.Info.Id] != nil)

	// Get the device ids from the ring
	ch, _, errc := a.GetNodes(cluster.Info.Id, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d == device.Info.Id)
	}
	err = <-errc
	tests.Assert(t, devices == 1)
	tests.Assert(t, err == nil)

	// Now remove the device
	err = a.RemoveDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(a.rings) == 1)

	// Get the device ids from the ring
	ch, _, errc = a.GetNodes(cluster.Info.Id, utils.GenUUID())

	devices = 0
	for d := range ch {
		devices++
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, devices == 0)
	tests.Assert(t, err == nil)

}
Example #3
func TestSimpleAllocatorGetDeviceList(t *testing.T) {
	r := NewSimpleAllocatorRing()
	tests.Assert(t, r != nil)

	zones, nodes, drives := 1, 2, 4

	// Create ring
	for z := 0; z < zones; z++ {

		// Generate nodes for this zone
		for n := 0; n < nodes; n++ {
			nid := utils.GenUUID()

			// Generate drives for this node
			for d := 0; d < drives; d++ {
				did := utils.GenUUID()

				// Setup simple device
				dev := &SimpleDevice{
					zone:     z,
					deviceId: did,
					nodeId:   nid,
				}
				r.Add(dev)
			}
		}
	}
	tests.Assert(t, r.balancedList == nil)

	// Rebalance
	r.Rebalance()
	tests.Assert(t, r.balancedList != nil)
	tests.Assert(t, len(r.balancedList) == zones*nodes*drives)

	// Get a list for a brick with id "0000000"
	// It should return a list equal to balancedList
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("0000000"), r.balancedList))
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("0000001"), append(r.balancedList[1:], r.balancedList[0])))

	// 0xe = 14 is larger than the ring size 1*2*4 = 8, so the index wraps to 14%8 = 6
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("000000e"), append(r.balancedList[6:], r.balancedList[:6]...)))
}
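The wrap-around arithmetic that the assertions above rely on can be checked in isolation. The following standalone sketch only illustrates that modular indexing; interpreting the id prefix as a hex number is an assumption made for the example, not necessarily how GetDeviceList hashes ids internally.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	ringSize := uint64(1 * 2 * 4) // zones * nodes * drives, as in the test above

	for _, id := range []string{"0000000", "0000001", "000000e"} {
		// Treat the id as a hex number and wrap it into the ring
		n, err := strconv.ParseUint(id, 16, 64)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> start index %d\n", id, n%ringSize) // 0, 1, and 14%8 = 6
	}
}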
Example #4
func TestSimpleAllocatorRingRebalance(t *testing.T) {
	r := NewSimpleAllocatorRing()
	tests.Assert(t, r != nil)

	zones, nodes, drives := 10, 100, 48

	// Add 10*100*48 devices to the ring
	for z := 0; z < zones; z++ {

		// Generate nodes for this zone
		for n := 0; n < nodes; n++ {
			nid := utils.GenUUID()

			// Generate drives for this node
			for d := 0; d < drives; d++ {
				did := utils.GenUUID()

				// Setup simple device
				dev := &SimpleDevice{
					zone:     z,
					deviceId: did,
					nodeId:   nid,
				}
				r.Add(dev)
			}
		}
	}
	tests.Assert(t, r.balancedList == nil)

	// Rebalance
	r.Rebalance()
	tests.Assert(t, r.balancedList != nil)
	tests.Assert(t, len(r.balancedList) == zones*nodes*drives)

	// Check balance
	// 1. No two adjacent entries in the list should be in the same zone
	// 2. Entries two apart should not be on the same node
	for i := range r.balancedList[:len(r.balancedList)-1] {
		tests.Assert(t, r.balancedList[i].zone != r.balancedList[i+1].zone)
	}
	for i := range r.balancedList[:len(r.balancedList)-2] {
		tests.Assert(t, r.balancedList[i].nodeId != r.balancedList[i+2].nodeId)
	}
}
Example #5
func NewVolumeEntryFromRequest(req *api.VolumeCreateRequest) *VolumeEntry {
	godbc.Require(req != nil)

	vol := NewVolumeEntry()
	vol.Info.Id = utils.GenUUID()
	vol.Info.Durability = req.Durability
	vol.Info.Snapshot = req.Snapshot
	vol.Info.Size = req.Size

	// Set default durability values
	durability := vol.Info.Durability.Type
	switch {

	case durability == api.DurabilityReplicate:
		logger.Debug("[%v] Replica %v",
			vol.Info.Id,
			vol.Info.Durability.Replicate.Replica)
		vol.Durability = NewVolumeReplicaDurability(&vol.Info.Durability.Replicate)

	case durability == api.DurabilityEC:
		logger.Debug("[%v] EC %v + %v ",
			vol.Info.Id,
			vol.Info.Durability.Disperse.Data,
			vol.Info.Durability.Disperse.Redundancy)
		vol.Durability = NewVolumeDisperseDurability(&vol.Info.Durability.Disperse)

	case durability == api.DurabilityDistributeOnly || durability == "":
		logger.Debug("[%v] Distributed", vol.Info.Id)
		vol.Durability = NewNoneDurability()

	default:
		panic(fmt.Sprintf("BUG: Unknown type: %v\n", vol.Info.Durability))
	}

	// Set the default values accordingly
	vol.Durability.SetDurability()

	// Set default name
	if req.Name == "" {
		vol.Info.Name = "vol_" + vol.Info.Id
	} else {
		vol.Info.Name = req.Name
	}

	// Set default thinp factor
	if vol.Info.Snapshot.Enable && vol.Info.Snapshot.Factor == 0 {
		vol.Info.Snapshot.Factor = DEFAULT_THINP_SNAPSHOT_FACTOR
	} else if !vol.Info.Snapshot.Enable {
		vol.Info.Snapshot.Factor = 1
	}

	// If it is zero, then it will be assigned during volume creation
	vol.Info.Clusters = req.Clusters

	return vol
}
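A replicated volume can be fed through this constructor in the same style as the createSample* helpers above. The helper below is a hypothetical sketch (its name is not from the original code) and only touches request fields that NewVolumeEntryFromRequest reads; it assumes the same package and imports as the surrounding examples.

func createSampleReplicaVolumeEntry(size int) *VolumeEntry {
	req := &api.VolumeCreateRequest{}
	req.Size = size
	req.Durability.Type = api.DurabilityReplicate
	req.Durability.Replicate.Replica = 3

	// Name defaults to "vol_" + Id, and Snapshot.Factor defaults to 1
	// because snapshots are not enabled in this request.
	return NewVolumeEntryFromRequest(req)
}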
Example #6
func TestSimpleAllocatorEmpty(t *testing.T) {
	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	err := a.RemoveDevice(createSampleClusterEntry(),
		createSampleNodeEntry(),
		createSampleDeviceEntry("aaa", 10))
	tests.Assert(t, err == ErrNotFound)

	err = a.RemoveCluster("aaa")
	tests.Assert(t, err == ErrNotFound)

	ch, _, errc := a.GetNodes(utils.GenUUID(), utils.GenUUID())
	for d := range ch {
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, err == ErrNotFound)
}
Example #7
func NewDeviceEntryFromRequest(req *api.DeviceAddRequest) *DeviceEntry {
	godbc.Require(req != nil)

	device := NewDeviceEntry()
	device.Info.Id = utils.GenUUID()
	device.Info.Name = req.Name
	device.NodeId = req.NodeId

	return device
}
Example #8
func NewNodeEntryFromRequest(req *api.NodeAddRequest) *NodeEntry {
	godbc.Require(req != nil)

	node := NewNodeEntry()
	node.Info.Id = utils.GenUUID()
	node.Info.ClusterId = req.ClusterId
	node.Info.Hostnames = req.Hostnames
	node.Info.Zone = req.Zone

	return node
}
Example #9
func createSampleDeviceEntry(nodeid string, disksize uint64) *DeviceEntry {

	req := &api.DeviceAddRequest{}
	req.NodeId = nodeid
	req.Name = "/dev/" + utils.GenUUID()[:8]

	d := NewDeviceEntryFromRequest(req)
	d.StorageSet(disksize)

	return d
}
Example #10
func TestSimpleAllocatorInitFromDb(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Setup database
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create large cluster
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		10,     // nodes_per_cluster
		20,     // devices_per_node
		600*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Get the cluster list
	var clusterId string
	err = app.db.View(func(tx *bolt.Tx) error {
		clusters, err := ClusterList(tx)
		if err != nil {
			return err
		}
		tests.Assert(t, len(clusters) == 1)
		clusterId = clusters[0]

		return nil
	})
	tests.Assert(t, err == nil)

	// Create an allocator and initialize it from the DB
	a := NewSimpleAllocatorFromDb(app.db)
	tests.Assert(t, a != nil)

	// Get the device ids from the ring
	ch, _, errc := a.GetNodes(clusterId, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d != "")
	}
	err = <-errc
	tests.Assert(t, devices == 10*20)
	tests.Assert(t, err == nil)

}
Example #11
func TestNewDeviceEntryFromRequest(t *testing.T) {
	req := &api.DeviceAddRequest{}
	req.NodeId = "123"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	tests.Assert(t, d != nil)
	tests.Assert(t, d.Info.Id != "")
	tests.Assert(t, d.Info.Name == req.Name)
	tests.Assert(t, d.Info.Storage.Free == 0)
	tests.Assert(t, d.Info.Storage.Total == 0)
	tests.Assert(t, d.Info.Storage.Used == 0)
	tests.Assert(t, d.NodeId == "123")
	tests.Assert(t, d.Bricks != nil)
	tests.Assert(t, len(d.Bricks) == 0)

}
Example #12
func TestNewDeviceEntryNewInfoResponseBadBrickIds(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	// Add bad brick ids
	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var info *api.DeviceInfoResponse
	err = app.db.View(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		info, err = device.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == ErrNotFound)
}
Example #13
func TestDeviceEntryNewBrickEntry(t *testing.T) {
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 900
	d.Info.Storage.Total = 1000
	d.Info.Storage.Used = 100

	// Alignment
	d.ExtentSize = 8

	// Too large
	brick := d.NewBrickEntry(1000000000, 1.5)
	tests.Assert(t, brick == nil)

	// --- Now check with a real value ---

	// Check a newly created brick (same size as the request below)
	size := 200
	tpsize := uint64(float32(size) * 1.5)

	// Alignment
	tpsize += d.ExtentSize - (tpsize % d.ExtentSize)

	// Calculate metadatasize
	metadatasize := d.poolMetadataSize(tpsize)

	// Alignment
	metadatasize += d.ExtentSize - (metadatasize % d.ExtentSize)
	total := tpsize + metadatasize

	brick = d.NewBrickEntry(200, 1.5)
	tests.Assert(t, brick != nil)
	tests.Assert(t, brick.TpSize == tpsize)
	tests.Assert(t, brick.PoolMetadataSize == metadatasize, brick.PoolMetadataSize, metadatasize)
	tests.Assert(t, brick.Info.Size == 200)

	// Check it was subtracted from device storage
	tests.Assert(t, d.Info.Storage.Used == 100+total)
	tests.Assert(t, d.Info.Storage.Free == 900-total)
	tests.Assert(t, d.Info.Storage.Total == 1000)
}
Example #14
func TestNewDeviceEntryFromId(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var device *DeviceEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(device, d))
}
Example #15
func NewBrickEntry(size, tpsize, poolMetadataSize uint64, deviceid, nodeid string) *BrickEntry {
	godbc.Require(size > 0)
	godbc.Require(tpsize > 0)
	godbc.Require(deviceid != "")
	godbc.Require(nodeid != "")

	entry := &BrickEntry{}
	entry.TpSize = tpsize
	entry.PoolMetadataSize = poolMetadataSize
	entry.Info.Id = utils.GenUUID()
	entry.Info.Size = size
	entry.Info.NodeId = nodeid
	entry.Info.DeviceId = deviceid

	godbc.Ensure(entry.Info.Id != "")
	godbc.Ensure(entry.TpSize == tpsize)
	godbc.Ensure(entry.Info.Size == size)
	godbc.Ensure(entry.Info.NodeId == nodeid)
	godbc.Ensure(entry.Info.DeviceId == deviceid)

	return entry
}
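For reference, a direct call with illustrative placeholder values shows what the constructor fills in and what it leaves untouched (the ids below are hypothetical).

brick := NewBrickEntry(100, 150, 8, "some-device-id", "some-node-id")
// brick.Info.Id is a freshly generated UUID; Size, TpSize, PoolMetadataSize,
// NodeId and DeviceId echo the arguments. brick.Info.Path is not set by the
// constructor; the tests above assign it manually when they need it.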
Example #16
func TestNewDeviceEntryMarshal(t *testing.T) {
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	buffer, err := d.Marshal()
	tests.Assert(t, err == nil)
	tests.Assert(t, buffer != nil)
	tests.Assert(t, len(buffer) > 0)

	um := &DeviceEntry{}
	err = um.Unmarshal(buffer)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(um, d))

}
Example #17
func TestNewDeviceEntrySaveDelete(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var device *DeviceEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(device, d))

	// Deleting the device should fail while it still has bricks
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		err = device.Delete(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == ErrConflict)

	// Remove the bricks from the device
	device.BrickDelete("abc")
	device.BrickDelete("def")
	tests.Assert(t, len(device.Bricks) == 0)
	err = app.db.Update(func(tx *bolt.Tx) error {
		return device.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now try to delete the device
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		err = device.Delete(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == nil)

	// Check device has been deleted and is not in db
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == ErrNotFound)
}
Example #18
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	for n := 0; n < 4; n++ {
		nodeReq := &api.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)}
		nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &api.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)

			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &api.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id
	expandReq := &api.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	clusterInfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	for _, nodeid := range clusterInfo.Nodes {
		// Get node information
		nodeInfo, err := c.NodeInfo(nodeid)
		tests.Assert(t, err == nil)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeid)
		tests.Assert(t, err == nil)

	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}
Example #19
func TestSimpleAllocatorInitFromDbWithOfflineDevices(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Setup database
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create large cluster
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		2,      // nodes_per_cluster
		4,      // devices_per_node
		600*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Get the cluster list
	var clusterId, nodeId string
	err = app.db.Update(func(tx *bolt.Tx) error {
		clusters, err := ClusterList(tx)
		if err != nil {
			return err
		}
		tests.Assert(t, len(clusters) == 1)
		clusterId = clusters[0]

		cluster, err := NewClusterEntryFromId(tx, clusterId)
		tests.Assert(t, err == nil)

		// make one node offline, which will mean none of its
		// devices are added to the ring
		node, err := cluster.NodeEntryFromClusterIndex(tx, 0)
		tests.Assert(t, err == nil)
		nodeId = node.Info.Id
		node.State = api.EntryStateOffline
		node.Save(tx)

		// Make only one device offline in the other node
		node, err = cluster.NodeEntryFromClusterIndex(tx, 1)
		tests.Assert(t, err == nil)
		device, err := NewDeviceEntryFromId(tx, node.Devices[0])
		tests.Assert(t, err == nil)
		device.State = api.EntryStateOffline
		device.Save(tx)

		return nil
	})
	tests.Assert(t, err == nil)

	// Create an allocator and initialize it from the DB
	a := NewSimpleAllocatorFromDb(app.db)
	tests.Assert(t, a != nil)

	// Get the device ids from the ring
	ch, _, errc := a.GetNodes(clusterId, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d != "")
	}
	err = <-errc
	tests.Assert(t, err == nil)

	// Only three online devices should be in the list
	tests.Assert(t, devices == 3, devices)

}
Example #20
func TestTopology(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster correctly
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)

	// Create multiple clusters
	clusteridlist := make([]api.ClusterInfoResponse, 0)
	for m := 0; m < 4; m++ {
		cluster, err := c.ClusterCreate()
		tests.Assert(t, err == nil)
		tests.Assert(t, cluster.Id != "")
		clusteridlist = append(clusteridlist, *cluster)
	}
	tests.Assert(t, len(clusteridlist) == 4)

	// Verify the topology info and then delete the clusters
	topology, err := c.TopologyInfo()
	tests.Assert(t, err == nil)
	for _, cid := range topology.ClusterList {
		clusterid := cid.Id
		err = c.ClusterDelete(clusterid)
		tests.Assert(t, err == nil)
	}

	// Create a cluster and add multiple nodes, devices, and volumes
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)
	tests.Assert(t, cluster.Id != "")
	tests.Assert(t, len(cluster.Nodes) == 0)
	tests.Assert(t, len(cluster.Volumes) == 0)

	// Get information about the cluster
	clusterinfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(clusterinfo, cluster))

	// Get information about the Topology and verify the cluster creation
	topology, err = c.TopologyInfo()
	tests.Assert(t, err == nil)
	tests.Assert(t, topology.ClusterList[0].Id == cluster.Id)

	// Create multiple nodes and add devices to the nodes
	nodeinfos := make([]api.NodeInfoResponse, 0)
	for n := 0; n < 4; n++ {
		nodeReq := &api.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{"manage" + fmt.Sprintf("%v", n)}
		nodeReq.Hostnames.Storage = []string{"storage" + fmt.Sprintf("%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		nodeinfos = append(nodeinfos, *node)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &api.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)
			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}
	tests.Assert(t, len(nodeinfos) != 0)

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create multiple volumes in the cluster
	volumeinfos := make([]api.VolumeInfoResponse, 0)
	for n := 0; n < 4; n++ {
		volumeReq := &api.VolumeCreateRequest{}
		volumeReq.Size = 10
		volume, err := c.VolumeCreate(volumeReq)
		tests.Assert(t, err == nil)
		tests.Assert(t, volume.Id != "")
		tests.Assert(t, volume.Size == volumeReq.Size)
		volumeinfos = append(volumeinfos, *volume)
	}
	topology, err = c.TopologyInfo()
	tests.Assert(t, err == nil)

	// Verify the topology contains all the existing volumes
	var volumefound int
	for _, volumeid := range topology.ClusterList[0].Volumes {
		volumeInfo := volumeid
		for _, singlevolumei := range volumeinfos {
			if singlevolumei.Id == volumeInfo.Id {
				volumefound++
				break
			}
		}
	}
	tests.Assert(t, volumefound == 4)

	// Delete all the volumes
	for _, volumeid := range topology.ClusterList[0].Volumes {
		volumeInfo := volumeid
		err = c.VolumeDelete(volumeInfo.Id)
		tests.Assert(t, err == nil)

	}

	// Verify the nodes and devices info from topology info and delete the entries
	for _, nodeid := range topology.ClusterList[0].Nodes {
		nodeInfo := nodeid
		var found bool
		for _, singlenodei := range nodeinfos {
			found = false
			if singlenodei.Id == nodeInfo.Id {
				found = true
				break
			}
		}
		tests.Assert(t, found == true)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeInfo.Id)
		tests.Assert(t, err == nil)

	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}
Example #21
func (v *VolumeEntry) allocBricks(
	db *bolt.DB,
	allocator Allocator,
	cluster string,
	bricksets int,
	brick_size uint64) (brick_entries []*BrickEntry, e error) {

	// Setup garbage collector function in case of error
	defer func() {

		// Check the named return value 'e'
		if e != nil {
			logger.Debug("Error detected.  Cleaning up volume %v: Len(%v) ", v.Info.Id, len(brick_entries))
			db.Update(func(tx *bolt.Tx) error {
				for _, brick := range brick_entries {
					v.removeBrickFromDb(tx, brick)
				}
				return nil
			})
		}
	}()

	// Initialize brick_entries
	brick_entries = make([]*BrickEntry, 0)

	// Determine allocation for each brick required for this volume
	for brick_num := 0; brick_num < bricksets; brick_num++ {
		logger.Info("brick_num: %v", brick_num)

		// Create a brick set list to later make sure that the
		// proposed bricks and devices are acceptable
		setlist := make([]*BrickEntry, 0)

		// Generate an id for the brick
		brickId := utils.GenUUID()

		// Get allocator generator
		// The same generator should be used for the brick and its replicas
		deviceCh, done, errc := allocator.GetNodes(cluster, brickId)
		defer func() {
			close(done)
		}()

		// Check location has space for each brick and its replicas
		for i := 0; i < v.Durability.BricksInSet(); i++ {
			logger.Debug("%v / %v", i, v.Durability.BricksInSet())

			// Do the work in the database context so that the cluster
			// data does not change while determining brick location
			err := db.Update(func(tx *bolt.Tx) error {

				// Check the ring for devices to place the brick
				for deviceId := range deviceCh {

					// Get device entry
					device, err := NewDeviceEntryFromId(tx, deviceId)
					if err != nil {
						return err
					}

					// Do not allow a device from the same node to be
					// in the set
					deviceOk := true
					for _, brickInSet := range setlist {
						if brickInSet.Info.NodeId == device.NodeId {
							deviceOk = false
						}
					}

					if !deviceOk {
						continue
					}

					// Try to allocate a brick on this device
					brick := device.NewBrickEntry(brick_size, float64(v.Info.Snapshot.Factor))

					// Determine if it was successful
					if brick != nil {

						// If this is the first brick in the set, reuse the pre-generated id
						if i == 0 {
							brick.SetId(brickId)
						}

						// Save the brick entry to create later
						brick_entries = append(brick_entries, brick)

						// Add to set list
						setlist = append(setlist, brick)

						// Add brick to device
						device.BrickAdd(brick.Id())

						// Add brick to volume
						v.BrickAdd(brick.Id())

						// Save values
						err := device.Save(tx)
						if err != nil {
							return err
						}
						return nil
					}
				}

				// Check if allocator returned an error
				if err := <-errc; err != nil {
					return err
				}

				// No devices found
				return ErrNoSpace

			})
			if err != nil {
				return brick_entries, err
			}
		}
	}

	return brick_entries, nil

}
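allocBricks depends on its named return value e so that the deferred function can detect failure and remove partially allocated bricks from the database. A stripped-down, heketi-free sketch of that pattern (hypothetical names, illustration only):

package main

import (
	"errors"
	"fmt"
)

// allocate builds results incrementally; the deferred function watches the
// named return value 'e' and discards partial work when an error is returned.
func allocate(n, failAt int) (results []int, e error) {
	defer func() {
		if e != nil {
			fmt.Printf("error detected, cleaning up %v partial results\n", len(results))
			results = nil
		}
	}()

	for i := 0; i < n; i++ {
		if i == failAt {
			return results, errors.New("allocation failed")
		}
		results = append(results, i)
	}
	return results, nil
}

func main() {
	fmt.Println(allocate(5, 3)) // prints the cleanup message, then: [] allocation failed
}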
Example #22
func NewClusterEntryFromRequest() *ClusterEntry {
	entry := NewClusterEntry()
	entry.Info.Id = utils.GenUUID()

	return entry
}
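Several tests above call createSampleClusterEntry(), which does not appear in these examples. Since NewClusterEntryFromRequest takes no arguments, the helper is presumably just a thin wrapper; a sketch in the same style as createSampleNodeEntry:

func createSampleClusterEntry() *ClusterEntry {
	return NewClusterEntryFromRequest()
}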
Example #23
func TestNewDeviceEntryNewInfoResponse(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000

	// Create a brick
	b := &BrickEntry{}
	b.Info.Id = "bbb"
	b.Info.Size = 10
	b.Info.NodeId = "abc"
	b.Info.DeviceId = d.Info.Id
	b.Info.Path = "/somepath"

	// Add brick to device
	d.BrickAdd("bbb")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		err := d.Save(tx)
		if err != nil {
			return err
		}

		return b.Save(tx)
	})
	tests.Assert(t, err == nil)

	var info *api.DeviceInfoResponse
	err = app.db.View(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		info, err = device.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, info.Id == d.Info.Id)
	tests.Assert(t, info.Name == d.Info.Name)
	tests.Assert(t, reflect.DeepEqual(info.Storage, d.Info.Storage))
	tests.Assert(t, len(info.Bricks) == 1)
	tests.Assert(t, info.Bricks[0].Id == "bbb")
	tests.Assert(t, info.Bricks[0].Path == "/somepath")
	tests.Assert(t, info.Bricks[0].NodeId == "abc")
	tests.Assert(t, info.Bricks[0].DeviceId == d.Info.Id)
	tests.Assert(t, info.Bricks[0].Size == 10)

}
Example #24
func TestDeviceEntryRegister(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &api.DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()

	d := NewDeviceEntryFromRequest(req)

	// Register the device
	err := app.db.Update(func(tx *bolt.Tx) error {
		err := d.Register(tx)
		tests.Assert(t, err == nil)

		return err
	})
	tests.Assert(t, err == nil)

	// Should not be able to register again
	err = app.db.Update(func(tx *bolt.Tx) error {
		err := d.Register(tx)
		tests.Assert(t, err != nil)

		return err
	})
	tests.Assert(t, err != nil)

	// Create another device on a different node
	req = &api.DeviceAddRequest{}
	req.NodeId = "def"
	req.Name = "/dev/" + utils.GenUUID()

	d2 := NewDeviceEntryFromRequest(req)

	// Same device on different node should work
	err = app.db.Update(func(tx *bolt.Tx) error {
		err := d2.Register(tx)
		tests.Assert(t, err == nil)

		return err
	})
	tests.Assert(t, err == nil)

	// Remove d
	err = app.db.Update(func(tx *bolt.Tx) error {
		err := d.Deregister(tx)
		tests.Assert(t, err == nil)

		return err
	})
	tests.Assert(t, err == nil)

	// Register d again
	err = app.db.Update(func(tx *bolt.Tx) error {
		err := d.Register(tx)
		tests.Assert(t, err == nil)

		return err
	})
	tests.Assert(t, err == nil)

}