Example 1: adding and removing a device from a SimpleAllocator
func TestSimpleAllocatorAddRemoveDevice(t *testing.T) {
	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	cluster := createSampleClusterEntry()
	node := createSampleNodeEntry()
	node.Info.ClusterId = cluster.Info.Id
	device := createSampleDeviceEntry(node.Info.Id, 10000)

	tests.Assert(t, len(a.rings) == 0)
	err := a.AddDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(a.rings) == 1)
	tests.Assert(t, a.rings[cluster.Info.Id] != nil)

	// Get the nodes from the ring
	ch, _, errc := a.GetNodes(cluster.Info.Id, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d == device.Info.Id)
	}
	err = <-errc
	tests.Assert(t, devices == 1)
	tests.Assert(t, err == nil)

	// Now remove the device
	err = a.RemoveDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	// The cluster ring is kept; only the device was removed from it
	tests.Assert(t, len(a.rings) == 1)

	// Get the nodes from the ring
	ch, _, errc = a.GetNodes(cluster.Info.Id, utils.GenUUID())

	devices = 0
	for d := range ch {
		devices++
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, devices == 0)
	tests.Assert(t, err == nil)

}
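
The test above exercises the consumer contract for SimpleAllocator.GetNodes: range over the device channel until it is drained, then read the error channel. A minimal sketch of that contract in isolation, assuming the allocator and cluster from the test above; closing done matters only when the consumer stops early, as allocBricks does in Example 11 below:

ch, done, errc := a.GetNodes(cluster.Info.Id, utils.GenUUID())
defer close(done) // lets the generator exit if we stop reading early

for deviceId := range ch {
	_ = deviceId // next candidate device in the ring
}

// Read the error channel only after the device channel is drained;
// it yields ErrNotFound for an unknown cluster (see Example 4)
if err := <-errc; err != nil {
	// handle allocator failure
}
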
Example 2: test helper that builds a sample NodeEntry
func createSampleNodeEntry() *NodeEntry {
	req := &NodeAddRequest{
		ClusterId: "123",
		Hostnames: HostAddresses{
			Manage:  []string{"manage" + utils.GenUUID()[:8]},
			Storage: []string{"storage" + utils.GenUUID()[:8]},
		},
		Zone: 99,
	}

	return NewNodeEntryFromRequest(req)
}
Example 3: building a VolumeEntry from a create request
func NewVolumeEntryFromRequest(req *VolumeCreateRequest) *VolumeEntry {
	godbc.Require(req != nil)

	vol := NewVolumeEntry()
	vol.Info.Id = utils.GenUUID()
	vol.Info.Durability = req.Durability
	vol.Info.Snapshot = req.Snapshot
	vol.Info.Size = req.Size

	// Set default durability values
	durability := vol.Info.Durability.Type
	switch {

	case durability == DURABILITY_STRING_REPLICATE:
		logger.Debug("[%v] Replica %v",
			vol.Info.Id,
			vol.Info.Durability.Replicate.Replica)
		vol.Durability = &vol.Info.Durability.Replicate

	case durability == DURABILITY_STRING_EC:
		logger.Debug("[%v] EC %v + %v ",
			vol.Info.Id,
			vol.Info.Durability.Disperse.Data,
			vol.Info.Durability.Disperse.Redundancy)
		vol.Durability = &vol.Info.Durability.Disperse

	case durability == DURABILITY_STRING_DISTRIBUTE_ONLY || durability == "":
		logger.Debug("[%v] Distributed", vol.Info.Id)
		vol.Durability = NewNoneDurability()

	default:
		panic(fmt.Sprintf("BUG: Unknown type: %v\n", vol.Info.Durability))
	}

	// Set the default values accordingly
	vol.Durability.SetDurability()

	// Set default name
	if req.Name == "" {
		vol.Info.Name = "vol_" + vol.Info.Id
	} else {
		vol.Info.Name = req.Name
	}

	// Set default thinp factor
	if vol.Info.Snapshot.Enable && vol.Info.Snapshot.Factor == 0 {
		vol.Info.Snapshot.Factor = DEFAULT_THINP_SNAPSHOT_FACTOR
	} else if !vol.Info.Snapshot.Enable {
		vol.Info.Snapshot.Factor = 1
	}

	// If it is zero, then it will be assigned during volume creation
	vol.Info.Clusters = req.Clusters

	return vol
}
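
A minimal usage sketch for the constructor above, assuming it runs inside the same package; the size and replica count are illustrative, and the constants come from the switch cases shown:

req := &VolumeCreateRequest{}
req.Size = 100
req.Durability.Type = DURABILITY_STRING_REPLICATE
req.Durability.Replicate.Replica = 3
req.Snapshot.Enable = true // Factor 0 defaults to DEFAULT_THINP_SNAPSHOT_FACTOR

vol := NewVolumeEntryFromRequest(req)
// Name was empty, so it defaults to "vol_" + vol.Info.Id
fmt.Printf("created %v (snapshot factor %v)\n",
	vol.Info.Name, vol.Info.Snapshot.Factor)
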
Example 4: error paths on an empty SimpleAllocator
func TestSimpleAllocatorEmpty(t *testing.T) {
	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	err := a.RemoveDevice(createSampleClusterEntry(),
		createSampleNodeEntry(),
		createSampleDeviceEntry("aaa", 10))
	tests.Assert(t, err == ErrNotFound)

	err = a.RemoveCluster("aaa")
	tests.Assert(t, err == ErrNotFound)

	ch, _, errc := a.GetNodes(utils.GenUUID(), utils.GenUUID())
	for d := range ch {
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, err == ErrNotFound)
}
Example 5: building a NodeEntry from an add request
func NewNodeEntryFromRequest(req *NodeAddRequest) *NodeEntry {
	godbc.Require(req != nil)

	node := NewNodeEntry()
	node.Info.Id = utils.GenUUID()
	node.Info.ClusterId = req.ClusterId
	node.Info.Hostnames = req.Hostnames
	node.Info.Zone = req.Zone

	return node
}
Example 6: building a DeviceEntry from an add request
func NewDeviceEntryFromRequest(req *DeviceAddRequest) *DeviceEntry {
	godbc.Require(req != nil)

	device := NewDeviceEntry()
	device.Info.Id = utils.GenUUID()
	device.Info.Name = req.Name
	device.Info.Weight = req.Weight
	device.NodeId = req.NodeId

	return device
}
Example 7: creating an asynchronous operation handler
// NewHandler creates a new asynchronous operation handler.
// Use this function only if you need to do every step by hand;
// it is recommended to use AsyncHttpRedirectFunc() instead.
func (a *AsyncHttpManager) NewHandler() *AsyncHttpHandler {
	handler := &AsyncHttpHandler{
		manager: a,
		id:      utils.GenUUID(),
	}

	a.lock.Lock()
	defer a.lock.Unlock()

	a.handlers[handler.id] = handler

	return handler
}
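
A sketch of hand-managed use. The manager variable and doLongRunningWork are hypothetical, and the completion methods are an assumption about the AsyncHttpHandler API rather than something shown in this example:

handler := manager.NewHandler()

go func() {
	// doLongRunningWork is hypothetical; Completed()/CompletedWithError()
	// are assumed completion methods on AsyncHttpHandler
	if err := doLongRunningWork(); err != nil {
		handler.CompletedWithError(err)
		return
	}
	handler.Completed()
}()
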
Example 8: initializing a SimpleAllocator from the database
func TestSimpleAllocatorInitFromDb(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Setup database
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create large cluster
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		10,     // nodes_per_cluster
		20,     // devices_per_node
		600*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Get the cluster list
	var clusterId string
	err = app.db.View(func(tx *bolt.Tx) error {
		clusters, err := ClusterList(tx)
		if err != nil {
			return err
		}
		tests.Assert(t, len(clusters) == 1)
		clusterId = clusters[0]

		return nil
	})
	tests.Assert(t, err == nil)

	// Create an allocator and initialize it from the DB
	a := NewSimpleAllocatorFromDb(app.db)
	tests.Assert(t, a != nil)

	// Get the nodes from the ring
	ch, _, errc := a.GetNodes(clusterId, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d != "")
	}
	err = <-errc
	tests.Assert(t, devices == 10*20)
	tests.Assert(t, err == nil)

}
Example 9: constructing a BrickEntry with contract checks
func NewBrickEntry(size, tpsize, poolMetadataSize uint64, deviceid, nodeid string) *BrickEntry {
	godbc.Require(size > 0)
	godbc.Require(tpsize > 0)
	godbc.Require(deviceid != "")
	godbc.Require(nodeid != "")

	entry := &BrickEntry{}
	entry.TpSize = tpsize
	entry.PoolMetadataSize = poolMetadataSize
	entry.Info.Id = utils.GenUUID()
	entry.Info.Size = size
	entry.Info.NodeId = nodeid
	entry.Info.DeviceId = deviceid

	godbc.Ensure(entry.Info.Id != "")
	godbc.Ensure(entry.TpSize == tpsize)
	godbc.Ensure(entry.Info.Size == size)
	godbc.Ensure(entry.Info.NodeId == nodeid)
	godbc.Ensure(entry.Info.DeviceId == deviceid)

	return entry
}
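
A minimal usage sketch; the sizes and ids below are illustrative, and the thin pool size is shown larger than the brick size on the assumption that it accounts for a snapshot factor:

brick := NewBrickEntry(
	1024*1024,        // size
	1536*1024,        // tpsize (thin pool size)
	16*1024,          // poolMetadataSize
	"device-id-1234", // hypothetical device id
	"node-id-5678")   // hypothetical node id

// The entry gets a generated id and echoes the inputs back
fmt.Println(brick.Info.Id, brick.Info.Size, brick.TpSize)
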
Example 10: end-to-end volume lifecycle through the client
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	for n := 0; n < 4; n++ {
		nodeReq := &glusterfs.NodeAddRequest{}
		nodeReq.ClusterId = cluster.Id
		nodeReq.Hostnames.Manage = []string{fmt.Sprintf("manage%v", n)}
		nodeReq.Hostnames.Storage = []string{fmt.Sprintf("storage%v", n)}
		nodeReq.Zone = n + 1

		// Create node
		node, err := c.NodeAdd(nodeReq)
		tests.Assert(t, err == nil)

		// Create a device request
		sg := utils.NewStatusGroup()
		for i := 0; i < 50; i++ {
			sg.Add(1)
			go func() {
				defer sg.Done()

				deviceReq := &glusterfs.DeviceAddRequest{}
				deviceReq.Name = "sd" + utils.GenUUID()[:8]
				deviceReq.Weight = 100
				deviceReq.NodeId = node.Id

				// Create device
				err := c.DeviceAdd(deviceReq)
				sg.Err(err)

			}()
		}
		tests.Assert(t, sg.Result() == nil)
	}

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &glusterfs.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id
	expandReq := &glusterfs.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	clusterInfo, err := c.ClusterInfo(cluster.Id)
	tests.Assert(t, err == nil)
	for _, nodeid := range clusterInfo.Nodes {
		// Get node information
		nodeInfo, err := c.NodeInfo(nodeid)
		tests.Assert(t, err == nil)

		// Delete all devices
		sg := utils.NewStatusGroup()
		for index := range nodeInfo.DevicesInfo {
			sg.Add(1)
			go func(i int) {
				defer sg.Done()
				sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
			}(index)
		}
		err = sg.Result()
		tests.Assert(t, err == nil, err)

		// Delete node
		err = c.NodeDelete(nodeid)
		tests.Assert(t, err == nil)

	}

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}
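
The fan-out idiom used twice above, in isolation: utils.StatusGroup acts like a sync.WaitGroup that also collects worker errors, and Result() waits for all workers and reports a non-nil error if any of them failed. doRequest is hypothetical:

sg := utils.NewStatusGroup()
for n := 0; n < 4; n++ {
	sg.Add(1)
	go func(n int) {
		defer sg.Done()
		sg.Err(doRequest(n)) // a nil error does not mark the group failed
	}(n)
}
if err := sg.Result(); err != nil {
	// at least one worker reported an error
}
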
Example 11: allocating bricks across the cluster ring
func (v *VolumeEntry) allocBricks(
	db *bolt.DB,
	allocator Allocator,
	cluster string,
	bricksets int,
	brick_size uint64) (brick_entries []*BrickEntry, e error) {

	// Setup garbage collector function in case of error
	defer func() {

		// Check the named return value 'err'
		if e != nil {
			logger.Debug("Error detected.  Cleaning up volume %v: Len(%v) ", v.Info.Id, len(brick_entries))
			db.Update(func(tx *bolt.Tx) error {
				for _, brick := range brick_entries {
					v.removeBrickFromDb(tx, brick)
				}
				return nil
			})
		}
	}()

	// Initialize brick_entries
	brick_entries = make([]*BrickEntry, 0)

	// Determine allocation for each brick required for this volume
	for brick_num := 0; brick_num < bricksets; brick_num++ {
		logger.Info("brick_num: %v", brick_num)

		// Create a brick set list to later make sure that the
		// proposed bricks and devices are acceptable
		setlist := make([]*BrickEntry, 0)

		// Generate an id for the brick
		brickId := utils.GenUUID()

		// Get allocator generator
		// The same generator should be used for the brick and its replicas
		deviceCh, done, errc := allocator.GetNodes(cluster, brickId)
		// Note: deferred inside the loop, so the done channels are only
		// closed when allocBricks returns, not per iteration
		defer func() {
			close(done)
		}()

		// Check location has space for each brick and its replicas
		for i := 0; i < v.Durability.BricksInSet(); i++ {
			logger.Debug("%v / %v", i, v.Durability.BricksInSet())

			// Do the work in the database context so that the cluster
			// data does not change while determining brick location
			err := db.Update(func(tx *bolt.Tx) error {

				// Check the ring for devices to place the brick
				for deviceId := range deviceCh {

					// Get device entry
					device, err := NewDeviceEntryFromId(tx, deviceId)
					if err != nil {
						return err
					}

					// Do not allow a device from the same node to be
					// in the set
					deviceOk := true
					for _, brickInSet := range setlist {
						if brickInSet.Info.NodeId == device.NodeId {
							deviceOk = false
						}
					}

					if !deviceOk {
						continue
					}

					// Try to allocate a brick on this device
					brick := device.NewBrickEntry(brick_size, float64(v.Info.Snapshot.Factor))

					// Determine if it was successful
					if brick != nil {

						// If this is the first brick in the set, reset its id
						if i == 0 {
							brick.SetId(brickId)
						}

						// Save the brick entry to create later
						brick_entries = append(brick_entries, brick)

						// Add to set list
						setlist = append(setlist, brick)

						// Add brick to device
						device.BrickAdd(brick.Id())

						// Add brick to volume
						v.BrickAdd(brick.Id())

						// Save values
						err := device.Save(tx)
						if err != nil {
							return err
						}
						return nil
					}
				}

				// Check if allocator returned an error
				if err := <-errc; err != nil {
					return err
				}

				// No devices found
				return ErrNoSpace

			})
			if err != nil {
				return brick_entries, err
			}
		}
	}

	return brick_entries, nil

}
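
allocBricks leans on a Go idiom worth isolating: the deferred closure reads the named return value e, so every early return with a non-nil error also triggers the rollback. A self-contained sketch of just that mechanism:

package main

import (
	"errors"
	"fmt"
)

func doWork(fail bool) (results []string, e error) {
	// The deferred closure observes the final value of the named
	// return e, so any `return ..., err` path runs the rollback
	defer func() {
		if e != nil {
			fmt.Println("rolling back", len(results), "partial results")
			results = nil
		}
	}()

	results = append(results, "step-1")
	if fail {
		return results, errors.New("simulated allocation failure")
	}
	return results, nil
}

func main() {
	if _, err := doWork(true); err != nil {
		fmt.Println("got:", err)
	}
}
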
Example 12: creating a ClusterEntry with a generated id
func NewClusterEntryFromRequest() *ClusterEntry {
	entry := NewClusterEntry()
	entry.Info.Id = utils.GenUUID()

	return entry
}