Example #1
func main() {
	zones, nodes, drives := 10, 100, 48

	list := []*Device{}

	index := 0
	for z := 0; z < zones; z++ {
		for n := 0; n < nodes; n++ {
			nid := utils.GenUUID()[:4]
			for d := 0; d < drives; d++ {
				did := utils.GenUUID()[:4]
				dev := &Device{
					deviceid: did,
					nodeid:   nid,
					zone:     z,
				}
				list = append(list, dev)
				index++
				//s = append(s, fmt.Sprintf("d%v:n%v:z%v", utils.GenUUID()[:4], nid, z))
			}
		}
	}
	fmt.Println(list)
	fmt.Println("-------")

	t := NewTopology()
	for _, d := range list {
		t.Add(d)
	}
	l := t.Rebalance()
	fmt.Println(l)

	fmt.Println(len(l))
}
Example #2
func TestSimpleAllocatorAddRemoveDevice(t *testing.T) {
	/*
		tmpfile := tests.Tempfile()
		defer os.Remove(tmpfile)

		// Setup database
		app := NewTestApp(tmpfile)
		defer app.Close()
	*/

	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	cluster := createSampleClusterEntry()
	node := createSampleNodeEntry()
	node.Info.ClusterId = cluster.Info.Id
	device := createSampleDeviceEntry(node.Info.Id, 10000)

	tests.Assert(t, len(a.rings) == 0)
	err := a.AddDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(a.rings) == 1)
	tests.Assert(t, a.rings[cluster.Info.Id] != nil)

	// Get the nodes from the ring
	ch, _, errc := a.GetNodes(cluster.Info.Id, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d == device.Info.Id)
	}
	err = <-errc
	tests.Assert(t, devices == 1)
	tests.Assert(t, err == nil)

	// Now remove the device
	err = a.RemoveDevice(cluster, node, device)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(a.rings) == 1)

	// Get the nodes from the ring
	ch, _, errc = a.GetNodes(cluster.Info.Id, utils.GenUUID())

	devices = 0
	for d := range ch {
		devices++
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, devices == 0)
	tests.Assert(t, err == nil)

}
Example #3
func createSampleNodeEntry() *NodeEntry {
	req := &NodeAddRequest{
		ClusterId: "123",
		Hostnames: HostAddresses{
			Manage:  []string{"manage" + utils.GenUUID()[:8]},
			Storage: []string{"storage" + utils.GenUUID()[:8]},
		},
		Zone: 99,
	}

	return NewNodeEntryFromRequest(req)
}
Example #4
func NewBrick(size uint64, db *GlusterFSDB) *Brick {
	return &Brick{
		Id:   utils.GenUUID(),
		Size: size,
		db:   db,
	}
}
Example #5
func TestSimpleAllocatorGetDeviceList(t *testing.T) {
	r := NewSimpleAllocatorRing()
	tests.Assert(t, r != nil)

	zones, nodes, drives := 1, 2, 4

	// Create ring
	for z := 0; z < zones; z++ {

		// Generate nodes for this zone
		for n := 0; n < nodes; n++ {
			nid := utils.GenUUID()

			// Generate drives for this node
			for d := 0; d < drives; d++ {
				did := utils.GenUUID()

				// Setup simple device
				dev := &SimpleDevice{
					zone:     z,
					deviceId: did,
					nodeId:   nid,
				}
				r.Add(dev)
			}
		}
	}
	tests.Assert(t, r.balancedList == nil)

	// Rebalance
	r.Rebalance()
	tests.Assert(t, r.balancedList != nil)
	tests.Assert(t, len(r.balancedList) == zones*nodes*drives)

	// Get a list for a brick with id "0000000"
	// It should return a list equal to balancedList
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("0000000"), r.balancedList))
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("0000001"), append(r.balancedList[1:], r.balancedList[0])))

	// 0x0e == 14 is larger than 1*2*4 = 8, so the index wraps to 14 % 8 = 6
	tests.Assert(t,
		reflect.DeepEqual(r.GetDeviceList("000000e"), append(r.balancedList[6:], r.balancedList[:6]...)))
}
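The index arithmetic in the last assertion can be checked in isolation. The standalone sketch below is only an illustration, not heketi code: it reproduces the hex-id-modulo-ring-size mapping described in the comment above, using strconv.ParseUint in place of whatever parsing the ring performs internally.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Ring size from the test above: zones * nodes * drives = 1 * 2 * 4 = 8
	const ringSize = 8

	// The brick id is read as a hexadecimal number: "000000e" == 14
	id, err := strconv.ParseUint("000000e", 16, 64)
	if err != nil {
		panic(err)
	}

	// 14 % 8 == 6, the rotation offset asserted at the end of the test
	fmt.Println(id % ringSize) // 6
}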
Example #6
func TestSimpleAllocatorRingRebalance(t *testing.T) {
	r := NewSimpleAllocatorRing()
	tests.Assert(t, r != nil)

	zones, nodes, drives := 10, 100, 48

	// Add 10*100*48 devices to the ring
	for z := 0; z < zones; z++ {

		// Generate nodes for this zone
		for n := 0; n < nodes; n++ {
			nid := utils.GenUUID()

			// Generate drives for this node
			for d := 0; d < drives; d++ {
				did := utils.GenUUID()

				// Setup simple device
				dev := &SimpleDevice{
					zone:     z,
					deviceId: did,
					nodeId:   nid,
				}
				r.Add(dev)
			}
		}
	}
	tests.Assert(t, r.balancedList == nil)

	// Rebalance
	r.Rebalance()
	tests.Assert(t, r.balancedList != nil)
	tests.Assert(t, len(r.balancedList) == zones*nodes*drives)

	// Check balance
	// 1. No two adjacent devices in the list should be in the same zone
	// 2. No two devices two positions apart should be on the same node
	for i := range r.balancedList[:len(r.balancedList)-1] {
		tests.Assert(t, r.balancedList[i].zone != r.balancedList[i+1].zone)
	}
	for i := range r.balancedList[:len(r.balancedList)-2] {
		tests.Assert(t, r.balancedList[i].nodeId != r.balancedList[i+2].nodeId)
	}
}
Example #7
func NewVolumeEntryFromRequest(req *VolumeCreateRequest) *VolumeEntry {
	godbc.Require(req != nil)

	vol := NewVolumeEntry()
	vol.Info.Id = utils.GenUUID()
	vol.Info.Durability = req.Durability
	vol.Info.Snapshot = req.Snapshot
	vol.Info.Size = req.Size

	// Set default durability values
	durability := vol.Info.Durability.Type
	switch {

	case durability == DURABILITY_STRING_REPLICATE:
		logger.Debug("[%v] Replica %v",
			vol.Info.Id,
			vol.Info.Durability.Replicate.Replica)
		vol.Durability = &vol.Info.Durability.Replicate

	case durability == DURABILITY_STRING_EC:
		logger.Debug("[%v] EC %v + %v ",
			vol.Info.Id,
			vol.Info.Durability.Disperse.Data,
			vol.Info.Durability.Disperse.Redundancy)
		vol.Durability = &vol.Info.Durability.Disperse

	case durability == DURABILITY_STRING_DISTRIBUTE_ONLY || durability == "":
		logger.Debug("[%v] Distributed", vol.Info.Id, vol.Info.Durability.Replicate.Replica)
		vol.Durability = NewNoneDurability()

	default:
		panic(fmt.Sprintf("BUG: Unknown type: %v\n", vol.Info.Durability))
	}

	// Set the default values accordingly
	vol.Durability.SetDurability()

	// Set default name
	if req.Name == "" {
		vol.Info.Name = "vol_" + vol.Info.Id
	} else {
		vol.Info.Name = req.Name
	}

	// Set default thinp factor
	if vol.Info.Snapshot.Enable && vol.Info.Snapshot.Factor == 0 {
		vol.Info.Snapshot.Factor = DEFAULT_THINP_SNAPSHOT_FACTOR
	} else if !vol.Info.Snapshot.Enable {
		vol.Info.Snapshot.Factor = 1
	}

	// If it is zero, then it will be assigned during volume creation
	vol.Info.Clusters = req.Clusters

	return vol
}
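As a usage sketch for the constructor above: the request below sets only fields that the function itself reads, with placeholder values; whether they can be assigned exactly like this depends on the request type definitions, which are not shown here.

func exampleReplicatedVolume() {
	req := &VolumeCreateRequest{}
	req.Size = 100
	req.Durability.Type = DURABILITY_STRING_REPLICATE
	req.Durability.Replicate.Replica = 3
	req.Snapshot.Enable = true // Factor left at 0, so the default factor is applied

	vol := NewVolumeEntryFromRequest(req)

	// With no name in the request, the volume is named after its generated id
	fmt.Println(vol.Info.Name, vol.Info.Snapshot.Factor)
}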
Example #8
func TestSimpleAllocatorEmpty(t *testing.T) {
	a := NewSimpleAllocator()
	tests.Assert(t, a != nil)

	err := a.RemoveDevice(createSampleClusterEntry(),
		createSampleNodeEntry(),
		createSampleDeviceEntry("aaa", 10))
	tests.Assert(t, err == ErrNotFound)

	err = a.RemoveCluster("aaa")
	tests.Assert(t, err == ErrNotFound)

	ch, _, errc := a.GetNodes(utils.GenUUID(), utils.GenUUID())
	for d := range ch {
		tests.Assert(t, false, d)
	}
	err = <-errc
	tests.Assert(t, err == ErrNotFound)
}
Example #9
func NewNodeEntryFromRequest(req *NodeAddRequest) *NodeEntry {
	godbc.Require(req != nil)

	node := NewNodeEntry()
	node.Info.Id = utils.GenUUID()
	node.Info.ClusterId = req.ClusterId
	node.Info.Hostnames = req.Hostnames
	node.Info.Zone = req.Zone

	return node
}
Example #10
func NewDeviceEntryFromRequest(req *Device, nodeid string) *DeviceEntry {
	godbc.Require(req != nil)

	device := NewDeviceEntry()
	device.Info.Id = utils.GenUUID()
	device.Info.Name = req.Name
	device.Info.Weight = req.Weight
	device.NodeId = nodeid

	return device
}
Example #11
func createSampleDeviceEntry(nodeid string, disksize uint64) *DeviceEntry {

	req := &DeviceAddRequest{}
	req.NodeId = nodeid
	req.Name = "/dev/" + utils.GenUUID()[:8]
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.StorageSet(disksize)

	return d
}
Example #12
// Use this to create a new asynchronous operation handler.
// Only use this function if you need to do every step by hand;
// it is recommended to use AsyncHttpRedirectFunc() instead.
func (a *AsyncHttpManager) NewHandler() *AsyncHttpHandler {
	handler := &AsyncHttpHandler{
		manager: a,
		id:      utils.GenUUID(),
	}

	a.lock.Lock()
	defer a.lock.Unlock()

	a.handlers[handler.id] = handler

	return handler
}
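A minimal sketch of what the constructor gives back, assuming an existing *AsyncHttpManager from the same package and using only what NewHandler itself touches:

func exampleNewHandler(a *AsyncHttpManager) {
	// The handler is created with a fresh UUID and registered with the manager
	handler := a.NewHandler()
	fmt.Println(handler.id)
}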
Example #13
func TestSimpleAllocatorInitFromDb(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Setup database
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create large cluster
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		10,     // nodes_per_cluster
		20,     // devices_per_node
		600*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Get the cluster list
	var clusterId string
	err = app.db.View(func(tx *bolt.Tx) error {
		clusters, err := ClusterList(tx)
		if err != nil {
			return err
		}
		tests.Assert(t, len(clusters) == 1)
		clusterId = clusters[0]

		return nil
	})
	tests.Assert(t, err == nil)

	// Create an allocator and initialize it from the DB
	a := NewSimpleAllocatorFromDb(app.db)
	tests.Assert(t, a != nil)

	// Get the nodes from the ring
	ch, _, errc := a.GetNodes(clusterId, utils.GenUUID())

	var devices int
	for d := range ch {
		devices++
		tests.Assert(t, d != "")
	}
	err = <-errc
	tests.Assert(t, devices == 10*20)
	tests.Assert(t, err == nil)

}
Example #14
func TestDeviceEntryNewBrickEntry(t *testing.T) {
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 900
	d.Info.Storage.Total = 1000
	d.Info.Storage.Used = 100

	// Alignment
	d.ExtentSize = 8

	// Too large
	brick := d.NewBrickEntry(1000000000, 1.5)
	tests.Assert(t, brick == nil)

	// --- Now check with a real value ---

	// Check newly created brick
	size := 201
	tpsize := uint64(float32(size) * 1.5)

	// Alignment
	tpsize += d.ExtentSize - (tpsize % d.ExtentSize)

	// Calculate metadatasize
	metadatasize := d.poolMetadataSize(tpsize)

	// Alignment
	metadatasize += d.ExtentSize - (metadatasize % d.ExtentSize)
	total := tpsize + metadatasize

	brick = d.NewBrickEntry(200, 1.5)
	tests.Assert(t, brick != nil)
	tests.Assert(t, brick.TpSize == tpsize)
	tests.Assert(t, brick.PoolMetadataSize == metadatasize, brick.PoolMetadataSize, metadatasize)
	tests.Assert(t, brick.Info.Size == 200)

	// Check it was subtracted from device storage
	tests.Assert(t, d.Info.Storage.Used == 100+total)
	tests.Assert(t, d.Info.Storage.Free == 900-total)
	tests.Assert(t, d.Info.Storage.Total == 1000)
}
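The thin-pool size asserted above can be reproduced by hand. The sketch below is not part of the heketi code; it works through only the tpsize half of the calculation with the numbers from the test, since the metadata size depends on poolMetadataSize, which is not shown here.

package main

import "fmt"

func main() {
	// Values taken from the test above
	const extentSize uint64 = 8
	size := 201

	// Requested thin-pool size: 201 * 1.5 = 301.5, truncated to 301
	tpsize := uint64(float32(size) * 1.5)

	// Alignment exactly as written in the test: bump to the next extent boundary
	tpsize += extentSize - (tpsize % extentSize)

	fmt.Println(tpsize) // 304
}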
Example #15
func TestNewDeviceEntryNewInfoResponseBadBrickIds(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	// Add bad brick ids
	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var info *DeviceInfoResponse
	err = app.db.View(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		info, err = device.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == ErrNotFound)
}
Example #16
func (a *App) ClusterCreate(w http.ResponseWriter, r *http.Request) {

	// Create a new ClusterInfo
	entry := NewClusterEntry()
	entry.Info.Id = utils.GenUUID()

	// Convert entry to bytes
	buffer, err := entry.Marshal()
	if err != nil {
		http.Error(w, "Unable to create cluster", http.StatusInternalServerError)
		return
	}

	// Add cluster to db
	err = a.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BOLTDB_BUCKET_CLUSTER))
		if b == nil {
			logger.LogError("Unable to save new cluster information in db")
			return errors.New("Unable to open bucket")
		}

		err = b.Put([]byte(entry.Info.Id), buffer)
		if err != nil {
			logger.LogError("Unable to save new cluster information in db")
			return err
		}

		return nil

	})

	if err != nil {
		logger.Err(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Send back we created it (as long as we did not fail)
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusCreated)
	if err := json.NewEncoder(w).Encode(entry.Info); err != nil {
		panic(err)
	}
}
Example #17
func (m *MockPlugin) NodeAddDevice(id string, req *requests.DeviceAddRequest) error {

	if node, ok := m.db.nodes[id]; ok {

		for device := range req.Devices {
			dev := &requests.DeviceResponse{}
			dev.Name = req.Devices[device].Name
			dev.Weight = req.Devices[device].Weight
			dev.Id = utils.GenUUID()

			node.node.Devices[dev.Id] = dev
		}

	} else {
		return errors.New("Node not found")
	}

	return nil
}
Example #18
func TestNewDeviceEntryFromRequest(t *testing.T) {
	req := &DeviceAddRequest{}
	req.NodeId = "123"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	tests.Assert(t, d != nil)
	tests.Assert(t, d.Info.Id != "")
	tests.Assert(t, d.Info.Name == req.Name)
	tests.Assert(t, d.Info.Weight == req.Weight)
	tests.Assert(t, d.Info.Storage.Free == 0)
	tests.Assert(t, d.Info.Storage.Total == 0)
	tests.Assert(t, d.Info.Storage.Used == 0)
	tests.Assert(t, d.NodeId == "123")
	tests.Assert(t, d.Bricks != nil)
	tests.Assert(t, len(d.Bricks) == 0)

}
Example #19
func (m *MockPlugin) VolumeCreate(v *requests.VolumeCreateRequest) (*requests.VolumeInfoResp, error) {

	var err error

	info := &requests.VolumeInfoResp{}
	info.Name = v.Name
	info.Size = v.Size
	info.Id = utils.GenUUID()
	if err != nil {
		return nil, err
	}

	volume := &Volume{
		volume: info,
	}

	m.db.volumes[info.Id] = volume

	return m.VolumeInfo(info.Id)
}
Example #20
func NewVolumeEntry(v *requests.VolumeCreateRequest,
	bricks []*Brick,
	replica int,
	db *GlusterFSDB) *VolumeEntry {

	// Save volume information
	vol := &VolumeEntry{}
	vol.Info.Size = v.Size
	vol.Info.Id = utils.GenUUID()
	vol.State.Bricks = bricks
	vol.State.Replica = replica
	vol.db = db

	if v.Name != "" {
		vol.Info.Name = v.Name
	} else {
		vol.Info.Name = "vol_" + vol.Info.Id
	}

	return vol
}
Example #21
func (m *MockPlugin) NodeAdd(v *requests.NodeAddRequest) (*requests.NodeInfoResp, error) {

	var err error

	info := &requests.NodeInfoResp{}
	info.Name = v.Name
	info.Zone = v.Zone
	info.Id = utils.GenUUID()
	info.Devices = make(map[string]*requests.DeviceResponse)
	if err != nil {
		return nil, err
	}

	node := &Node{
		node: info,
	}

	m.db.nodes[info.Id] = node

	return m.NodeInfo(info.Id)
}
Example #22
func TestNewDeviceEntryFromId(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var device *DeviceEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(device, d))
}
Example #23
func NewBrickEntry(size, tpsize, poolMetadataSize uint64, deviceid, nodeid string) *BrickEntry {
	godbc.Require(size > 0)
	godbc.Require(tpsize > 0)
	godbc.Require(deviceid != "")
	godbc.Require(nodeid != "")

	entry := &BrickEntry{}
	entry.TpSize = tpsize
	entry.PoolMetadataSize = poolMetadataSize
	entry.Info.Id = utils.GenUUID()
	entry.Info.Size = size
	entry.Info.NodeId = nodeid
	entry.Info.DeviceId = deviceid

	godbc.Ensure(entry.Info.Id != "")
	godbc.Ensure(entry.TpSize == tpsize)
	godbc.Ensure(entry.Info.Size == size)
	godbc.Ensure(entry.Info.NodeId == nodeid)
	godbc.Ensure(entry.Info.DeviceId == deviceid)

	return entry
}
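A minimal usage sketch for the constructor above, assuming it is called from inside the same package; the sizes and ids are arbitrary placeholders rather than values from the heketi code.

func exampleNewBrickEntry() {
	// Placeholder sizes and ids; tpsize would normally be derived from the
	// brick size and the thin-pool snapshot factor, as in the tests above.
	brick := NewBrickEntry(1024, 1536, 16, "some-device-id", "some-node-id")

	// The constructor assigns a fresh UUID via utils.GenUUID()
	fmt.Println(brick.Info.Id, brick.Info.Size)
}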
Example #24
func TestNewDeviceEntryMarshal(t *testing.T) {
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	buffer, err := d.Marshal()
	tests.Assert(t, err == nil)
	tests.Assert(t, buffer != nil)
	tests.Assert(t, len(buffer) > 0)

	um := &DeviceEntry{}
	err = um.Unmarshal(buffer)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(um, d))

}
Example #25
func TestClientVolume(t *testing.T) {
	db := tests.Tempfile()
	defer os.Remove(db)

	// Create the app
	app := glusterfs.NewTestApp(db)
	defer app.Close()

	// Setup the server
	ts := setupHeketiServer(app)
	defer ts.Close()

	// Create cluster
	c := NewClient(ts.URL, "admin", TEST_ADMIN_KEY)
	tests.Assert(t, c != nil)
	cluster, err := c.ClusterCreate()
	tests.Assert(t, err == nil)

	// Create node request packet
	nodeReq := &glusterfs.NodeAddRequest{}
	nodeReq.ClusterId = cluster.Id
	nodeReq.Hostnames.Manage = []string{"manage"}
	nodeReq.Hostnames.Storage = []string{"storage"}
	nodeReq.Zone = 10

	// Create node
	node, err := c.NodeAdd(nodeReq)
	tests.Assert(t, err == nil)

	// Create a device request
	sg := utils.NewStatusGroup()
	for i := 0; i < 50; i++ {
		sg.Add(1)
		go func() {
			defer sg.Done()

			deviceReq := &glusterfs.DeviceAddRequest{}
			deviceReq.Name = "sd" + utils.GenUUID()[:8]
			deviceReq.Weight = 100
			deviceReq.NodeId = node.Id

			// Create device
			err := c.DeviceAdd(deviceReq)
			sg.Err(err)

		}()
	}
	tests.Assert(t, sg.Result() == nil)

	// Get list of volumes
	list, err := c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 0)

	// Create a volume
	volumeReq := &glusterfs.VolumeCreateRequest{}
	volumeReq.Size = 10
	volume, err := c.VolumeCreate(volumeReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volume.Id != "")
	tests.Assert(t, volume.Size == volumeReq.Size)

	// Get list of volumes
	list, err = c.VolumeList()
	tests.Assert(t, err == nil)
	tests.Assert(t, len(list.Volumes) == 1)
	tests.Assert(t, list.Volumes[0] == volume.Id)

	// Get info on incorrect id
	info, err := c.VolumeInfo("badid")
	tests.Assert(t, err != nil)

	// Get info
	info, err = c.VolumeInfo(volume.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(info, volume))

	// Expand volume with a bad id
	expandReq := &glusterfs.VolumeExpandRequest{}
	expandReq.Size = 10
	volumeInfo, err := c.VolumeExpand("badid", expandReq)
	tests.Assert(t, err != nil)

	// Expand volume
	volumeInfo, err = c.VolumeExpand(volume.Id, expandReq)
	tests.Assert(t, err == nil)
	tests.Assert(t, volumeInfo.Size == 20)

	// Delete bad id
	err = c.VolumeDelete("badid")
	tests.Assert(t, err != nil)

	// Delete volume
	err = c.VolumeDelete(volume.Id)
	tests.Assert(t, err == nil)

	// Get node information
	nodeInfo, err := c.NodeInfo(node.Id)
	tests.Assert(t, err == nil)

	// Delete all devices
	sg = utils.NewStatusGroup()
	for index := range nodeInfo.DevicesInfo {
		sg.Add(1)
		go func(i int) {
			defer sg.Done()
			sg.Err(c.DeviceDelete(nodeInfo.DevicesInfo[i].Id))
		}(index)
	}
	err = sg.Result()
	tests.Assert(t, err == nil, err)

	// Delete node
	err = c.NodeDelete(node.Id)
	tests.Assert(t, err == nil)

	// Delete cluster
	err = c.ClusterDelete(cluster.Id)
	tests.Assert(t, err == nil)

}
Example #26
func NewClusterEntryFromRequest() *ClusterEntry {
	entry := NewClusterEntry()
	entry.Info.Id = utils.GenUUID()

	return entry
}
Example #27
func TestNewDeviceEntrySaveDelete(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000
	d.BrickAdd("abc")
	d.BrickAdd("def")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return d.Save(tx)
	})
	tests.Assert(t, err == nil)

	var device *DeviceEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, reflect.DeepEqual(device, d))

	// Delete entry which has devices
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		err = device.Delete(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == ErrConflict)

	// Delete devices in device
	device.BrickDelete("abc")
	device.BrickDelete("def")
	tests.Assert(t, len(device.Bricks) == 0)
	err = app.db.Update(func(tx *bolt.Tx) error {
		return device.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now try to delete the device
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		err = device.Delete(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == nil)

	// Check device has been deleted and is not in db
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		device, err = NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}
		return nil

	})
	tests.Assert(t, err == ErrNotFound)
}
Example #28
func TestNewDeviceEntryNewInfoResponse(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a device
	req := &DeviceAddRequest{}
	req.NodeId = "abc"
	req.Name = "/dev/" + utils.GenUUID()
	req.Weight = 123

	d := NewDeviceEntryFromRequest(req)
	d.Info.Storage.Free = 10
	d.Info.Storage.Total = 100
	d.Info.Storage.Used = 1000

	// Create a brick
	b := &BrickEntry{}
	b.Info.Id = "bbb"
	b.Info.Size = 10
	b.Info.NodeId = "abc"
	b.Info.DeviceId = d.Info.Id
	b.Info.Path = "/somepath"

	// Add brick to device
	d.BrickAdd("bbb")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		err := d.Save(tx)
		if err != nil {
			return err
		}

		return b.Save(tx)
	})
	tests.Assert(t, err == nil)

	var info *DeviceInfoResponse
	err = app.db.View(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, d.Info.Id)
		if err != nil {
			return err
		}

		info, err = device.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, info.Id == d.Info.Id)
	tests.Assert(t, info.Name == d.Info.Name)
	tests.Assert(t, info.Weight == d.Info.Weight)
	tests.Assert(t, reflect.DeepEqual(info.Storage, d.Info.Storage))
	tests.Assert(t, len(info.Bricks) == 1)
	tests.Assert(t, info.Bricks[0].Id == "bbb")
	tests.Assert(t, info.Bricks[0].Path == "/somepath")
	tests.Assert(t, info.Bricks[0].NodeId == "abc")
	tests.Assert(t, info.Bricks[0].DeviceId == d.Info.Id)
	tests.Assert(t, info.Bricks[0].Size == 10)

}
Example #29
func (v *VolumeEntry) allocBricks(
	db *bolt.DB,
	allocator Allocator,
	cluster string,
	bricksets int,
	brick_size uint64) (brick_entries []*BrickEntry, e error) {

	// Setup garbage collector function in case of error
	defer func() {

		// Check the named return value 'e'
		if e != nil {
			logger.Debug("Error detected.  Cleaning up volume %v: Len(%v) ", v.Info.Id, len(brick_entries))
			db.Update(func(tx *bolt.Tx) error {
				for _, brick := range brick_entries {
					v.removeBrickFromDb(tx, brick)
				}
				return nil
			})
		}
	}()

	// Initialize brick_entries
	brick_entries = make([]*BrickEntry, 0)

	// Determine allocation for each brick required for this volume
	for brick_num := 0; brick_num < bricksets; brick_num++ {
		logger.Info("brick_num: %v", brick_num)

		// Generate an id for the brick
		brickId := utils.GenUUID()

		// Get allocator generator
		// The same generator should be used for the brick and its replicas
		deviceCh, done, errc := allocator.GetNodes(cluster, brickId)
		defer func() {
			close(done)
		}()

		// Check location has space for each brick and its replicas
		for i := 0; i < v.Durability.BricksInSet(); i++ {
			logger.Info("%v / %v", i, v.Durability.BricksInSet())

			// Do the work in the database context so that the cluster
			// data does not change while determining brick location
			err := db.Update(func(tx *bolt.Tx) error {

				// Check the ring for devices to place the brick
				for deviceId := range deviceCh {

					// Get device entry
					device, err := NewDeviceEntryFromId(tx, deviceId)
					if err != nil {
						return err
					}

					// Try to allocate a brick on this device
					brick := device.NewBrickEntry(brick_size, float64(v.Info.Snapshot.Factor))

					// Determine if it was successful
					if brick != nil {

						// If this is the first brick in the set, reset its id to the pre-generated brickId
						if i == 0 {
							brick.SetId(brickId)
						}

						// Save the brick entry so it can later be created on the node
						brick_entries = append(brick_entries, brick)

						// Add brick to device
						device.BrickAdd(brick.Id())

						// Add brick to volume
						v.BrickAdd(brick.Id())

						// Save values
						err := device.Save(tx)
						if err != nil {
							return err
						}
						return nil
					}
				}

				// Check if allocator returned an error
				if err := <-errc; err != nil {
					return err
				}

				// No devices found
				return ErrNoSpace

			})
			if err != nil {
				return brick_entries, err
			}
		}
	}

	return brick_entries, nil

}