Example #1
func TestVolumeInfo(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()
	router := mux.NewRouter()
	app.SetRoutes(router)

	// Setup the server
	ts := httptest.NewServer(router)
	defer ts.Close()

	// Setup database
	err := setupSampleDbWithTopology(app,
		1,    // clusters
		10,   // nodes_per_cluster
		10,   // devices_per_node
		5*TB, // disksize
	)
	tests.Assert(t, err == nil)

	// Create a volume
	req := &api.VolumeCreateRequest{}
	req.Size = 100
	req.Durability.Type = api.DurabilityEC
	v := NewVolumeEntryFromRequest(req)
	tests.Assert(t, v != nil)
	err = v.Create(app.db, app.executor, app.allocator)
	tests.Assert(t, err == nil)

	// Now that we have some data in the database, we can
	// request the volume information
	r, err := http.Get(ts.URL + "/volumes/" + v.Info.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusOK)
	tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")

	// Read response
	var msg api.VolumeInfoResponse
	err = utils.GetJsonFromResponse(r, &msg)
	tests.Assert(t, err == nil)

	tests.Assert(t, msg.Id == v.Info.Id)
	tests.Assert(t, msg.Cluster == v.Info.Cluster)
	tests.Assert(t, msg.Name == v.Info.Name)
	tests.Assert(t, msg.Size == v.Info.Size)
	tests.Assert(t, reflect.DeepEqual(msg.Durability, v.Info.Durability))
	tests.Assert(t, reflect.DeepEqual(msg.Snapshot, v.Info.Snapshot))
	for _, brick := range msg.Bricks {
		tests.Assert(t, utils.SortedStringHas(v.Bricks, brick.Id))
	}
}
Example #2
func TestNewClusterEntryFromId(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a cluster
	c := NewClusterEntryFromRequest()
	c.NodeAdd("node_abc")
	c.NodeAdd("node_def")
	c.VolumeAdd("vol_abc")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return c.Save(tx)
	})
	tests.Assert(t, err == nil)

	var cluster *ClusterEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		return err
	})
	tests.Assert(t, err == nil)

	tests.Assert(t, cluster.Info.Id == c.Info.Id)
	tests.Assert(t, len(cluster.Info.Nodes) == 2)
	tests.Assert(t, len(cluster.Info.Volumes) == 1)
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, "node_abc"))
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, "node_def"))
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Volumes, "vol_abc"))
}
Example #3
func TestDeviceEntryAddDeleteBricks(t *testing.T) {
	d := NewDeviceEntry()
	tests.Assert(t, len(d.Bricks) == 0)

	d.BrickAdd("123")
	tests.Assert(t, utils.SortedStringHas(d.Bricks, "123"))
	tests.Assert(t, len(d.Bricks) == 1)
	d.BrickAdd("abc")
	tests.Assert(t, utils.SortedStringHas(d.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(d.Bricks, "abc"))
	tests.Assert(t, len(d.Bricks) == 2)

	d.BrickDelete("123")
	tests.Assert(t, !utils.SortedStringHas(d.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(d.Bricks, "abc"))
	tests.Assert(t, len(d.Bricks) == 1)

	d.BrickDelete("ccc")
	tests.Assert(t, !utils.SortedStringHas(d.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(d.Bricks, "abc"))
	tests.Assert(t, len(d.Bricks) == 1)
}
Example #4
func TestNodeEntryAddDeleteDevices(t *testing.T) {
	n := NewNodeEntry()
	tests.Assert(t, len(n.Devices) == 0)

	n.DeviceAdd("123")
	tests.Assert(t, utils.SortedStringHas(n.Devices, "123"))
	tests.Assert(t, len(n.Devices) == 1)
	n.DeviceAdd("abc")
	tests.Assert(t, utils.SortedStringHas(n.Devices, "123"))
	tests.Assert(t, utils.SortedStringHas(n.Devices, "abc"))
	tests.Assert(t, len(n.Devices) == 2)

	n.DeviceDelete("123")
	tests.Assert(t, !utils.SortedStringHas(n.Devices, "123"))
	tests.Assert(t, utils.SortedStringHas(n.Devices, "abc"))
	tests.Assert(t, len(n.Devices) == 1)

	n.DeviceDelete("ccc")
	tests.Assert(t, !utils.SortedStringHas(n.Devices, "123"))
	tests.Assert(t, utils.SortedStringHas(n.Devices, "abc"))
	tests.Assert(t, len(n.Devices) == 1)
}
Example #5
func TestVolumeEntryAddDeleteBricks(t *testing.T) {
	v := NewVolumeEntry()
	tests.Assert(t, len(v.Bricks) == 0)

	v.BrickAdd("123")
	tests.Assert(t, utils.SortedStringHas(v.Bricks, "123"))
	tests.Assert(t, len(v.Bricks) == 1)
	v.BrickAdd("abc")
	tests.Assert(t, utils.SortedStringHas(v.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(v.Bricks, "abc"))
	tests.Assert(t, len(v.Bricks) == 2)

	v.BrickDelete("123")
	tests.Assert(t, !utils.SortedStringHas(v.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(v.Bricks, "abc"))
	tests.Assert(t, len(v.Bricks) == 1)

	v.BrickDelete("ccc")
	tests.Assert(t, !utils.SortedStringHas(v.Bricks, "123"))
	tests.Assert(t, utils.SortedStringHas(v.Bricks, "abc"))
	tests.Assert(t, len(v.Bricks) == 1)
}
Example #6
func TestClusterEntryAddDeleteElements(t *testing.T) {
	c := NewClusterEntry()

	c.NodeAdd("123")
	tests.Assert(t, len(c.Info.Nodes) == 1)
	tests.Assert(t, len(c.Info.Volumes) == 0)
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "123"))

	c.NodeAdd("456")
	tests.Assert(t, len(c.Info.Nodes) == 2)
	tests.Assert(t, len(c.Info.Volumes) == 0)
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "456"))

	c.VolumeAdd("aabb")
	tests.Assert(t, len(c.Info.Nodes) == 2)
	tests.Assert(t, len(c.Info.Volumes) == 1)
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Volumes, "aabb"))

	c.NodeDelete("aabb")
	tests.Assert(t, len(c.Info.Nodes) == 2)
	tests.Assert(t, len(c.Info.Volumes) == 1)
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Volumes, "aabb"))

	c.NodeDelete("456")
	tests.Assert(t, len(c.Info.Nodes) == 1)
	tests.Assert(t, len(c.Info.Volumes) == 1)
	tests.Assert(t, utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Volumes, "aabb"))

	c.NodeDelete("123")
	tests.Assert(t, len(c.Info.Nodes) == 0)
	tests.Assert(t, len(c.Info.Volumes) == 1)
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Volumes, "aabb"))

	c.VolumeDelete("123")
	tests.Assert(t, len(c.Info.Nodes) == 0)
	tests.Assert(t, len(c.Info.Volumes) == 1)
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, utils.SortedStringHas(c.Info.Volumes, "aabb"))

	c.VolumeDelete("aabb")
	tests.Assert(t, len(c.Info.Nodes) == 0)
	tests.Assert(t, len(c.Info.Volumes) == 0)
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "123"))
	tests.Assert(t, !utils.SortedStringHas(c.Info.Nodes, "456"))
	tests.Assert(t, !utils.SortedStringHas(c.Info.Volumes, "aabb"))
}
Example #7
func TestNewClusterEntrySaveDelete(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a cluster
	c := NewClusterEntryFromRequest()
	c.NodeAdd("node_abc")
	c.NodeAdd("node_def")
	c.VolumeAdd("vol_abc")

	// Save element in database
	err := app.db.Update(func(tx *bolt.Tx) error {
		return c.Save(tx)
	})
	tests.Assert(t, err == nil)

	var cluster *ClusterEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		return err
	})
	tests.Assert(t, err == nil)

	tests.Assert(t, cluster.Info.Id == c.Info.Id)
	tests.Assert(t, len(cluster.Info.Nodes) == 2)
	tests.Assert(t, len(cluster.Info.Volumes) == 1)
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, "node_abc"))
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, "node_def"))
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Volumes, "vol_abc"))

	// Try to delete a cluster which still has volumes and nodes
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		if err != nil {
			return err
		}
		return cluster.Delete(tx)
	})
	tests.Assert(t, err == ErrConflict)

	// Delete the volume from the cluster
	cluster.VolumeDelete("vol_abc")
	tests.Assert(t, len(cluster.Info.Volumes) == 0)
	tests.Assert(t, len(cluster.Info.Nodes) == 2)

	// Save cluster
	err = app.db.Update(func(tx *bolt.Tx) error {
		return cluster.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Try to delete a cluster which still has nodes
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		if err != nil {
			return err
		}
		return cluster.Delete(tx)
	})
	tests.Assert(t, err == ErrConflict)

	// Delete the nodes from the cluster
	cluster.NodeDelete("node_abc")
	cluster.NodeDelete("node_def")
	tests.Assert(t, len(cluster.Info.Nodes) == 0)

	// Save cluster
	err = app.db.Update(func(tx *bolt.Tx) error {
		return cluster.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now try to delete the cluster with no elements
	err = app.db.Update(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		if err != nil {
			return err
		}
		return cluster.Delete(tx)
	})
	tests.Assert(t, err == nil)

	// Check cluster has been deleted and is not in db
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		cluster, err = NewClusterEntryFromId(tx, c.Info.Id)
		return err
	})
	tests.Assert(t, err == ErrNotFound)
}
Example #8
func TestVolumeEntryCreateOnClustersRequested(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create 10 clusters with 500TB of storage each
	err := setupSampleDbWithTopology(app,
		10,   // clusters
		10,   // nodes_per_cluster
		10,   // devices_per_node
		5*TB, // disksize
	)
	tests.Assert(t, err == nil)

	// Get a cluster list
	var clusters sort.StringSlice
	err = app.db.View(func(tx *bolt.Tx) error {
		var err error
		clusters, err = ClusterList(tx)
		return err
	})
	tests.Assert(t, err == nil)
	clusters.Sort()

	// Create a 1TB volume
	v := createSampleVolumeEntry(1024)

	// Restrict the volume to the first cluster id
	v.Info.Clusters = []string{clusters[0]}

	// Create volume
	err = v.Create(app.db, app.executor, app.allocator)
	tests.Assert(t, err == nil)

	// Check the volume was created on the requested cluster
	var info *api.VolumeInfoResponse
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err := NewVolumeEntryFromId(tx, v.Info.Id)
		if err != nil {
			return err
		}
		info, err = entry.NewInfoResponse(tx)
		return err
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, info.Cluster == clusters[0])

	// Create a new volume on any of three clusters
	clusterset := clusters[2:5]
	v = createSampleVolumeEntry(1024)
	v.Info.Clusters = clusterset
	err = v.Create(app.db, app.executor, app.allocator)
	tests.Assert(t, err == nil)

	// Check database volume exists
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err := NewVolumeEntryFromId(tx, v.Info.Id)
		if err != nil {
			return err
		}
		info, err = entry.NewInfoResponse(tx)
		return err
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, utils.SortedStringHas(clusterset, info.Cluster))

}
Example #9
func TestVolumeEntryCreateBrickDivision(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create 50TB of storage
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		10,     // nodes_per_cluster
		10,     // devices_per_node
		500*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Create a volume large enough that it must be broken
	// down into multiple bricks
	v := createSampleVolumeEntry(2000)
	err = v.Create(app.db, app.executor, app.allocator)
	tests.Assert(t, err == nil)

	// Check the volume in the database
	var info *api.VolumeInfoResponse
	var nodelist sort.StringSlice
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err := NewVolumeEntryFromId(tx, v.Info.Id)
		if err != nil {
			return err
		}

		info, err = entry.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		cluster, err := NewClusterEntryFromId(tx, v.Info.Cluster)
		if err != nil {
			return err
		}
		nodelist = make(sort.StringSlice, len(cluster.Info.Nodes))

		for i, id := range cluster.Info.Nodes {
			node, err := NewNodeEntryFromId(tx, id)
			if err != nil {
				return err
			}
			nodelist[i] = node.StorageHostName()
		}
		nodelist.Sort()

		return nil

	})
	tests.Assert(t, err == nil)

	// The 2TB volume needs 3 splits, giving 8 bricks; with replica 2
	// that is 16 bricks in total
	tests.Assert(t, len(info.Bricks) == 16)
	for b := 1; b < 16; b++ {
		tests.Assert(t, info.Bricks[0].Size == info.Bricks[b].Size, b)
	}
	tests.Assert(t, info.Cluster == v.Info.Cluster)

	// Check mount information
	host := strings.Split(info.Mount.GlusterFS.MountPoint, ":")[0]
	tests.Assert(t, utils.SortedStringHas(nodelist, host), host, nodelist)
	volfileServers := strings.Split(info.Mount.GlusterFS.Options["backup-volfile-servers"], ",")
	for index, node := range volfileServers {
		tests.Assert(t, node != host, index, node, host)
	}

}
Example #10
func TestVolumeEntryCreateFourBricks(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()

	// Create a cluster in the database
	err := setupSampleDbWithTopology(app,
		1,      // clusters
		4,      // nodes_per_cluster
		4,      // devices_per_node
		500*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Create a volume that will be broken down into multiple bricks
	v := createSampleVolumeEntry(250)
	err = v.Create(app.db, app.executor, app.allocator)
	tests.Assert(t, err == nil, err)

	// Check database
	var info *api.VolumeInfoResponse
	var nodelist sort.StringSlice
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err := NewVolumeEntryFromId(tx, v.Info.Id)
		if err != nil {
			return err
		}

		info, err = entry.NewInfoResponse(tx)
		if err != nil {
			return err
		}

		cluster, err := NewClusterEntryFromId(tx, v.Info.Cluster)
		if err != nil {
			return err
		}
		nodelist = make(sort.StringSlice, len(cluster.Info.Nodes))

		for i, id := range cluster.Info.Nodes {
			node, err := NewNodeEntryFromId(tx, id)
			if err != nil {
				return err
			}
			nodelist[i] = node.StorageHostName()
		}
		nodelist.Sort()

		return nil

	})
	tests.Assert(t, err == nil)

	// Check that it used four bricks: two bricks, each with two replicas
	tests.Assert(t, len(info.Bricks) == 4)
	tests.Assert(t, info.Bricks[0].Size == info.Bricks[1].Size)
	tests.Assert(t, info.Bricks[0].Size == info.Bricks[2].Size)
	tests.Assert(t, info.Bricks[0].Size == info.Bricks[3].Size)
	tests.Assert(t, info.Cluster == v.Info.Cluster)

	// Check information on the bricks
	for _, brick := range info.Bricks {
		tests.Assert(t, brick.DeviceId != "")
		tests.Assert(t, brick.NodeId != "")
		tests.Assert(t, brick.Path != "")
	}

	// Check mount information
	host := strings.Split(info.Mount.GlusterFS.MountPoint, ":")[0]
	tests.Assert(t, utils.SortedStringHas(nodelist, host), host, nodelist)
	volfileServers := strings.Split(info.Mount.GlusterFS.Options["backup-volfile-servers"], ",")
	for index, node := range volfileServers {
		tests.Assert(t, node != host, index, node, host)
	}

	// Should have at least as many hosts as replicas
	tests.Assert(t, len(info.Mount.GlusterFS.Hosts) >= info.Durability.Replicate.Replica,
		info.Mount.GlusterFS.Hosts,
		info)

	// Check all hosts are in the list
	err = app.db.View(func(tx *bolt.Tx) error {
		for _, brick := range info.Bricks {
			found := false

			node, err := NewNodeEntryFromId(tx, brick.NodeId)
			tests.Assert(t, err == nil)

			for _, host := range info.Mount.GlusterFS.Hosts {
				if host == node.StorageHostName() {
					found = true
					break
				}
			}
			tests.Assert(t, found, node.StorageHostName(),
				info.Mount.GlusterFS.Hosts)
		}

		return nil
	})
	tests.Assert(t, err == nil)
}
Example #11
func (d *DeviceEntry) BrickAdd(id string) {
	godbc.Require(!utils.SortedStringHas(d.Bricks, id))

	d.Bricks = append(d.Bricks, id)
	d.Bricks.Sort()
}
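
Example #11 above and Examples #13 and #14 below share one pattern: a godbc.Require contract rejects duplicate ids, the id is appended, and the slice is re-sorted so that utils.SortedStringHas can keep using binary search. The helpers themselves are not shown on this page; the following is a minimal sketch of how SortedStringHas and a tolerant delete could look over a sort.StringSlice — a plausible reconstruction for illustration, not necessarily the exact heketi utils source:

// Illustrative sketch only; the real heketi utils package may differ.
package utils

import "sort"

// SortedStringHas reports whether x is present in the sorted slice s,
// using binary search.
func SortedStringHas(s sort.StringSlice, x string) bool {
	index := s.Search(x)
	return index < len(s) && s[index] == x
}

// sortedStringsDelete (hypothetical name) removes x from the sorted
// slice s if present. Deleting an id that is not in s is a no-op,
// which is the behavior the Delete("ccc") assertions in Examples
// #3-#5 depend on.
func sortedStringsDelete(s sort.StringSlice, x string) sort.StringSlice {
	index := s.Search(x)
	if index < len(s) && s[index] == x {
		s = append(s[:index], s[index+1:]...)
	}
	return s
}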
Example #12
func TestDeviceAddDelete(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()
	router := mux.NewRouter()
	app.SetRoutes(router)

	// Setup the server
	ts := httptest.NewServer(router)
	defer ts.Close()

	// Add a cluster, then a node on that cluster
	cluster := NewClusterEntryFromRequest()
	nodereq := &api.NodeAddRequest{
		ClusterId: cluster.Info.Id,
		Hostnames: api.HostAddresses{
			Manage:  []string{"manage"},
			Storage: []string{"storage"},
		},
		Zone: 99,
	}
	node := NewNodeEntryFromRequest(nodereq)
	cluster.NodeAdd(node.Info.Id)

	// Save information in the db
	err := app.db.Update(func(tx *bolt.Tx) error {
		err := cluster.Save(tx)
		if err != nil {
			return err
		}

		err = node.Save(tx)
		if err != nil {
			return err
		}
		return nil
	})
	tests.Assert(t, err == nil)

	// Create a request to add a device
	request := []byte(`{
        "node" : "` + node.Info.Id + `",
        "name" : "/dev/fake1"
    }`)

	// Add device using POST
	r, err := http.Post(ts.URL+"/devices", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err := r.Location()
	tests.Assert(t, err == nil)

	// Query queue until finished
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
		} else {
			tests.Assert(t, r.StatusCode == http.StatusNoContent)
			break
		}
	}

	// Add the same device.  It should conflict
	r, err = http.Post(ts.URL+"/devices", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusConflict)

	// Add a second device
	request = []byte(`{
        "node" : "` + node.Info.Id + `",
        "name" : "/dev/fake2"
    }`)

	// Add device using POST
	r, err = http.Post(ts.URL+"/devices", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err = r.Location()
	tests.Assert(t, err == nil)

	// Query queue until finished
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
		} else {
			tests.Assert(t, r.StatusCode == http.StatusNoContent)
			break
		}
	}

	// Check db to make sure devices were added
	devicemap := make(map[string]*DeviceEntry)
	err = app.db.View(func(tx *bolt.Tx) error {
		node, err = NewNodeEntryFromId(tx, node.Info.Id)
		if err != nil {
			return err
		}

		for _, id := range node.Devices {
			device, err := NewDeviceEntryFromId(tx, id)
			if err != nil {
				return err
			}
			devicemap[device.Info.Name] = device
		}

		return nil
	})
	tests.Assert(t, err == nil)

	val, ok := devicemap["/dev/fake1"]
	tests.Assert(t, ok)
	tests.Assert(t, val.Info.Name == "/dev/fake1")
	tests.Assert(t, len(val.Bricks) == 0)

	val, ok = devicemap["/dev/fake2"]
	tests.Assert(t, ok)
	tests.Assert(t, val.Info.Name == "/dev/fake2")
	tests.Assert(t, len(val.Bricks) == 0)

	// Add some bricks to check that delete conflicts work
	fakeid := devicemap["/dev/fake1"].Info.Id
	err = app.db.Update(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, fakeid)
		if err != nil {
			return err
		}

		device.BrickAdd("123")
		device.BrickAdd("456")
		return device.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now delete device and check for conflict
	req, err := http.NewRequest("DELETE", ts.URL+"/devices/"+fakeid, nil)
	tests.Assert(t, err == nil)
	r, err = http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusConflict)

	// Check the db is still intact
	err = app.db.View(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, fakeid)
		if err != nil {
			return err
		}

		node, err = NewNodeEntryFromId(tx, device.NodeId)
		if err != nil {
			return err
		}

		return nil
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, utils.SortedStringHas(node.Devices, fakeid))

	// Now delete the bricks from the device
	err = app.db.Update(func(tx *bolt.Tx) error {
		device, err := NewDeviceEntryFromId(tx, fakeid)
		if err != nil {
			return err
		}

		device.BrickDelete("123")
		device.BrickDelete("456")
		return device.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Delete device
	req, err = http.NewRequest("DELETE", ts.URL+"/devices/"+fakeid, nil)
	tests.Assert(t, err == nil)
	r, err = http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err = r.Location()
	tests.Assert(t, err == nil)

	// Wait for deletion
	for {
		r, err := http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
			continue
		} else {
			tests.Assert(t, r.StatusCode == http.StatusNoContent)
			break
		}
	}

	// Check db
	err = app.db.View(func(tx *bolt.Tx) error {
		_, err := NewDeviceEntryFromId(tx, fakeid)
		return err
	})
	tests.Assert(t, err == ErrNotFound)

	// Check node does not have the device
	err = app.db.View(func(tx *bolt.Tx) error {
		node, err = NewNodeEntryFromId(tx, node.Info.Id)
		return err
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, !utils.SortedStringHas(node.Devices, fakeid))

	// Check the registration of the device has been removed,
	// and the device can be added again
	request = []byte(`{
        "node" : "` + node.Info.Id + `",
        "name" : "/dev/fake1"
    }`)
	r, err = http.Post(ts.URL+"/devices", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err = r.Location()
	tests.Assert(t, err == nil)

	// Query queue until finished
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
		} else {
			tests.Assert(t, r.StatusCode == http.StatusNoContent)
			break
		}
	}
}
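
Examples #12, #15 and #16 all repeat the same query-until-finished loop against the Location header returned by an asynchronous request. For readability the loop could be factored into a helper along these lines — a hypothetical sketch, not part of the heketi test suite, assuming the tests.Assert and X-Pending conventions shown above:

// waitForCompletion polls an async-operation URL until the server
// stops reporting X-Pending, asserting the expected status codes.
// Hypothetical helper, for illustration only.
func waitForCompletion(t *testing.T, location string) {
	for {
		r, err := http.Get(location)
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			// Operation still queued; poll again shortly
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
			continue
		}
		// Finished: the server answers with 204 No Content
		tests.Assert(t, r.StatusCode == http.StatusNoContent)
		return
	}
}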
Example #13
func (v *VolumeEntry) BrickAdd(id string) {
	godbc.Require(!utils.SortedStringHas(v.Bricks, id))

	v.Bricks = append(v.Bricks, id)
	v.Bricks.Sort()
}
Example #14
func (n *NodeEntry) DeviceAdd(id string) {
	godbc.Require(!utils.SortedStringHas(n.Devices, id))

	n.Devices = append(n.Devices, id)
	n.Devices.Sort()
}
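
Taken together with the sketch after Example #11, the add/has/delete round trip behaves exactly as the assertions in Examples #3-#5 expect. A standalone demonstration using only the standard library (hypothetical snippet, not part of heketi):

package main

import (
	"fmt"
	"sort"
)

func main() {
	var bricks sort.StringSlice

	// Add: append, then re-sort so binary search stays valid
	bricks = append(bricks, "123")
	bricks.Sort()
	bricks = append(bricks, "abc")
	bricks.Sort()

	// Has: binary search for the id
	i := bricks.Search("abc")
	fmt.Println(i < len(bricks) && bricks[i] == "abc") // true

	// Delete of a missing id is a no-op
	i = bricks.Search("ccc")
	if i < len(bricks) && bricks[i] == "ccc" {
		bricks = append(bricks[:i], bricks[i+1:]...)
	}
	fmt.Println(len(bricks)) // still 2
}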
Example #15
func TestNodePeerDetachFailure(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()
	router := mux.NewRouter()
	app.SetRoutes(router)

	// Setup the server
	ts := httptest.NewServer(router)
	defer ts.Close()

	// Create a cluster.  We do not want any devices on the
	// nodes so that a node can be deleted easily
	err := setupSampleDbWithTopology(app,
		1,     // clusters
		4,     // nodes_per_cluster
		0,     // devices_per_node
		50*GB, // disksize
	)
	tests.Assert(t, err == nil)

	// Set up the mock peer detach to fail
	peer_called := false
	peer_calls := 0
	app.xo.MockPeerDetach = func(exec_host, newnode string) error {
		peer_calls++
		peer_called = true
		return errors.New("Mock")
	}

	// Get a node id
	var nodeid string
	err = app.db.View(func(tx *bolt.Tx) error {
		clusterlist, err := ClusterList(tx)
		if err != nil {
			return err
		}

		cluster, err := NewClusterEntryFromId(tx, clusterlist[0])
		if err != nil {
			return err
		}

		nodeid = cluster.Info.Nodes[0]

		return nil

	})
	tests.Assert(t, err == nil)
	tests.Assert(t, nodeid != "")

	// Delete node
	req, err := http.NewRequest("DELETE", ts.URL+"/nodes/"+nodeid, nil)
	tests.Assert(t, err == nil)
	r, err := http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err := r.Location()
	tests.Assert(t, err == nil)

	// Since we forced the MockPeerDetach above to fail, the request should fail
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
		} else {
			tests.Assert(t, r.StatusCode == http.StatusInternalServerError)
			s, err := utils.GetStringFromResponse(r)
			tests.Assert(t, err == nil)
			tests.Assert(t, strings.TrimSpace(s) == "Mock")
			tests.Assert(t, peer_called == true)
			tests.Assert(t, peer_calls == 1)
			break
		}
	}

	// Check that the node is still in the db
	err = app.db.View(func(tx *bolt.Tx) error {
		clusters, err := ClusterList(tx)
		if err != nil {
			return err
		}

		cluster, err := NewClusterEntryFromId(tx, clusters[0])
		if err != nil {
			return err
		}
		tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, nodeid))

		_, err = NewNodeEntryFromId(tx, nodeid)
		return err
	})
	tests.Assert(t, err == nil)
}
Example #16
func TestNodeAddDelete(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()
	router := mux.NewRouter()
	app.SetRoutes(router)

	// Setup the server
	ts := httptest.NewServer(router)
	defer ts.Close()

	// ClusterCreate JSON Request
	request := []byte(`{
    }`)

	// Send the empty cluster create request
	r, err := http.Post(ts.URL+"/clusters", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusCreated)
	tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")

	// Read cluster information
	var clusterinfo api.ClusterInfoResponse
	err = utils.GetJsonFromResponse(r, &clusterinfo)
	tests.Assert(t, err == nil)

	// Create node on this cluster
	request = []byte(`{
		"cluster" : "` + clusterinfo.Id + `",
		"hostnames" : {
			"storage" : [ "storage.hostname.com" ],
			"manage" : [ "manage.hostname.com"  ]
		},
		"zone" : 1
    }`)

	// Create node
	r, err = http.Post(ts.URL+"/nodes", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err := r.Location()
	tests.Assert(t, err == nil)

	// Query queue until finished
	var node api.NodeInfoResponse
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		tests.Assert(t, r.StatusCode == http.StatusOK)
		if r.ContentLength <= 0 {
			time.Sleep(time.Millisecond * 10)
			continue
		} else {
			// Should have node information here
			tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")
			err = utils.GetJsonFromResponse(r, &node)
			tests.Assert(t, err == nil)
			break
		}
	}
	tests.Assert(t, len(node.Id) > 0)
	tests.Assert(t, len(node.Hostnames.Manage) == 1)
	tests.Assert(t, len(node.Hostnames.Storage) == 1)
	tests.Assert(t, node.Hostnames.Manage[0] == "manage.hostname.com")
	tests.Assert(t, node.Hostnames.Storage[0] == "storage.hostname.com")
	tests.Assert(t, node.Zone == 1)
	tests.Assert(t, node.ClusterId == clusterinfo.Id)
	tests.Assert(t, len(node.DevicesInfo) == 0)

	// Check that the node has registered
	err = app.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BOLTDB_BUCKET_NODE))
		tests.Assert(t, b != nil)

		val := b.Get([]byte("STORAGE" + node.Hostnames.Storage[0]))
		tests.Assert(t, string(val) == node.Id)

		val = b.Get([]byte("MANAGE" + node.Hostnames.Manage[0]))
		tests.Assert(t, string(val) == node.Id)

		return nil
	})
	tests.Assert(t, err == nil)

	// The node is now registered; adding it again
	// should return a conflict
	r, err = http.Post(ts.URL+"/nodes", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusConflict)

	// Check Cluster has node
	r, err = http.Get(ts.URL + "/clusters/" + clusterinfo.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusOK)
	tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")

	err = utils.GetJsonFromResponse(r, &clusterinfo)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(clusterinfo.Nodes) == 1)
	tests.Assert(t, clusterinfo.Nodes[0] == node.Id)

	// Check the data is in the database correctly
	var entry *NodeEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err = NewNodeEntryFromId(tx, node.Id)
		return err
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, entry != nil)
	tests.Assert(t, entry.Info.Id == node.Id)
	tests.Assert(t, len(entry.Info.Hostnames.Manage) == 1)
	tests.Assert(t, len(entry.Info.Hostnames.Storage) == 1)
	tests.Assert(t, entry.Info.Hostnames.Manage[0] == node.Hostnames.Manage[0])
	tests.Assert(t, entry.Info.Hostnames.Storage[0] == node.Hostnames.Storage[0])
	tests.Assert(t, len(entry.Devices) == 0)

	// Add some devices to check that delete conflict works
	err = app.db.Update(func(tx *bolt.Tx) error {
		entry, err = NewNodeEntryFromId(tx, node.Id)
		if err != nil {
			return err
		}

		entry.DeviceAdd("123")
		entry.DeviceAdd("456")
		return entry.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now delete node and check for conflict
	req, err := http.NewRequest("DELETE", ts.URL+"/nodes/"+node.Id, nil)
	tests.Assert(t, err == nil)
	r, err = http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusConflict)

	// Check that nothing has changed in the db
	var cluster *ClusterEntry
	err = app.db.View(func(tx *bolt.Tx) error {
		entry, err = NewNodeEntryFromId(tx, node.Id)
		if err != nil {
			return err
		}

		cluster, err = NewClusterEntryFromId(tx, entry.Info.ClusterId)
		if err != nil {
			return err
		}

		return nil
	})
	tests.Assert(t, err == nil)
	tests.Assert(t, utils.SortedStringHas(cluster.Info.Nodes, node.Id))

	// Now delete the devices
	err = app.db.Update(func(tx *bolt.Tx) error {
		entry, err = NewNodeEntryFromId(tx, node.Id)
		if err != nil {
			return err
		}

		entry.DeviceDelete("123")
		entry.DeviceDelete("456")
		return entry.Save(tx)
	})
	tests.Assert(t, err == nil)

	// Now delete node
	req, err = http.NewRequest("DELETE", ts.URL+"/nodes/"+node.Id, nil)
	tests.Assert(t, err == nil)
	r, err = http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err = r.Location()
	tests.Assert(t, err == nil)

	// Wait for deletion
	for {
		r, err := http.Get(location.String())
		tests.Assert(t, err == nil)
		if r.Header.Get("X-Pending") == "true" {
			tests.Assert(t, r.StatusCode == http.StatusOK)
			time.Sleep(time.Millisecond * 10)
			continue
		} else {
			tests.Assert(t, r.StatusCode == http.StatusNoContent)
			break
		}
	}

	// Check db to make sure key is removed
	err = app.db.View(func(tx *bolt.Tx) error {
		_, err = NewNodeEntryFromId(tx, node.Id)
		return err
	})
	tests.Assert(t, err == ErrNotFound)

	// Check the cluster does not have this node id
	r, err = http.Get(ts.URL + "/clusters/" + clusterinfo.Id)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusOK)
	tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")

	err = utils.GetJsonFromResponse(r, &clusterinfo)
	tests.Assert(t, err == nil)
	tests.Assert(t, len(clusterinfo.Nodes) == 0)

	// It should have deregistered the node
	// We should be able to add it again
	r, err = http.Post(ts.URL+"/nodes", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusAccepted)
	location, err = r.Location()
	tests.Assert(t, err == nil)

	// Query queue until finished
	for {
		r, err = http.Get(location.String())
		tests.Assert(t, err == nil)
		tests.Assert(t, r.StatusCode == http.StatusOK)
		if r.ContentLength <= 0 {
			time.Sleep(time.Millisecond * 10)
			continue
		} else {
			// Should have node information here
			tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8")
			err = utils.GetJsonFromResponse(r, &node)
			tests.Assert(t, err == nil)
			break
		}
	}
}