func TestVolumeEntryNameConflictMultiCluster(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create 10 clusters err := setupSampleDbWithTopology(app, 10, // clusters 3, // nodes_per_cluster 6, // devices_per_node, 6*TB, // disksize) ) tests.Assert(t, err == nil) // Create 10 volumes for i := 0; i < 10; i++ { v := createSampleVolumeEntry(1024) v.Info.Name = "myvol" err = v.Create(app.db, app.executor, app.allocator) tests.Assert(t, err == nil) } // Create another volume same name v := createSampleVolumeEntry(10000) v.Info.Name = "myvol" err = v.Create(app.db, app.executor, app.allocator) tests.Assert(t, err != nil, err) }
func TestReplicaDurabilityDefaults(t *testing.T) { r := &ReplicaDurability{} tests.Assert(t, r.Replica == 0) r.SetDurability() tests.Assert(t, r.Replica == DEFAULT_REPLICA) }
func TestVolumeExpandIdNotFound(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() router := mux.NewRouter() app.SetRoutes(router) // Setup the server ts := httptest.NewServer(router) defer ts.Close() // JSON Request request := []byte(`{ "expand_size" : 100 }`) // Now that we have some data in the database, we can // make a request for the clutser list r, err := http.Post(ts.URL+"/volumes/12345/expand", "application/json", bytes.NewBuffer(request)) tests.Assert(t, err == nil) tests.Assert(t, r.StatusCode == http.StatusNotFound, r.StatusCode) body, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) tests.Assert(t, err == nil) r.Body.Close() tests.Assert(t, strings.Contains(string(body), "Id not found")) }
func TestVolumeCreateSmallSize(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() router := mux.NewRouter() app.SetRoutes(router) // Setup the server ts := httptest.NewServer(router) defer ts.Close() // VolumeCreate JSON Request request := []byte(`{ "size" : 0 }`) // Send request r, err := http.Post(ts.URL+"/volumes", "application/json", bytes.NewBuffer(request)) tests.Assert(t, err == nil) tests.Assert(t, r.StatusCode == http.StatusBadRequest) body, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) tests.Assert(t, err == nil) r.Body.Close() tests.Assert(t, strings.Contains(string(body), "Invalid volume size")) }
func TestVolumeListEmpty(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() router := mux.NewRouter() app.SetRoutes(router) // Setup the server ts := httptest.NewServer(router) defer ts.Close() // Get volumes, there should be none r, err := http.Get(ts.URL + "/volumes") tests.Assert(t, r.StatusCode == http.StatusOK) tests.Assert(t, err == nil) tests.Assert(t, r.Header.Get("Content-Type") == "application/json; charset=UTF-8") // Read response var msg VolumeListResponse err = utils.GetJsonFromResponse(r, &msg) tests.Assert(t, err == nil) tests.Assert(t, len(msg.Volumes) == 0) }
func TestNewBrickEntryFromId(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create a brick b := NewBrickEntry(10, 20, 5, "abc", "def") // Save element in database err := app.db.Update(func(tx *bolt.Tx) error { return b.Save(tx) }) tests.Assert(t, err == nil) var brick *BrickEntry err = app.db.View(func(tx *bolt.Tx) error { var err error brick, err = NewBrickEntryFromId(tx, b.Info.Id) return err }) tests.Assert(t, err == nil) tests.Assert(t, reflect.DeepEqual(brick, b)) }
func TestNewBrickEntryNewInfoResponse(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create a brick b := NewBrickEntry(10, 20, 5, "abc", "def") // Save element in database err := app.db.Update(func(tx *bolt.Tx) error { return b.Save(tx) }) tests.Assert(t, err == nil) var info *api.BrickInfo err = app.db.View(func(tx *bolt.Tx) error { brick, err := NewBrickEntryFromId(tx, b.Id()) if err != nil { return err } info, err = brick.NewInfoResponse(tx) return err }) tests.Assert(t, err == nil) tests.Assert(t, reflect.DeepEqual(*info, b.Info)) }
func TestNoneDurabilityDefaults(t *testing.T) { r := &NoneDurability{} tests.Assert(t, r.Replica == 0) r.SetDurability() tests.Assert(t, r.Replica == 1) }
func TestVolumeEntryExpandMaxBrickLimit(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create a large cluster err := setupSampleDbWithTopology(app, 10, // clusters 4, // nodes_per_cluster 24, // devices_per_node, 600*GB, // disksize) ) tests.Assert(t, err == nil) // Create large volume v := createSampleVolumeEntry(100) err = v.Create(app.db, app.executor, app.allocator) tests.Assert(t, err == nil) // Add a bunch of bricks until the limit fakebricks := make(sort.StringSlice, BrickMaxNum-len(v.Bricks)) v.Bricks = append(v.Bricks, fakebricks...) // Try to expand the volume, but it will return that the max number // of bricks has been reached err = v.Expand(app.db, app.executor, app.allocator, 100) tests.Assert(t, err == ErrMaxBricks, err) }
func TestVolumeEntryFromId(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create a volume entry v := createSampleVolumeEntry(1024) // Save in database err := app.db.Update(func(tx *bolt.Tx) error { return v.Save(tx) }) tests.Assert(t, err == nil) // Load from database var entry *VolumeEntry err = app.db.View(func(tx *bolt.Tx) error { var err error entry, err = NewVolumeEntryFromId(tx, v.Info.Id) return err }) tests.Assert(t, err == nil) tests.Assert(t, reflect.DeepEqual(entry, v)) }
func TestNewStatusGroup(t *testing.T) { s := NewStatusGroup() tests.Assert(t, s != nil) tests.Assert(t, s.results != nil) tests.Assert(t, len(s.results) == 0) tests.Assert(t, s.err == nil) }
// setupCluster builds CLUSTERS clusters on the running heketi server,
// splitting the NODES test nodes evenly across them. Clusters are created
// concurrently; within each cluster, nodes are added sequentially (the
// comment below says this is due to probes) while each node's devices are
// added concurrently. Errors from any goroutine are collected in the
// status group and fail the test.
func setupCluster(t *testing.T) {
	// The package-level heketi client must have been initialized first.
	tests.Assert(t, heketi != nil)

	nodespercluster := NODES / CLUSTERS
	nodes := getnodes()
	sg := utils.NewStatusGroup()
	for cluster := 0; cluster < CLUSTERS; cluster++ {
		// sg.Add is called before the goroutine starts, so sg.Result()
		// below cannot return before this worker finishes.
		sg.Add(1)
		go func(nodes_in_cluster []string) {
			defer sg.Done()
			// Create a cluster
			// NOTE: this 'cluster' shadows the loop counter; inside the
			// goroutine it is the created cluster object.
			cluster, err := heketi.ClusterCreate()
			if err != nil {
				logger.Err(err)
				sg.Err(err)
				return
			}

			// Add nodes sequentially due to probes
			for index, hostname := range nodes_in_cluster {
				nodeReq := &glusterfs.NodeAddRequest{}
				nodeReq.ClusterId = cluster.Id
				nodeReq.Hostnames.Manage = []string{hostname}
				nodeReq.Hostnames.Storage = []string{hostname}
				// Spread nodes across zones round-robin (zones are 1-based)
				nodeReq.Zone = index%ZONES + 1

				node, err := heketi.NodeAdd(nodeReq)
				if err != nil {
					logger.Err(err)
					sg.Err(err)
					return
				}

				// Add devices all concurrently
				for _, disk := range getdisks() {
					sg.Add(1)
					// disk is passed by value so each goroutine gets its
					// own copy of the current disk name.
					go func(d string) {
						defer sg.Done()

						driveReq := &glusterfs.DeviceAddRequest{}
						driveReq.Name = d
						driveReq.Weight = 100
						driveReq.NodeId = node.Id

						err := heketi.DeviceAdd(driveReq)
						if err != nil {
							logger.Err(err)
							sg.Err(err)
						}
					}(disk)
				}
			}
		}(nodes[cluster*nodespercluster : (cluster+1)*nodespercluster])
	}

	// Wait here for results
	err := sg.Result()
	tests.Assert(t, err == nil)
}
func TestAppAdvsettings(t *testing.T) { dbfile := tests.Tempfile() defer os.Remove(dbfile) os.Setenv("HEKETI_EXECUTOR", "mock") defer os.Unsetenv("HEKETI_EXECUTOR") data := []byte(`{ "glusterfs" : { "executor" : "crazyexec", "allocator" : "simple", "db" : "` + dbfile + `", "brick_max_size_gb" : 1024, "brick_min_size_gb" : 1, "max_bricks_per_volume" : 33 } }`) bmax, bmin, bnum := BrickMaxSize, BrickMinSize, BrickMaxNum defer func() { BrickMaxSize, BrickMinSize, BrickMaxNum = bmax, bmin, bnum }() app := NewApp(bytes.NewReader(data)) tests.Assert(t, app != nil) tests.Assert(t, app.conf.Executor == "mock") tests.Assert(t, BrickMaxNum == 33) tests.Assert(t, BrickMaxSize == 1*TB) tests.Assert(t, BrickMinSize == 1*GB) }
func TestNewSimpleAllocator(t *testing.T) { a := NewSimpleAllocator() tests.Assert(t, a != nil) tests.Assert(t, a.rings != nil) }
func TestNewNodeEntryMarshal(t *testing.T) { req := &api.NodeAddRequest{ ClusterId: "123", Hostnames: api.HostAddresses{ Manage: []string{"manage"}, Storage: []string{"storage"}, }, Zone: 99, } n := NewNodeEntryFromRequest(req) n.DeviceAdd("abc") n.DeviceAdd("def") buffer, err := n.Marshal() tests.Assert(t, err == nil) tests.Assert(t, buffer != nil) tests.Assert(t, len(buffer) > 0) um := &NodeEntry{} err = um.Unmarshal(buffer) tests.Assert(t, err == nil) tests.Assert(t, reflect.DeepEqual(n, um)) }
func TestDisperseDurability(t *testing.T) { r := &DisperseDurability{ Data: 8, Redundancy: 3, } gen := r.BrickSizeGenerator(200 * GB) // Gen 1 sets, brick_size, err := gen() tests.Assert(t, err == nil) tests.Assert(t, sets == 2) tests.Assert(t, brick_size == uint64(100*GB/8)) tests.Assert(t, 8+3 == r.BricksInSet()) // Gen 2 sets, brick_size, err = gen() tests.Assert(t, err == nil) tests.Assert(t, sets == 4) tests.Assert(t, brick_size == uint64(50*GB/8)) tests.Assert(t, 8+3 == r.BricksInSet()) // Gen 3 sets, brick_size, err = gen() tests.Assert(t, err == ErrMininumBrickSize) tests.Assert(t, 8+3 == r.BricksInSet()) }
func TestVolumeEntryDoNotAllowDeviceOnSameNode(t *testing.T) { tmpfile := tests.Tempfile() defer os.Remove(tmpfile) // Create the app app := NewTestApp(tmpfile) defer app.Close() // Create cluster with plenty of space, but // it will not have enough nodes err := setupSampleDbWithTopology(app, 1, // clusters 1, // nodes_per_cluster 200, // devices_per_node, 6*TB, // disksize) ) tests.Assert(t, err == nil) // Create volume v := createSampleVolumeEntry(100) err = v.Create(app.db, app.executor, app.allocator) tests.Assert(t, err != nil, err) tests.Assert(t, err == ErrNoSpace) v = createSampleVolumeEntry(10000) err = v.Create(app.db, app.executor, app.allocator) tests.Assert(t, err != nil, err) tests.Assert(t, err == ErrNoSpace) }
func TestNewSshExecBadPrivateKeyLocation(t *testing.T) { config := &SshConfig{} s, err := NewSshExecutor(config) tests.Assert(t, s == nil) tests.Assert(t, err != nil) }
func TestNewClusterEntry(t *testing.T) { c := NewClusterEntry() tests.Assert(t, c.Info.Id == "") tests.Assert(t, c.Info.Volumes != nil) tests.Assert(t, c.Info.Nodes != nil) tests.Assert(t, len(c.Info.Volumes) == 0) tests.Assert(t, len(c.Info.Nodes) == 0) }
func TestNewVolumeEntry(t *testing.T) { v := NewVolumeEntry() tests.Assert(t, v.Bricks != nil) tests.Assert(t, len(v.Info.Id) == 0) tests.Assert(t, len(v.Info.Cluster) == 0) tests.Assert(t, len(v.Info.Clusters) == 0) }
func TestNewNodeEntry(t *testing.T) { n := NewNodeEntry() tests.Assert(t, n.Info.Id == "") tests.Assert(t, n.Info.ClusterId == "") tests.Assert(t, len(n.Devices) == 0) tests.Assert(t, n.Devices != nil) }
func TestReplicaDurabilitySetExecutorRequest(t *testing.T) { r := &ReplicaDurability{} r.SetDurability() v := &executors.VolumeRequest{} r.SetExecutorVolumeRequest(v) tests.Assert(t, v.Replica == r.Replica) tests.Assert(t, v.Type == executors.DurabilityReplica) }
func TestDisperseDurabilityDefaults(t *testing.T) { r := &DisperseDurability{} tests.Assert(t, r.Data == 0) tests.Assert(t, r.Redundancy == 0) r.SetDurability() tests.Assert(t, r.Data == DEFAULT_EC_DATA) tests.Assert(t, r.Redundancy == DEFAULT_EC_REDUNDANCY) }
func TestNoneDurabilitySetExecutorRequest(t *testing.T) { r := &NoneDurability{} r.SetDurability() v := &executors.VolumeRequest{} r.SetExecutorVolumeRequest(v) tests.Assert(t, v.Replica == 1) tests.Assert(t, v.Type == executors.DurabilityNone) }
func TestDisperseDurabilitySetExecutorRequest(t *testing.T) { r := &DisperseDurability{} r.SetDurability() v := &executors.VolumeRequest{} r.SetExecutorVolumeRequest(v) tests.Assert(t, v.Data == r.Data) tests.Assert(t, v.Redundancy == r.Redundancy) tests.Assert(t, v.Type == executors.DurabilityDispersion) }
func TestNewJwtAuth(t *testing.T) { c := &JwtAuthConfig{} c.Admin.PrivateKey = "Key" c.User.PrivateKey = "UserKey" j := NewJwtAuth(c) tests.Assert(t, string(j.adminKey) == c.Admin.PrivateKey) tests.Assert(t, string(j.userKey) == c.User.PrivateKey) tests.Assert(t, j != nil) }
func TestNewKubeExecutorNoNamespace(t *testing.T) { config := &KubeConfig{ Host: "myhost", Sudo: true, Fstab: "myfstab", } k, err := NewKubeExecutor(config) tests.Assert(t, err != nil) tests.Assert(t, k == nil) }
func TestAppLogLevel(t *testing.T) { dbfile := tests.Tempfile() defer os.Remove(dbfile) levels := []string{ "none", "critical", "error", "warning", "info", "debug", } logger.SetLevel(utils.LEVEL_DEBUG) for _, level := range levels { data := []byte(`{ "glusterfs" : { "executor" : "mock", "allocator" : "simple", "db" : "` + dbfile + `", "loglevel" : "` + level + `" } }`) app := NewApp(bytes.NewReader(data)) tests.Assert(t, app != nil, level, string(data)) switch level { case "none": tests.Assert(t, logger.Level() == utils.LEVEL_NOLOG) case "critical": tests.Assert(t, logger.Level() == utils.LEVEL_CRITICAL) case "error": tests.Assert(t, logger.Level() == utils.LEVEL_ERROR) case "warning": tests.Assert(t, logger.Level() == utils.LEVEL_WARNING) case "info": tests.Assert(t, logger.Level() == utils.LEVEL_INFO) case "debug": tests.Assert(t, logger.Level() == utils.LEVEL_DEBUG) } app.Close() } // Test that an unknown value does not change the loglevel logger.SetLevel(utils.LEVEL_NOLOG) data := []byte(`{ "glusterfs" : { "executor" : "mock", "allocator" : "simple", "db" : "` + dbfile + `", "loglevel" : "blah" } }`) app := NewApp(bytes.NewReader(data)) tests.Assert(t, app != nil) tests.Assert(t, logger.Level() == utils.LEVEL_NOLOG) }
func TestReplicaDurabilityLargeBrickGenerator(t *testing.T) { r := &ReplicaDurability{ Replica: 2, } gen := r.BrickSizeGenerator(100 * TB) // Gen 1 sets, brick_size, err := gen() tests.Assert(t, err == nil) tests.Assert(t, sets == 32) tests.Assert(t, brick_size == 3200*GB) tests.Assert(t, 2 == r.BricksInSet()) }
// TestJwt exercises the full JWT middleware flow: a token signed with the
// admin key carrying iss, iat, exp, and qsh claims must be accepted, and
// the parsed token must be exposed to downstream handlers via the
// gorilla/context request context under the "jwt" key.
func TestJwt(t *testing.T) {
	// Setup jwt
	c := &JwtAuthConfig{}
	c.Admin.PrivateKey = "Key"
	c.User.PrivateKey = "UserKey"
	j := NewJwtAuth(c)
	tests.Assert(t, j != nil)

	// Setup middleware framework
	n := negroni.New(j)
	tests.Assert(t, n != nil)

	// Create a simple middleware to check if it was called
	called := false
	mw := func(rw http.ResponseWriter, r *http.Request) {
		// The auth middleware must have stored the parsed token in the
		// request context before calling through.
		data := context.Get(r, "jwt")
		tests.Assert(t, data != nil)

		token := data.(*jwt.Token)
		tests.Assert(t, token.Claims["iss"] == "admin")

		called = true

		rw.WriteHeader(http.StatusOK)
	}
	n.UseHandlerFunc(mw)

	// Create test server
	ts := httptest.NewServer(n)

	// Create a valid admin token with iss, iat, and exp claims
	// (the original comment wrongly said the 'iss' claim was missing)
	token := jwt.New(jwt.SigningMethodHS256)
	token.Claims["iss"] = "admin"
	token.Claims["iat"] = time.Now().Unix()
	token.Claims["exp"] = time.Now().Add(time.Second * 10).Unix()

	// Generate qsh (query string hash) over "METHOD&path"
	qshstring := "GET&/"
	hash := sha256.New()
	hash.Write([]byte(qshstring))
	token.Claims["qsh"] = hex.EncodeToString(hash.Sum(nil))

	// Sign with the admin private key configured above
	tokenString, err := token.SignedString([]byte("Key"))
	tests.Assert(t, err == nil)

	// Setup header
	req, err := http.NewRequest("GET", ts.URL, nil)
	tests.Assert(t, err == nil)

	// Attach the token with the expected 'bearer' prefix
	// (the original comment wrongly said the prefix was missing)
	req.Header.Set("Authorization", "bearer "+tokenString)
	r, err := http.DefaultClient.Do(req)
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusOK)
	tests.Assert(t, called == true)
}