func (s *TestSuite) TestCreateVolumeWithBaseImage(c *C) {
	var err error

	driver := s.driver
	imageID := uuid.New()
	err = driver.ActivateImage(imageID, s.imageFile)
	c.Assert(err, IsNil)

	volumeID := uuid.New()
	err = driver.CreateVolume(volumeID, imageID, volumeSize)
	c.Assert(err, IsNil)

	volumeDev, err := driver.GetVolumeDevice(volumeID)
	c.Assert(err, IsNil)

	err = exec.Command("mount", volumeDev, devMount).Run()
	c.Assert(err, IsNil)

	_, err = os.Stat(filepath.Join(devMount, imageTestFile))
	c.Assert(err, IsNil)

	err = exec.Command("umount", devMount).Run()
	c.Assert(err, IsNil)

	err = driver.DeleteVolume(volumeID)
	c.Assert(err, IsNil)

	err = driver.DeactivateImage(imageID)
	c.Assert(err, IsNil)
}
func (s *TestSuite) TestSnapshot(c *C) {
	var err error

	driver := s.driver
	volumeID := uuid.New()
	err = driver.CreateVolume(volumeID, "", volumeSize)
	c.Assert(err, IsNil)

	snapshotID := uuid.New()
	err = driver.CreateSnapshot(snapshotID, volumeID)
	c.Assert(err, IsNil)

	err = driver.CreateSnapshot(snapshotID, volumeID)
	c.Assert(err, Not(IsNil))
	c.Assert(err, ErrorMatches, "Already has snapshot with uuid.*")

	snapshotID2 := uuid.New()
	err = driver.CreateSnapshot(snapshotID2, volumeID)
	c.Assert(err, IsNil)

	err = driver.DeleteSnapshot(snapshotID, volumeID)
	c.Assert(err, IsNil)

	err = driver.DeleteSnapshot(snapshotID, volumeID)
	c.Assert(err, Not(IsNil))
	c.Assert(err, ErrorMatches, "cannot find snapshot.*")

	err = driver.DeleteVolume(volumeID)
	c.Assert(err, IsNil)
}
func TestToolDoesNotExist(t *testing.T) {
	// Build the Queue with the SimpleTool timer
	q := Queue{authToken: "ResourceTest"}

	// Add the tool
	st := new(SimpleTimerTooler)
	st.SetUUID(uuid.New())
	q.tools = append(q.tools, st)

	// Create the RPC server and bind the Queue
	listen := startRPCOnce("tcp", addr, &q)
	defer listen.Close()

	// Connect to the RPC server
	client, err := rpc.Dial("tcp", addr)
	if err != nil {
		t.Fatal("Error dialing RPC server.", err)
	}
	defer client.Close()

	// Set up the Job information to start the service
	params := map[string]string{"timer": "1"}
	j := common.NewJob(uuid.New(), "Testing Job", "GoTestSuite", params)

	// Create RPC call for starting a job
	startJob := common.RPCCall{Auth: "ResourceTest", Job: j}

	// Make the call; the job references a tool UUID that does not exist,
	// so a "no tool" error is expected
	err = client.Call("Queue.AddTask", startJob, &j)
	if err == nil || err.Error() != ERROR_NO_TOOL {
		t.Fatal("No tool error was not returned.", err)
	}
}
func extractID(q string, keyword string) string {
	st, err := stripQuery(q)
	if err != nil || len(st) <= 0 {
		return uuid.New()
	}

	for _, v := range st {
		kve := strings.Split(v, ":")
		// Skip entries that are not "key:value" pairs to avoid indexing
		// past the end of the split result.
		if len(kve) < 2 {
			continue
		}
		ke, ve := strings.TrimSpace(kve[0]), strings.TrimSpace(kve[1])
		if ke != keyword {
			continue
		}
		return ve
	}
	return uuid.New()
}
func (r *raids) load(filename string) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	defer r.cache()

	fp, err := os.Open(filename)
	if err != nil {
		if os.IsNotExist(err) {
			r.filename = filename
			return nil
		}
		return err
	}
	r.filename = filename
	defer fp.Close()

	dec := json.NewDecoder(fp)
	if err := dec.Decode(&r.data); err != nil {
		return err
	}
	if r.data == nil {
		r.data = map[string][]*raid{}
	}
	for c := range r.data {
		for i := range r.data[c] {
			if r.data[c][i].UUID == "" {
				r.data[c][i].UUID = uuid.New()
				r.data[c][i].Secret = uuid.New()
			}
			if r.data[c][i].Type == "" {
				r.data[c][i].Type = "event"
			}
		}
	}
	return nil
}
func TestUpdateOne(t *testing.T) {
	db := NewInMemDatabase()

	instance := new(types.VirtualMachine)
	instance.SetUuid(uuid.New())
	instance.SetName("instance")
	assert.NoError(t, db.Put(instance, nil, UIDList{}))

	vmi1 := new(types.VirtualMachineInterface)
	vmi1.SetUuid(uuid.New())
	vmi1.SetName("port1")
	assert.NoError(t, db.Put(vmi1, nil, GetReferenceList(vmi1)))

	vmi1.AddVirtualMachine(instance)
	assert.NoError(t, db.Update(vmi1, GetReferenceList(vmi1)))

	result, err := db.GetBackReferences(parseUID(instance.GetUuid()), "virtual_machine_interface")
	assert.NoError(t, err)
	assert.Contains(t, result, parseUID(vmi1.GetUuid()))

	vmi1.ClearVirtualMachine()
	assert.NoError(t, db.Update(vmi1, GetReferenceList(vmi1)))

	result, err = db.GetBackReferences(parseUID(instance.GetUuid()), "virtual_machine_interface")
	assert.NoError(t, err)
	assert.Len(t, result, 0)
}
// Create a floating-ip with 2 vmi references, delete it and verify that
// the back_refs are updated as expected.
func TestDeleteRefs(t *testing.T) {
	db := NewInMemDatabase()

	vmi1 := new(types.VirtualMachineInterface)
	vmi1.SetUuid(uuid.New())
	vmi1.SetName("port1")
	assert.NoError(t, db.Put(vmi1, nil, GetReferenceList(vmi1)))

	vmi2 := new(types.VirtualMachineInterface)
	vmi2.SetUuid(uuid.New())
	vmi2.SetName("port2")
	assert.NoError(t, db.Put(vmi2, nil, GetReferenceList(vmi2)))

	fip := new(types.FloatingIp)
	fip.SetUuid(uuid.New())
	fip.SetName("fip")
	fip.AddVirtualMachineInterface(vmi1)
	fip.AddVirtualMachineInterface(vmi2)
	assert.NoError(t, db.Put(fip, nil, GetReferenceList(fip)))

	// vmi1 is still referenced by the floating-ip, so deleting it must fail.
	assert.Error(t, db.Delete(vmi1))

	result, err := db.GetBackReferences(parseUID(vmi2.GetUuid()), "floating_ip")
	assert.NoError(t, err)
	assert.Len(t, result, 1)

	assert.NoError(t, db.Delete(fip))

	result, err = db.GetBackReferences(parseUID(vmi2.GetUuid()), "floating_ip")
	assert.NoError(t, err)
	assert.Len(t, result, 0)

	assert.NoError(t, db.Delete(vmi1))
	assert.NoError(t, db.Delete(vmi2))
}
func (h uploadHandler) doUpload(fileheader *multipart.FileHeader) error {
	file, err := fileheader.Open()
	if err != nil {
		return err
	}
	defer file.Close()

	contentType := fileheader.Header["Content-Type"][0]
	edition := &models.Edition{
		Id:          uuid.New(),
		ContentType: contentType,
	}

	dstPath := path.Join(h.bookPath, edition.Path())
	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer dst.Close()

	if _, err := io.Copy(dst, file); err != nil {
		return err
	}

	opened, err := fileheader.Open()
	if err != nil {
		return err
	}

	if contentType != models.MOBI && contentType != models.EPUB {
		return errors.New("Format not supported: " + contentType)
	}

	metaFunc := metadata.Epub
	if contentType == models.MOBI {
		metaFunc = metadata.Mobi
	}
	meta, _ := metaFunc(opened)

	newBook := models.Book{
		Id:       uuid.New(),
		Added:    time.Now(),
		Title:    meta.Title,
		Author:   meta.Author,
		Editions: models.Editions{edition},
	}
	h.db.Save(newBook)
	h.es.Add(newBook)

	go h.convert(contentType, newBook)
	return nil
}
func (c *TokenGenAccessDefault) GenerateAccessToken(data *AccessTokenData) error {
	data.AccessToken = uuid.New()
	data.AccessToken = base64.StdEncoding.EncodeToString([]byte(data.AccessToken))

	data.RefreshToken = uuid.New()
	data.RefreshToken = base64.StdEncoding.EncodeToString([]byte(data.RefreshToken))
	return nil
}
func (a *AccessTokenGenDefault) GenerateAccessToken(generaterefresh bool) (accesstoken string, refreshtoken string, err *HttpError) {
	accesstoken = uuid.New()
	accesstoken = base64.StdEncoding.EncodeToString([]byte(accesstoken))

	if generaterefresh {
		refreshtoken = uuid.New()
		refreshtoken = base64.StdEncoding.EncodeToString([]byte(refreshtoken))
	}
	return
}
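// accessTokenGenSketch is an illustrative usage of AccessTokenGenDefault and is
// not part of the original source. It assumes the zero value of the generator is
// ready to use, which the method above implies since it touches no receiver state.
func accessTokenGenSketch() (string, string, *HttpError) {
	gen := &AccessTokenGenDefault{}
	// Request both an access token and a refresh token; each is a
	// base64-encoded UUID string.
	return gen.GenerateAccessToken(true)
}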
func (s *TestSuite) TestVolume(c *C) {
	var err error

	driver := s.driver
	drv := driver.(*Driver)
	lastDevID := drv.LastDevID
	volumeID := uuid.New()

	volOps, err := driver.VolumeOps()
	c.Assert(err, IsNil)

	opts := map[string]string{
		convoydriver.OPT_SIZE: strconv.FormatInt(volumeSize, 10),
	}
	err = volOps.CreateVolume(volumeID, opts)
	c.Assert(err, IsNil)
	c.Assert(drv.LastDevID, Equals, lastDevID+1)

	err = volOps.CreateVolume(volumeID, opts)
	c.Assert(err, Not(IsNil))
	c.Assert(err, ErrorMatches, "Already has volume with specific uuid.*")

	volumeID2 := uuid.New()
	wrongOpts := map[string]string{
		convoydriver.OPT_SIZE: "1333333",
	}
	err = volOps.CreateVolume(volumeID2, wrongOpts)
	c.Assert(err, Not(IsNil))
	c.Assert(err.Error(), Equals, "Size must be multiple of block size")

	err = volOps.CreateVolume(volumeID2, opts)
	c.Assert(err, IsNil)

	listOpts := map[string]string{
		convoydriver.OPT_VOLUME_UUID: volumeID,
	}
	_, err = volOps.ListVolume(map[string]string{})
	c.Assert(err, IsNil)

	_, err = volOps.ListVolume(listOpts)
	c.Assert(err, IsNil)

	err = volOps.DeleteVolume("123")
	c.Assert(err, Not(IsNil))
	c.Assert(err, ErrorMatches, "Cannot find object .*")

	err = volOps.DeleteVolume(volumeID2)
	c.Assert(err, IsNil)

	err = volOps.DeleteVolume(volumeID)
	c.Assert(err, IsNil)
}
func TestLocalServiceManager(t *testing.T) {
	assert := assert.New(t)

	LocalServiceManagerTickerDuration = time.Millisecond * 500

	writeFilePath := filepath.Join(os.TempDir(), uuid.New()+".txt")
	defer os.Remove(writeFilePath)

	batchFilePath := filepath.Join(os.TempDir(), uuid.New()+".bat")
	defer os.Remove(batchFilePath)
	ioutil.WriteFile(batchFilePath, []byte(`
echo %time% > `+writeFilePath+`
`), 0644)

	stateFilePath := filepath.Join(os.TempDir(), uuid.New())
	defer os.Remove(stateFilePath)

	lsm := NewLocalServiceManager(stateFilePath)
	err := lsm.Start()
	assert.Nil(err)

	svc := Service{
		Name:      "fake-service",
		Directory: os.TempDir(),
		Command:   []string{"cmd.exe", "/q", "/c", batchFilePath},
	}
	err = lsm.Install(svc)
	assert.Nil(err)

	var foundFirst, foundLast string
	fiveSecondsFromNow := time.Now().Add(5 * time.Second)
	for time.Now().Before(fiveSecondsFromNow) {
		bs, err := ioutil.ReadFile(writeFilePath)
		if err == nil {
			if foundFirst == "" {
				foundFirst = string(bs)
			}
			foundLast = string(bs)
			if foundLast != foundFirst {
				break
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	assert.NotEmpty(foundFirst, "expected service manager to run command and command to write file")
	assert.NotEqual(foundLast, foundFirst, "expected service manager to restart commands when they stop")

	err = lsm.Uninstall(svc.Name)
	assert.Nil(err)
}
// NewGroup creates Group instance
func NewGroup(tasks ...*signatures.TaskSignature) *Group {
	// Generate a group UUID
	groupUUID := uuid.New()

	// Auto generate task UUIDs
	// Group tasks by common UUID
	for _, task := range tasks {
		task.UUID = uuid.New()
		task.GroupUUID = groupUUID
	}

	return &Group{Tasks: tasks}
}
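// newGroupUsageSketch is an illustrative example only, not part of the original
// source; it assumes zero-valued TaskSignature values are acceptable inputs. It
// shows that NewGroup gives each task its own UUID while all tasks share the
// group's UUID.
func newGroupUsageSketch() bool {
	t1 := new(signatures.TaskSignature)
	t2 := new(signatures.TaskSignature)
	group := NewGroup(t1, t2)
	// Each task received a distinct UUID, but both share the group UUID.
	return group.Tasks[0].GroupUUID == group.Tasks[1].GroupUUID
}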
func makeRing(config []string) *hashing.HashRing {
	hr := hashing.NewHashRing()
	for _, n := range config {
		fields := strings.Split(n, ":")
		if len(fields) < 2 {
			fields = append(fields, uuid.New())
		} else if fields[1] == "" {
			fields[1] = uuid.New()
		}
		hr.AddNode(hashing.NewNode(fields[0], fields[1]))
	}
	return hr
}
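// makeRingSketch is an illustrative call to makeRing and is not part of the
// original source. Node specs are "name" or "name:id"; a missing or empty id is
// filled with a generated UUID before the node is added to the ring.
func makeRingSketch() *hashing.HashRing {
	return makeRing([]string{"node-a", "node-b:", "node-c:fixed-id"})
}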
func GetListOfRecords() []DbRecord {
	listOfRecords := make([]DbRecord, 10)

	listOfRecords[0] = DbRecord{uuid.New(), "Shovel", 32.00}
	listOfRecords[1] = DbRecord{uuid.New(), "Lamp", 12.97}
	listOfRecords[2] = DbRecord{uuid.New(), "Food - Bread", 2.00}
	listOfRecords[3] = DbRecord{uuid.New(), "Backback", 25.02}
	listOfRecords[4] = DbRecord{uuid.New(), "Food - Water", 2.00}
	listOfRecords[5] = DbRecord{uuid.New(), "Rope", 10.00}
	listOfRecords[6] = DbRecord{uuid.New(), "Spoon", 1.00}
	listOfRecords[7] = DbRecord{uuid.New(), "Food - Meat", 8.00}
	listOfRecords[8] = DbRecord{uuid.New(), "Tent", 117.00}
	listOfRecords[9] = DbRecord{uuid.New(), "Food - Apple", 1.50}

	return listOfRecords
}
func TestPodCreate(t *testing.T) {
	kube := mocks.NewKubeClient()

	client := new(contrail_mocks.ApiClient)
	client.Init()
	client.AddInterceptor("virtual-machine-interface", &VmiInterceptor{})

	allocator := new(mocks.AddressAllocator)
	networkMgr := new(mocks.NetworkManager)
	controller := NewTestController(kube, client, allocator, networkMgr)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "test",
			Namespace: "testns",
			UID:       kubetypes.UID(uuid.New()),
			Labels: map[string]string{
				"name": "testnet",
			},
		},
	}

	netnsProject := new(types.Project)
	netnsProject.SetUuid(uuid.New())
	netnsProject.SetFQName("", []string{"default-domain", "testns"})
	client.Create(netnsProject)

	testnet := new(types.VirtualNetwork)
	testnet.SetFQName("project", []string{"default-domain", "testns", "testnet"})
	client.Create(testnet)

	allocator.On("LocateIpAddress", string(pod.ObjectMeta.UID)).Return("10.0.0.42", nil)
	networkMgr.On("LocateNetwork", "testns", "testnet", controller.config.PrivateSubnet).Return(testnet, nil)
	networkMgr.On("GetGatewayAddress", testnet).Return("10.0.255.254", nil)
	kube.PodInterface.On("Update", pod).Return(pod, nil)

	shutdown := make(chan struct{})
	go controller.Run(shutdown)

	controller.AddPod(pod)
	time.Sleep(100 * time.Millisecond)

	type shutdownMsg struct {
	}
	shutdown <- shutdownMsg{}

	kube.PodInterface.AssertExpectations(t)
}
func CreateRooms(bedrooms string, bathrooms string, kitchenAppliances []*models.ApplianceModel) []*models.RoomModel {
	var rooms []*models.RoomModel

	bedroomCount, _ := strconv.Atoi(bedrooms)
	bathroomCount, _ := strconv.Atoi(bathrooms)

	for i := 0; i < bedroomCount; i++ {
		name := "Bedroom"
		id := uuid.New()
		room := models.RoomModel{
			Name:     &name,
			RoomType: "bedroom",
			Level:    1,
			SpaceModel: models.SpaceModel{
				Id: &id,
			},
		}
		rooms = append(rooms, &room)
	}

	for i := 0; i < bathroomCount; i++ {
		name := "Bathroom"
		id := uuid.New()
		room := models.RoomModel{
			Name:     &name,
			RoomType: "bathroom",
			Level:    1,
			SpaceModel: models.SpaceModel{
				Id: &id,
			},
		}
		rooms = append(rooms, &room)
	}

	kitchenName := "Kitchen"
	kitchenId := uuid.New()
	kitchen := models.RoomModel{
		Name:     &kitchenName,
		RoomType: "kitchen",
		Level:    1,
		SpaceModel: models.SpaceModel{
			Id:         &kitchenId,
			Appliances: kitchenAppliances,
		},
	}
	rooms = append(rooms, &kitchen)

	return rooms
}
func New(username string, password string, isAdmin bool) (u *User, err error) {
	u = &User{Uuid: uuid.New(), Username: username, Password: password, Admin: isAdmin}
	if err = u.Save(); err != nil {
		u = nil
	}
	return
}
// This function will add a resource to the queue. Returns the UUID.
func (q *Queue) AddResource(name string) (string, error) {
	// Check that the name is not already in use by an active resource
	for _, v := range q.pool {
		if v.Name == name && v.Status != common.STATUS_QUIT {
			// We have found a resource with the same name, so error out
			log.WithField("name", name).Debug("Resource already exists.")
			return "", errors.New("Resource already exists!")
		}
	}

	// Create an empty resource
	res := NewResource()
	res.Name = name
	res.Status = common.STATUS_PENDING

	// Generate a UUID for the resource
	resourceuuid := uuid.New()

	// Add the resource to the resource pool under the generated UUID
	q.Lock()
	q.pool[resourceuuid] = res
	q.Unlock()

	return resourceuuid, nil
}
func (o *object) addEventResult(e eventResult) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("addEventResult() -> %v", e)
		}
	}()

	if !e.Valid {
		panic("invalid result")
	}

	newres := objectResult{}
	newres.SourcePlugin = e.Name
	newres.BranchID = uuid.New()
	newres.Timestamp = e.Timestamp
	newres.Collapsed = false
	newres.Escalated = false
	newres.Weight = 1
	newres.SourceIPV4 = e.SourceIPV4

	err = geoObjectResult(&newres)
	if err != nil {
		panic(err)
	}

	o.Results = append(o.Results, newres)
	return nil
}
func (c *appContext) uploadPic(a *multipart.FileHeader) (string, string) {
	log.Println("In upload pic territory")
	bucket := c.bucket

	file, err := a.Open()
	if err != nil {
		panic(err.Error())
	}
	defer file.Close()

	buf, _ := ioutil.ReadAll(file)

	fn := uuid.New()
	fname := "places/" + fn + path.Ext(a.Filename)
	thumbname := "placesthumb/" + fn + path.Ext(a.Filename)
	log.Println(fname)

	b := "http://s3-us-west-2.amazonaws.com/" + c.bucket.Name + "/" + fname
	d := "http://s3-us-west-2.amazonaws.com/" + c.bucket.Name + "/" + thumbname

	filetype := http.DetectContentType(buf)
	err = bucket.Put(fname, buf, filetype, s3.PublicRead)
	if err != nil {
		log.Println("bucket put error for main image")
		panic(err.Error())
	}
	log.Print("added a full image")

	img, err := jpeg.Decode(bytes.NewReader(buf))
	if err != nil {
		log.Println(err.Error())
	}

	m := resize.Resize(200, 200, img, resize.Lanczos2)
	buf2 := new(bytes.Buffer)
	err = jpeg.Encode(buf2, m, nil)
	if err != nil {
		fmt.Println(err.Error())
	}

	thumb := buf2.Bytes()
	filetype2 := http.DetectContentType(thumb)
	err = bucket.Put(thumbname, thumb, filetype2, s3.PublicRead)
	if err != nil {
		log.Println("bucket put error for thumbnail")
		panic(err.Error())
	}
	log.Println("uploaded one thumb image")

	return b, d
}
// Create a session, add it to the cache and plug it into the DB.
func (t *SessionManager) CreateSessionForUser(uid int64) (string, error) {
	session_uuid := uuid.New()

	// Get the user's info
	user_data, err := GetUserById(t.db, uid)
	if err != nil {
		return "", err
	}

	// Create the session object and put it in the local cache
	user_session := new(Session)
	user_session.User = user_data
	user_session.Expires = time.Now().Add(48 * time.Hour)
	t.add_session_to_cache(session_uuid, user_session)

	// Store the token in the database
	_, err = t.db.Exec(`INSERT INTO degreesheep.user_session (
		token, user_id, expire_time
	) VALUES (?, ?, ?)`, session_uuid, uid, user_session.Expires)
	if err != nil {
		// This isn't a fatal error since the session will be known by this API
		// server, but the session will be lost if the api server is restarted.
		// Can also lead to premature expiry in highly available API clusters.
		log.Println("CreateSessionForUser", err)
	}
	return session_uuid, nil
}
func (mp *MegaProvider) Get(loc Location) (io.ReadCloser, error) {
	ref := mp.parse(loc)
	client, err := mp.getClient(ref)
	if err != nil {
		return nil, err
	}
	defer client.Close()

	node, err := mp.getNode(client.Mega, ref.path, false)
	if err != nil {
		return nil, err
	}

	tmpName := filepath.Join(os.TempDir(), uuid.New())
	err = client.DownloadFile(node, tmpName, nil)
	if err != nil {
		os.Remove(tmpName)
		return nil, fmt.Errorf("failed to download file: %v", err)
	}

	f, err := os.Open(tmpName)
	if err != nil {
		return nil, fmt.Errorf("failed to open temporary file: %v", err)
	}
	return DeleteOnClose{f}, nil
}
func (s *TestSuite) TestListConfigIDs(c *C) {
	tmpdir, err := ioutil.TempDir("/tmp", "convoy")
	c.Assert(err, IsNil)
	defer os.RemoveAll(tmpdir)

	prefix := "prefix_"
	suffix := "_suffix.cfg"

	ids, err := ListConfigIDs(tmpdir, prefix, suffix)
	c.Assert(err, Equals, nil)
	c.Assert(ids, HasLen, 0)

	counts := 10
	uuids := make(map[string]bool)
	for i := 0; i < counts; i++ {
		id := uuid.New()
		uuids[id] = true
		err := exec.Command("touch", filepath.Join(tmpdir, prefix+id+suffix)).Run()
		c.Assert(err, IsNil)
	}

	uuidList, err := ListConfigIDs(tmpdir, prefix, suffix)
	c.Assert(err, Equals, nil)
	c.Assert(uuidList, HasLen, counts)

	for i := 0; i < counts; i++ {
		uuids[uuidList[i]] = false
	}
	for _, notCovered := range uuids {
		c.Assert(notCovered, Equals, false)
	}
}
func (s *TestSuite) TestExtractUUIDs(c *C) {
	prefix := "prefix_"
	suffix := ".suffix"
	counts := 10

	uuids := make([]string, counts)
	names := make([]string, counts)
	for i := 0; i < counts; i++ {
		uuids[i] = uuid.New()
		names[i] = prefix + uuids[i] + suffix
	}

	result, err := ExtractUUIDs(names, "prefix_", ".suffix")
	c.Assert(err, Equals, nil)
	for i := 0; i < counts; i++ {
		c.Assert(result[i], Equals, uuids[i])
	}

	names[0] = "/" + names[0]
	result, err = ExtractUUIDs(names, "prefix_", ".suffix")
	c.Assert(err, Equals, nil)
	c.Assert(result[0], Equals, uuids[0])

	names[0] = "prefix_dd_xx.suffix"
	result, err = ExtractUUIDs(names, "prefix_", ".suffix")
	c.Assert(err, ErrorMatches, "Invalid name.*")
}
func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver {
	d := inmemory.New()
	for i := 0; i < numUploads; i++ {
		addUploads(t, d, uuid.New(), repoName, startedAt)
	}
	return d
}
func TestPurgeOnlyUploads(t *testing.T) {
	oldUploadCount := 5
	oneHourAgo := time.Now().Add(-1 * time.Hour)
	fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo)

	// Create a directory tree outside _uploads and ensure
	// these files aren't deleted.
	dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()})
	if err != nil {
		t.Fatalf(err.Error())
	}
	nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1)
	if strings.Index(nonUploadPath, "_upload") != -1 {
		t.Fatalf("Non-upload path not created correctly")
	}

	nonUploadFile := path.Join(nonUploadPath, "file")
	if err = fs.PutContent(nonUploadFile, []byte("")); err != nil {
		t.Fatalf("Unable to write data file")
	}

	deleted, errs := PurgeUploads(fs, time.Now(), true)
	if len(errs) != 0 {
		t.Error("Unexpected errors", errs)
	}
	for _, file := range deleted {
		if strings.Index(file, "_upload") == -1 {
			t.Errorf("Non-upload file deleted")
		}
	}
}
func TestRunFailure(t *testing.T) {
	// Create a queue & start the resource
	q := Queue{authToken: "FailureTest"}
	l := startRPCOnce("tcp", addr, &q)
	defer l.Close()

	// Add the failure tool
	tool := new(simpleFailerTooler)
	tool.SetUUID(uuid.New())
	q.tools = append(q.tools, tool)

	// Build the RPC client
	client, err := rpc.Dial("tcp", addr)
	if err != nil {
		t.Fatal("Error dialing RPC server.", err)
	}
	defer client.Close()

	// Build the job to send to the simpleFailerTooler
	params := map[string]string{"failFunc": "Run"}
	job := common.NewJob(tool.UUID(), "Failure Test", "GoTestSuite", params)

	// Try to create the job... we should get a failure
	call := common.RPCCall{Auth: "FailureTest", Job: job}
	err = client.Call("Queue.AddTask", call, nil)
	if err == nil {
		t.Fatal("Failure task's error was not returned.")
	}
}
// maintains a persistent uuid
func AppIdLocker(callback func(string, *os.File)) func(*os.File) bool {
	return func(f *os.File) bool {
		var lines []string
		f.Seek(0, 0)
		scanner := bufio.NewScanner(f)
		for scanner.Scan() {
			lines = append(lines, scanner.Text())
		}

		var id string
		if scanner.Err() != nil || len(lines) == 0 {
			f.Seek(0, 0)
			id = uuid.New()
			_, err := fmt.Fprintf(f, "%s\n", id)
			if err != nil {
				return false
			}
			err = f.Sync()
			if err != nil {
				return false
			}
		} else if len(lines) > 0 {
			id = lines[0]
		}

		callback(id, f)
		return true
	}
}
func (s *TestSuite) TestListConfigIDs(c *C) {
	tmpdir, err := ioutil.TempDir("/tmp", "rancher-volume")
	c.Assert(err, IsNil)
	defer os.RemoveAll(tmpdir)

	prefix := "prefix_"
	suffix := "_suffix.cfg"

	ids := ListConfigIDs(tmpdir, prefix, suffix)
	c.Assert(ids, HasLen, 0)

	counts := 10
	uuids := make(map[string]bool)
	for i := 0; i < counts; i++ {
		id := uuid.New()
		uuids[id] = true
		err := exec.Command("touch", filepath.Join(tmpdir, prefix+id+suffix)).Run()
		c.Assert(err, IsNil)
	}

	uuidList := ListConfigIDs(tmpdir, prefix, suffix)
	c.Assert(uuidList, HasLen, counts)

	for i := 0; i < counts; i++ {
		_, exists := uuids[uuidList[i]]
		c.Assert(exists, Equals, true)
	}
}