func checkUUID(addr string, wantID [16]byte) {
	resp, err := http.Get(urlJoin(addr, "/data/?mode=uuid"))
	if err != nil {
		dieUnknown("Couldn't get uuid: %v", err)
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		dieUnknown("Got response code %v from uuid request", resp.StatusCode)
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		dieUnknown("Couldn't read response for uuid: %v", err)
	}
	resp.Body.Close()

	id, err := uuid.Parse(string(data))
	if err != nil {
		dieUnknown("Couldn't parse uuid from server: %v", err)
	}

	if id != wantID {
		fmt.Printf("wanted id %v, got id %v\n", uuid.Fmt(wantID), uuid.Fmt(id))
		os.Exit(2)
	}
}
func (l *Layer) WALClear(id [16]byte) error {
	err := l.LinearizeOn(uuid.Fmt(id))
	if err != nil {
		return err
	}

	key := tuple.MustAppend(nil, "wal2", id)

	p, err := l.inner.Get(key)
	if err != nil && err != kvl.ErrNotFound {
		return err
	}

	timestamps, err := walParse(p.Value)
	if err != nil {
		return err
	}

	if len(timestamps) == 0 {
		return errors.New("WAL does not have locks on that id")
	}

	// Remove the most recent timestamp.
	//
	// In the case of hung/stopped processes that marked an id but never
	// cleared it, this effectively assumes that the hung one is the *oldest*
	// one, holding the WAL lock longer than strictly necessary. This is not
	// ideal, but it is safe.
	timestamps = timestamps[:len(timestamps)-1]

	if len(timestamps) == 0 {
		return l.inner.Delete(key)
	}
	return l.inner.Set(kvl.Pair{key, walDump(timestamps)})
}
// CreateDirectory initializes a new Directory at the given location, suitable
// for OpenDirectory. It will return an error if one already exists.
func CreateDirectory(dir string) error {
	dirs := []string{
		dir,
		filepath.Join(dir, "data"),
		filepath.Join(dir, "quarantine"),
	}
	for _, d := range dirs {
		err := os.Mkdir(d, 0777)
		if err != nil && !os.IsExist(err) {
			return err
		}
	}

	f, err := os.OpenFile(filepath.Join(dir, "uuid"),
		os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return err
	}
	_, err = f.Write([]byte(uuid.Fmt(uuid.Gen4())))
	if err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
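// A minimal usage sketch pairing CreateDirectory with OpenDirectory, whose
// three-argument form (dir plus the two scrubber sleep durations) is how
// chunkServer calls it below. The path and durations here are arbitrary
// example values, not recommendations.
func exampleCreateAndOpen() error {
	dir := "/tmp/example-store" // hypothetical path
	if err := CreateDirectory(dir); err != nil {
		return err
	}
	ds, err := OpenDirectory(dir, 10*time.Millisecond, time.Microsecond)
	if err != nil {
		return err
	}
	log.Printf("opened store with UUID %v", uuid.Fmt(ds.UUID()))
	return nil
}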
func TestHTTPCommon(t *testing.T) {
	mock := storetests.NewMockStore(0)
	srv := httptest.NewServer(NewServer(mock))
	defer srv.Close()

	client, err := NewClient(srv.URL + "/")
	if err != nil {
		t.Fatalf("Couldn't initialize client: %v", err)
	}

	storetests.TestStore(t, client)

	if client.UUID() != mock.UUID() {
		t.Errorf("client UUID %v does not match mock store UUID %v",
			uuid.Fmt(client.UUID()), uuid.Fmt(mock.UUID()))
	}
}
func (h *Handler) serveUUIDs(w http.ResponseWriter, r *http.Request) {
	var resp bytes.Buffer
	for _, st := range h.stores {
		id := st.UUID()
		if id != [16]byte{} {
			fmt.Fprintf(&resp, "%v\n", uuid.Fmt(id))
		}
	}
	w.WriteHeader(http.StatusOK)
	w.Write(resp.Bytes())
}
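// A hedged client-side sketch for the handler above: fetch the list and parse
// one UUID per line. The "/uuids" path is an assumption; the real route
// depends on how the Handler's mux is wired up.
func fetchUUIDsSketch(base string) ([][16]byte, error) {
	resp, err := http.Get(base + "/uuids")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var ids [][16]byte
	for _, line := range strings.Split(string(body), "\n") {
		if line == "" {
			continue
		}
		id, err := uuid.Parse(line)
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}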
func (l *Layer) WALCheck(id [16]byte) (bool, error) {
	err := l.LinearizeOn(uuid.Fmt(id))
	if err != nil {
		return false, err
	}

	_, err = l.inner.Get(tuple.MustAppend(nil, "wal2", id))
	if err != nil {
		if err == kvl.ErrNotFound {
			err = nil
		}
		return false, err
	}
	return true, nil
}
func (ds *Directory) hashcheckLoop() error {
	for {
		_, bad := ds.hashstep()
		if bad != 0 {
			log.Printf("Found %v bad items during hash check on %v\n",
				bad, uuid.Fmt(ds.UUID()))
		}

		select {
		case <-time.After(5 * time.Second):
		case <-ds.tomb.Dying():
			return nil
		}
	}
}
func chunkServer() {
	loadConfigOrDie()
	debug.SetGCPercent(config.GCPercent)

	stores := make([]store.Store, len(config.Chunk.Dirs))
	for i := range config.Chunk.Dirs {
		dir := config.Chunk.Dirs[i]
		construct := func() store.Store {
			log.Printf("Trying to open store at %v", dir)
			start := time.Now()
			ds, err := storedir.OpenDirectory(
				dir,
				config.Chunk.Scrubber.SleepPerFile.Duration,
				config.Chunk.Scrubber.SleepPerByte.Duration,
			)
			if err != nil {
				log.Printf("Couldn't open store at %v: %v", dir, err)
				return nil
			}
			dur := time.Since(start)
			log.Printf("Store at %v opened with UUID %v in %v",
				dir, uuid.Fmt(ds.UUID()), dur)
			return ds
		}
		stores[i] = store.NewRetryStore(construct, time.Second*15)
	}

	var h http.Handler
	var err error
	h, err = chunkserver.New(stores)
	if err != nil {
		log.Fatalf("Couldn't initialize handler: %v", err)
	}
	h = httputil.NewLimitParallelism(config.Chunk.ParallelRequests, h)
	h = httputil.AddDebugHandlers(h, config.Chunk.Debug)
	if !config.Chunk.DisableHTTPLogging {
		h = httputil.LogHTTPRequests(h)
	}

	serveOrDie(config.Chunk.Listen, h)
}
func (l *Layer) WALMark(id [16]byte) error {
	err := l.LinearizeOn(uuid.Fmt(id))
	if err != nil {
		return err
	}

	key := tuple.MustAppend(nil, "wal2", id)

	p, err := l.inner.Get(key)
	if err != nil && err != kvl.ErrNotFound {
		return err
	}

	timestamps, err := walParse(p.Value)
	if err != nil {
		return err
	}

	timestamps = append(timestamps, time.Now().Unix())

	return l.inner.Set(kvl.Pair{key, walDump(timestamps)})
}
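// A hedged sketch of the intended mark/work/clear lifecycle, mirroring how
// rebalanceFile uses WALMark and WALClear further down. txDB is a
// hypothetical stand-in for whatever database handle the caller holds (the
// tests in this package use ram.New()); only RunTx is needed here.
type txDB interface {
	RunTx(fn func(ctx kvl.Ctx) error) error
}

func walLifecycleSketch(db txDB, id [16]byte, work func() error) error {
	// Mark the id in the WAL so scrubbers leave its chunks alone.
	err := db.RunTx(func(ctx kvl.Ctx) error {
		l, err := Open(ctx)
		if err != nil {
			return err
		}
		return l.WALMark(id)
	})
	if err != nil {
		return err
	}
	// Clear the mark when done, even if the work fails; hung processes that
	// never reach this point are eventually expired by WALClearOld.
	defer db.RunTx(func(ctx kvl.Ctx) error {
		l, err := Open(ctx)
		if err != nil {
			return err
		}
		return l.WALClear(id)
	})
	return work()
}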
func (f *File) indexPairs() []kvl.Pair {
	ret := make([]kvl.Pair, 0, len(f.Locations)*2+1)
	for idx, loc := range f.Locations {
		localKey := fmt.Sprintf("%v_%x_%v",
			uuid.Fmt(f.PrefixID), f.SHA256[:8], idx)
		ret = append(ret,
			kvl.Pair{
				tuple.MustAppend(nil, "file", "location", loc, f.Path),
				nil,
			},
			kvl.Pair{
				tuple.MustAppend(nil, "locationlist", loc, localKey),
				nil,
			})
	}
	ret = append(ret, kvl.Pair{
		tuple.MustAppend(nil, "file", "prefix", f.PrefixID),
		[]byte(f.Path),
	})
	return ret
}
func (h *Server) serveUUID(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(uuid.Fmt(h.store.UUID())))
}
func (l *Layer) WALClearOld() error {
	var rang kvl.RangeQuery
	rang.Low, rang.High = keys.PrefixRange(tuple.MustAppend(nil, "wal2"))
	rang.Limit = 100

	for {
		ps, err := l.inner.Range(rang)
		if err != nil {
			return err
		}

		now := time.Now().Unix()

		for _, p := range ps {
			var typ string
			var id [16]byte
			err = tuple.UnpackInto(p.Key, &typ, &id)
			if err != nil {
				return err
			}

			timestamps, err := walParse(p.Value)
			if err != nil {
				return err
			}

			newestAge := time.Duration(now-timestamps[len(timestamps)-1]) * time.Second
			oldestAge := time.Duration(now-timestamps[0]) * time.Second

			if newestAge < -walUnsafeFutureAge {
				log.Printf("WARNING: WAL entry %v is %v in the future! Skipping it.",
					uuid.Fmt(id), -newestAge)
			} else if oldestAge > walUnsafeOldAge {
				log.Printf("WARNING: WAL entry %v is %v in the past! Skipping it.",
					uuid.Fmt(id), oldestAge)
			} else if oldestAge > walExpireAge {
				log.Printf("Removing expired WAL entry %v (%v old)",
					uuid.Fmt(id), oldestAge)

				// Remove the oldest timestamp (only); we'll get to the other
				// timestamps in future passes if needed.
				timestamps = timestamps[1:]

				if len(timestamps) == 0 {
					err = l.inner.Delete(p.Key)
					if err != nil {
						return err
					}
				} else {
					err = l.inner.Set(kvl.Pair{p.Key, walDump(timestamps)})
					if err != nil {
						return err
					}
				}
			}
		}

		if len(ps) < rang.Limit {
			break
		}
		rang.Low = ps[len(ps)-1].Key
	}

	return nil
}
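// A hedged sketch of how WALClearOld might be driven: a periodic background
// sweep inside a transaction. The one-minute interval is an arbitrary example
// value, and txDB is the hypothetical stand-in from the lifecycle sketch
// above.
func walSweepLoopSketch(db txDB, stop <-chan struct{}) {
	for {
		err := db.RunTx(func(ctx kvl.Ctx) error {
			l, err := Open(ctx)
			if err != nil {
				return err
			}
			return l.WALClearOld()
		})
		if err != nil {
			log.Printf("Couldn't clear old WAL entries: %v", err)
		}
		select {
		case <-time.After(time.Minute):
		case <-stop:
			return
		}
	}
}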
func TestLayerFileGetByLocation(t *testing.T) {
	db := ram.New()
	err := db.RunTx(func(ctx kvl.Ctx) error {
		l, err := Open(ctx)
		if err != nil {
			return err
		}

		idA := [16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
		idB := [16]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
		idC := [16]byte{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

		fileA := File{
			Path:         "path to a",
			Size:         4,
			SHA256:       sha256.Sum256([]byte("abcd")),
			WriteTime:    time.Now().Unix(),
			PrefixID:     uuid.Gen4(),
			DataChunks:   2,
			MappingValue: 0,
			Locations:    [][16]byte{idB, idC},
		}
		fileB := File{
			Path:         "path to b",
			Size:         12,
			SHA256:       sha256.Sum256([]byte("goodbye dude")),
			WriteTime:    time.Now().Unix(),
			PrefixID:     uuid.Gen4(),
			DataChunks:   1,
			MappingValue: 0,
			Locations:    [][16]byte{idA, idB, idC},
		}

		err = l.SetFile(&fileA)
		if err != nil {
			t.Errorf("Couldn't SetFile(%#v): %v", fileA, err)
			return err
		}
		err = l.SetFile(&fileB)
		if err != nil {
			t.Errorf("Couldn't SetFile(%#v): %v", fileB, err)
			return err
		}

		fs, err := l.GetFilesByLocation(idB, 1)
		if err != nil {
			t.Errorf("Couldn't GetFilesByLocation: %v", err)
			return err
		}
		if len(fs) != 1 || !reflect.DeepEqual(fs[0], fileA) {
			t.Errorf("GetFilesByLocation(%v, 1) returned %#v, but wanted %#v",
				uuid.Fmt(idB), fs, []File{fileA})
		}

		fs, err = l.GetFilesByLocation(idA, 3)
		if err != nil {
			t.Errorf("Couldn't GetFilesByLocation: %v", err)
			return err
		}
		if len(fs) != 1 || !reflect.DeepEqual(fs[0], fileB) {
			t.Errorf("GetFilesByLocation(%v, 3) returned %#v, but wanted %#v",
				uuid.Fmt(idA), fs, []File{fileB})
		}

		fs, err = l.GetFilesByLocation(idC, 3)
		if err != nil {
			t.Errorf("Couldn't GetFilesByLocation: %v", err)
			return err
		}
		if len(fs) != 2 || !reflect.DeepEqual(fs[0], fileA) ||
			!reflect.DeepEqual(fs[1], fileB) {
			t.Errorf("GetFilesByLocation(%v, 3) returned %#v, but wanted %#v",
				uuid.Fmt(idC), fs, []File{fileA, fileB})
		}

		return nil
	})
	if err != nil {
		t.Errorf("Couldn't run transaction: %v", err)
	}
}
func (m *Multi) scrubLocationsStep() (bool, error) {
	uuidCmp := func(a, b [16]byte) int {
		for i := 0; i < 16; i++ {
			if a[i] < b[i] {
				return -1
			} else if a[i] > b[i] {
				return 1
			}
		}
		return 0
	}

	var thisLoc meta.Location
	var haveLoc bool
	var from string
	var wantFiles []string

	err := m.db.RunTx(func(ctx kvl.Ctx) error {
		from = ""
		wantFiles = nil
		haveLoc = false

		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		pos, err := layer.GetConfig("scrublocationpos")
		if err != nil {
			return err
		}

		// Parse errors are okay; the position will be the zero UUID.
		since, _ := uuid.Parse(string(pos))

		allLocations, err := layer.AllLocations()
		if err != nil {
			return err
		}

		if len(allLocations) == 0 {
			// No locations exist.
			return nil
		}

		for {
			// Get the Location with the lowest UUID greater than since.
			haveLoc = false
			for _, loc := range allLocations {
				if uuidCmp(loc.UUID, since) >= 0 &&
					(!haveLoc || uuidCmp(thisLoc.UUID, loc.UUID) > 0) {
					thisLoc = loc
					haveLoc = true
				}
			}
			if haveLoc {
				break
			}

			// All locations were handled already; try again from the top.
			if since == [16]byte{} {
				panic("locations exist but couldn't find one")
			}
			since = [16]byte{}
		}

		// Set since to the next highest UUID.
		since = thisLoc.UUID
		for i := 15; i >= 0; i-- {
			since[i]++
			if since[i] != 0 {
				break
			}
		}

		err = layer.SetConfig("scrublocationpos", []byte(uuid.Fmt(since)))
		if err != nil {
			return err
		}

		// Now get the file list in the location we've chosen, resuming at the
		// saved position (per location).
		fromB, err := layer.GetConfig(
			"scrublocationpos-" + uuid.Fmt(thisLoc.UUID))
		if err != nil {
			return err
		}
		from = string(fromB)

		files, err := layer.GetLocationContents(thisLoc.UUID, from,
			scrubLocationsCount)
		if err != nil {
			return err
		}

		var newLocPos string
		if len(files) > 0 {
			newLocPos = files[len(files)-1]
		}

		err = layer.SetConfig(
			"scrublocationpos-"+uuid.Fmt(thisLoc.UUID), []byte(newLocPos))
		if err != nil {
			return err
		}

		wantFiles = files
		return nil
	})
	if err != nil {
		return false, err
	}

	if !haveLoc {
		// No chunk servers.
		return true, nil
	}

	wantFilesMap := make(map[string]struct{}, len(wantFiles))
	for _, f := range wantFiles {
		wantFilesMap[f] = struct{}{}
	}

	st := m.finder.StoreFor(thisLoc.UUID)
	if st == nil {
		log.Printf("Couldn't scrubLocation %v, it is offline",
			uuid.Fmt(thisLoc.UUID))
		return false, nil
	}

	haveFiles, err := st.List(from, scrubLocationsCount, nil)
	if err != nil {
		log.Printf("Couldn't List from %v: %v", uuid.Fmt(thisLoc.UUID), err)
		return false, nil
	}

	haveFilesMap := make(map[string]struct{}, len(haveFiles))
	for _, f := range haveFiles {
		haveFilesMap[f] = struct{}{}
	}

	checkHaveFiles := make([]string, 0, len(haveFiles))
	for _, f := range haveFiles {
		if len(wantFiles) < scrubLocationsCount ||
			f <= wantFiles[len(wantFiles)-1] {
			checkHaveFiles = append(checkHaveFiles, f)
		}
	}

	checkWantFiles := make([]string, 0, len(wantFiles))
	for _, f := range wantFiles {
		if len(haveFiles) < scrubLocationsCount ||
			f <= haveFiles[len(haveFiles)-1] {
			checkWantFiles = append(checkWantFiles, f)
		}
	}

	for _, have := range checkHaveFiles {
		if _, ok := wantFilesMap[have]; !ok {
			pid, err := prefixIDFromLocalKey(have)
			if err != nil {
				log.Printf("Couldn't figure out PrefixID from localKey(%#v): %v",
					have, err)
				err = st.CAS(have, store.AnyV, store.MissingV, nil)
				if err != nil {
					log.Printf("Couldn't delete extraneous chunk %v from %v: %v",
						have, uuid.Fmt(thisLoc.UUID), err)
				}
				continue
			}

			var inWAL bool
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}
				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}
				if !inWAL {
					// Check to see if the write finished.
					inWAL, err = layer.LocationShouldHave(thisLoc.UUID, have)
					if err != nil {
						return err
					}
				}
				return nil
			})
			if err != nil {
				log.Printf("Couldn't check WAL for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("extraneous chunk %v on %v, but is in WAL, skipping",
					have, uuid.Fmt(thisLoc.UUID))
				continue
			}

			log.Printf("deleting extraneous chunk %v on %v",
				have, uuid.Fmt(thisLoc.UUID))
			err = st.CAS(have, store.AnyV, store.MissingV, nil)
			if err != nil {
				log.Printf("Couldn't delete extraneous chunk %v from %v: %v",
					have, uuid.Fmt(thisLoc.UUID), err)
				continue
			}
		}
	}

	for _, want := range checkWantFiles {
		if _, ok := haveFilesMap[want]; !ok {
			log.Printf("missing chunk %v on %v", want, uuid.Fmt(thisLoc.UUID))

			pid, err := prefixIDFromLocalKey(want)
			if err != nil {
				log.Printf("Couldn't figure out PrefixID from localKey(%#v): %v",
					want, err)
				continue
			}

			var inWAL bool
			var path string
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				path = ""
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}
				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}
				if !inWAL {
					path, err = layer.PathForPrefixID(pid)
					if err != nil {
						return err
					}
				}
				return nil
			})
			if err != nil {
				log.Printf("Couldn't get path for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("skipping rebuild of %v, prefix in WAL", path)
				continue
			}

			err = m.rebuild(path)
			if err != nil {
				log.Printf("Couldn't rebuild %v: %v", path, err)
				continue
			}
			log.Printf("successfully rebuilt %v", path)
		}
	}

	if thisLoc.Dead && len(wantFiles) > 0 {
		// Dead stores should be cleared.
		for _, want := range wantFiles {
			pid, err := prefixIDFromLocalKey(want)
			if err != nil {
				// Already logged in a loop above.
				continue
			}

			var inWAL bool
			var path string
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				path = ""
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}
				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}
				if !inWAL {
					path, err = layer.PathForPrefixID(pid)
					if err != nil {
						return err
					}
				}
				return nil
			})
			if err != nil {
				log.Printf("Couldn't check wal/path for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("skipping rebuild of PrefixID %v on dead store, prefix in WAL",
					uuid.Fmt(pid))
				continue
			}

			err = m.rebuild(path)
			if err != nil {
				log.Printf("Couldn't rebuild %v: %v", path, err)
				continue
			}
			log.Printf("successfully rebuilt %v", path)
		}
	}

	return len(wantFiles) == 0, nil
}
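// A standalone illustration (not part of the scrubber) of the "advance since
// to the next highest UUID" increment used in scrubLocationsStep above: bytes
// are incremented from the least significant end, carrying while a byte wraps
// to zero.
func nextUUIDSketch() {
	id := [16]byte{0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
	for i := 15; i >= 0; i-- {
		id[i]++
		if id[i] != 0 {
			break
		}
	}
	// Every trailing 0xff wrapped to zero and carried one byte left:
	// prints ab000000000000000000000000000000.
	fmt.Printf("%x\n", id)
}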
func (m *Multi) rebalanceFile(f meta.File, finderEntries map[[16]byte]FinderEntry) (bool, error) {
	// Search for the least-free location that this file is stored on.
	var minI int
	var minF int64
	var minS store.Store
	for i, l := range f.Locations {
		fe, ok := finderEntries[l]
		if !ok {
			continue
		}
		if minS == nil || minF > fe.Free {
			minS = fe.Store
			minF = fe.Free
			minI = i
		}
	}
	if minS == nil {
		return false, nil
	}

	// Search for the most-free location that this file is NOT stored on.
	var maxF int64
	var maxS store.Store
	for id, fe := range finderEntries {
		if fe.Dead {
			continue
		}
		found := false
		for _, locid := range f.Locations {
			if locid == id {
				found = true
				break
			}
		}
		if found {
			continue
		}
		if maxS == nil || maxF < fe.Free {
			maxF = fe.Free
			maxS = fe.Store
		}
	}
	if maxS == nil {
		return false, nil
	}

	if maxF-minF < rebalanceMinDifference {
		return false, nil
	}

	// We should move chunk minI on minS to maxS.
	err := m.db.RunTx(func(ctx kvl.Ctx) error {
		l, err := meta.Open(ctx)
		if err != nil {
			return err
		}
		return l.WALMark(f.PrefixID)
	})
	if err != nil {
		return false, err
	}
	defer func() {
		// TODO: how to handle errors here?
		m.db.RunTx(func(ctx kvl.Ctx) error {
			l, err := meta.Open(ctx)
			if err != nil {
				return err
			}
			return l.WALClear(f.PrefixID)
		})
	}()

	localKey := localKeyFor(&f, minI)

	data, st, err := minS.Get(localKey, store.GetOptions{})
	if err != nil {
		return false, err
	}

	setCASV := store.CASV{
		Present: true,
		SHA256:  st.SHA256,
		Data:    data,
	}

	err = maxS.CAS(localKey, store.MissingV, setCASV, nil)
	if err != nil {
		return false, err
	}

	newF := f
	newF.Locations = make([][16]byte, len(f.Locations))
	copy(newF.Locations, f.Locations)
	newF.Locations[minI] = maxS.UUID()

	err = m.db.RunTx(func(ctx kvl.Ctx) error {
		l, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		f2, err := l.GetFile(f.Path)
		if err != nil {
			return err
		}

		if f2.PrefixID != f.PrefixID {
			return errModifiedDuringBalance
		}
		if len(f2.Locations) != len(f.Locations) {
			return errModifiedDuringBalance
		}
		for i, floc := range f.Locations {
			if floc != f2.Locations[i] {
				return errModifiedDuringBalance
			}
		}

		return l.SetFile(&newF)
	})
	if err != nil {
		maxS.CAS(localKey, setCASV, store.MissingV, nil) // ignore error
		return false, err
	}

	err = minS.CAS(localKey, setCASV, store.MissingV, nil)
	if err != nil {
		log.Printf("Couldn't remove rebalanced chunk from old location %v: %v",
			uuid.Fmt(minS.UUID()), err)
	}

	fe := finderEntries[minS.UUID()]
	fe.Free += int64(len(data))
	finderEntries[minS.UUID()] = fe

	fe = finderEntries[maxS.UUID()]
	fe.Free -= int64(len(data))
	finderEntries[maxS.UUID()] = fe

	return true, nil
}
func localKeyFor(file *meta.File, idx int) string {
	return fmt.Sprintf("%v_%x_%v",
		uuid.Fmt(file.PrefixID), file.SHA256[:8], idx)
}
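// A hedged illustration of the key format above (the same format indexPairs
// builds inline), assuming uuid.Fmt renders the conventional dashed UUID
// form. All values here are made up for the example.
func exampleLocalKey() {
	f := &meta.File{
		PrefixID: [16]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
			0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
		SHA256: sha256.Sum256([]byte("example data")),
	}
	// Prints something like
	// 01234567-89ab-cdef-0123-456789abcdef_<first 16 hex chars of the hash>_2
	// which is the shape prefixIDFromLocalKey parses back in the scrubber.
	fmt.Println(localKeyFor(f, 2))
}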
func (h *Handler) serveStores(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		var req storesRequest
		err := json.NewDecoder(r.Body).Decode(&req)
		if err != nil {
			httputil.RespondJSONError(w, err.Error(), http.StatusBadRequest)
			return
		}

		switch req.Operation {
		case "rescan":
			err = h.finder.Rescan()
			if err != nil {
				httputil.RespondJSONError(w,
					fmt.Sprintf("Couldn't rescan: %v", err),
					http.StatusInternalServerError)
				return
			}

		case "scan":
			err = h.finder.Scan(req.URL)
			if err != nil {
				httputil.RespondJSONError(w,
					fmt.Sprintf("Couldn't scan %v: %v", req.URL, err),
					http.StatusBadRequest)
				return
			}

		case "dead", "undead":
			id, err := uuid.Parse(req.UUID)
			if err != nil {
				httputil.RespondJSONError(w, "Couldn't parse UUID",
					http.StatusBadRequest)
				return
			}
			err = h.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}
				loc, err := layer.GetLocation(id)
				if err != nil {
					return err
				}
				if loc == nil {
					return kvl.ErrNotFound
				}
				loc.Dead = req.Operation == "dead"
				return layer.SetLocation(*loc)
			})
			if err != nil {
				if err == kvl.ErrNotFound {
					httputil.RespondJSONError(w, "No store with that UUID",
						http.StatusBadRequest)
					return
				}
				httputil.RespondJSONError(w, err.Error(),
					http.StatusInternalServerError)
				return
			}

		case "delete":
			id, err := uuid.Parse(req.UUID)
			if err != nil {
				httputil.RespondJSONError(w, "Couldn't parse UUID",
					http.StatusBadRequest)
				return
			}
			st := h.finder.StoreFor(id)
			if st != nil {
				httputil.RespondJSONError(w, "UUID currently connected",
					http.StatusBadRequest)
				return
			}
			err = h.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}
				loc, err := layer.GetLocation(id)
				if err != nil {
					return err
				}
				if loc == nil {
					return kvl.ErrNotFound
				}
				return layer.DeleteLocation(*loc)
			})
			if err != nil {
				if err == kvl.ErrNotFound {
					httputil.RespondJSONError(w, "No store with that UUID",
						http.StatusBadRequest)
					return
				}
				httputil.RespondJSONError(w, err.Error(),
					http.StatusInternalServerError)
				return
			}

		default:
			httputil.RespondJSONError(w, "unsupported operation",
				http.StatusBadRequest)
			return
		}
	} else if r.Method != "GET" {
		w.Header().Set("Allow", "GET, POST")
		httputil.RespondJSONError(w, "bad method", http.StatusMethodNotAllowed)
		return
	}

	finderEntries := h.finder.Stores()

	ret := make([]storesResponseEntry, 0, 10)
	err := h.db.RunReadTx(func(ctx kvl.Ctx) error {
		ret = ret[:0]

		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		locs, err := layer.AllLocations()
		if err != nil {
			return err
		}

		for _, loc := range locs {
			fe, connected := finderEntries[loc.UUID]
			ret = append(ret, storesResponseEntry{
				UUID:      uuid.Fmt(loc.UUID),
				URL:       loc.URL,
				Name:      loc.Name,
				Dead:      loc.Dead,
				Connected: connected,
				LastSeen:  time.Unix(loc.LastSeen, 0).UTC(),
				Free:      fe.Free,
			})
		}

		return nil
	})
	if err != nil {
		httputil.RespondJSONError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("content-type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(ret)
}
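// A hedged client-side sketch for the POST side of serveStores: mark a store
// dead. The "/stores" path and the lowercase JSON field names are assumptions
// here; the real names come from storesRequest's struct tags, which are not
// shown in this file.
func markStoreDeadSketch(base, storeUUID string) error {
	body, err := json.Marshal(map[string]string{
		"operation": "dead",
		"uuid":      storeUUID,
	})
	if err != nil {
		return err
	}
	resp, err := http.Post(base+"/stores", "application/json",
		bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %v", resp.Status)
	}
	return nil
}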
func TestLayerLocationContents(t *testing.T) {
	db := ram.New()
	err := db.RunTx(func(ctx kvl.Ctx) error {
		l, err := Open(ctx)
		if err != nil {
			return err
		}

		locA := uuid.Gen4()
		locB := uuid.Gen4()
		locC := uuid.Gen4()

		files := []File{
			{
				Path:         "file-a",
				Size:         1,
				SHA256:       sha256.Sum256([]byte{'a'}),
				WriteTime:    time.Now().Unix(),
				PrefixID:     uuid.Gen4(),
				DataChunks:   2,
				MappingValue: 0,
				Locations:    [][16]byte{locA, locB, locC},
			},
			{
				Path:         "file-b",
				Size:         1,
				SHA256:       sha256.Sum256([]byte{'b'}),
				WriteTime:    time.Now().Unix(),
				PrefixID:     uuid.Gen4(),
				DataChunks:   2,
				MappingValue: 0,
				Locations:    [][16]byte{locB, locA, locC},
			},
			{
				Path:         "file-c",
				Size:         1,
				SHA256:       sha256.Sum256([]byte{'c'}),
				WriteTime:    time.Now().Unix(),
				PrefixID:     uuid.Gen4(),
				DataChunks:   2,
				MappingValue: 0,
				Locations:    [][16]byte{locC, locA},
			},
		}

		for _, file := range files {
			err := l.SetFile(&file)
			if err != nil {
				t.Errorf("Couldn't SetFile: %v", err)
				return err
			}
		}

		localKeyFor := func(file File, idx int) string {
			return fmt.Sprintf("%v_%x_%v",
				uuid.Fmt(file.PrefixID), file.SHA256[:8], idx)
		}

		tests := []struct {
			ID      [16]byte
			Entries []string
		}{
			{
				ID: locA,
				Entries: []string{
					localKeyFor(files[0], 0),
					localKeyFor(files[1], 1),
					localKeyFor(files[2], 1),
				},
			},
			{
				ID: locB,
				Entries: []string{
					localKeyFor(files[0], 1),
					localKeyFor(files[1], 0),
				},
			},
			{
				ID: locC,
				Entries: []string{
					localKeyFor(files[0], 2),
					localKeyFor(files[1], 2),
					localKeyFor(files[2], 0),
				},
			},
		}

		for _, test := range tests {
			sort.Strings(test.Entries)

			names, err := l.GetLocationContents(test.ID, "", 0)
			if err != nil {
				t.Errorf("Couldn't GetLocationContents(%#v, \"\", 0): %v",
					uuid.Fmt(test.ID), err)
				return err
			}
			if !reflect.DeepEqual(names, test.Entries) {
				t.Errorf("GetLocationContents(%#v, \"\", 0) returned %#v, wanted %#v",
					uuid.Fmt(test.ID), names, test.Entries)
			}

			names = nil
			after := ""
			for {
				localNames, err := l.GetLocationContents(test.ID, after, 1)
				if err != nil {
					t.Errorf("Couldn't GetLocationContents(%#v, %#v, %v): %v",
						uuid.Fmt(test.ID), after, 1, err)
					return err
				}
				if len(localNames) > 1 {
					t.Errorf("GetLocationContents(%#v, %#v, %v) = %#v, too many results",
						uuid.Fmt(test.ID), after, 1, localNames)
				}
				if len(localNames) == 0 {
					break
				}
				names = append(names, localNames...)
				after = localNames[0]
			}
			if !reflect.DeepEqual(names, test.Entries) {
				t.Errorf("GetLocationContents on %v one by one returned %#v, wanted %#v",
					uuid.Fmt(test.ID), names, test.Entries)
			}
		}

		return nil
	})
	if err != nil {
		t.Errorf("Couldn't run transaction: %v", err)
	}
}