Example No. 1
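// scrubWAL clears old write-ahead-log entries inside a single read-write
// transaction.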
func (m *Multi) scrubWAL() error {
	return m.db.RunTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		return layer.WALClearOld()
	})
}
Example No. 2
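// getFile loads the metadata record for key inside a read-only transaction.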
func (m *Multi) getFile(key string) (*meta.File, error) {
	var file *meta.File
	err := m.db.RunReadTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		file, err = layer.GetFile(key)
		return err
	})
	if err != nil {
		return nil, err
	}
	return file, nil
}
Example No. 3
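// scrubLocationsAll repeatedly calls scrubLocationsStep until it has observed
// enough end-of-location signals to have covered every location (see the
// "need" calculation below).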
func (m *Multi) scrubLocationsAll() {
	// Get the number of locations; scrubLocationsStep returns true each time it
	// reaches the end of one of them.
	//
	// We assume Locations are not added or removed concurrently.
	var need int
	err := m.db.RunReadTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		locs, err := layer.AllLocations()
		if err != nil {
			return err
		}

		need = len(locs) * 2

		return nil
	})
	if err != nil {
		log.Printf("Couldn't get all locations: %v", err)
		return
	}

	if need == 0 {
		return
	}

	endpoints := 0
	for {
		done, err := m.scrubLocationsStep()
		if err != nil {
			log.Printf("Couldn't scrubLocationsStep in scrubLocationsAll: %v", err)
			return
		}
		if done {
			endpoints++
			if endpoints >= need {
				return
			}
		}
	}
}
Example No. 4
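// List returns the paths of up to limit files starting after the given key.
// The cancel channel is not used by this implementation.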
func (m *Multi) List(after string, limit int, cancel <-chan struct{}) ([]string, error) {
	var files []meta.File
	err := m.db.RunReadTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		files, err = layer.ListFiles(after, limit)
		return err
	})
	if err != nil {
		return nil, err
	}

	names := make([]string, len(files))
	for i, file := range files {
		names[i] = file.Path
	}

	return names, nil
}
Example No. 5
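// serveStores handles the stores endpoint. A POST applies one operation
// (rescan, scan, dead, undead, or delete); both GET and a successful POST then
// respond with the current list of locations as JSON.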
func (h *Handler) serveStores(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		var req storesRequest
		err := json.NewDecoder(r.Body).Decode(&req)
		if err != nil {
			httputil.RespondJSONError(w, err.Error(), http.StatusBadRequest)
			return
		}

		switch req.Operation {
		case "rescan":
			err = h.finder.Rescan()
			if err != nil {
				httputil.RespondJSONError(w, fmt.Sprintf("Couldn't rescan: %v", err),
					http.StatusInternalServerError)
				return
			}

		case "scan":
			err = h.finder.Scan(req.URL)
			if err != nil {
				httputil.RespondJSONError(w, fmt.Sprintf("Couldn't scan %v: %v", req.URL, err),
					http.StatusBadRequest)
				return
			}

		case "dead", "undead":
			id, err := uuid.Parse(req.UUID)
			if err != nil {
				httputil.RespondJSONError(w, "Couldn't parse UUID", http.StatusBadRequest)
				return
			}

			err = h.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}

				loc, err := layer.GetLocation(id)
				if err != nil {
					return err
				}

				if loc == nil {
					return kvl.ErrNotFound
				}

				loc.Dead = req.Operation == "dead"

				return layer.SetLocation(*loc)
			})
			if err != nil {
				if err == kvl.ErrNotFound {
					httputil.RespondJSONError(w, "No store with that UUID",
						http.StatusBadRequest)
					return
				}
				httputil.RespondJSONError(w, err.Error(), http.StatusInternalServerError)
				return
			}

		case "delete":
			id, err := uuid.Parse(req.UUID)
			if err != nil {
				httputil.RespondJSONError(w, "Couldn't parse UUID", http.StatusBadRequest)
				return
			}

			st := h.finder.StoreFor(id)
			if st != nil {
				httputil.RespondJSONError(w, "UUID currently connected", http.StatusBadRequest)
				return
			}

			err = h.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}

				loc, err := layer.GetLocation(id)
				if err != nil {
					return err
				}

				if loc == nil {
					return kvl.ErrNotFound
				}

				return layer.DeleteLocation(*loc)
			})
			if err != nil {
				if err == kvl.ErrNotFound {
					httputil.RespondJSONError(w, "No store with that UUID",
						http.StatusBadRequest)
					return
				}
				httputil.RespondJSONError(w, err.Error(), http.StatusInternalServerError)
				return
			}

		default:
			httputil.RespondJSONError(w, "unsupported operation", http.StatusBadRequest)
			return
		}
	} else if r.Method != "GET" {
		w.Header().Set("Allow", "GET, POST")
		httputil.RespondJSONError(w, "bad method", http.StatusMethodNotAllowed)
		return
	}

	finderEntries := h.finder.Stores()

	ret := make([]storesResponseEntry, 0, 10)
	err := h.db.RunReadTx(func(ctx kvl.Ctx) error {
		ret = ret[:0]

		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		locs, err := layer.AllLocations()
		if err != nil {
			return err
		}

		for _, loc := range locs {
			fe, connected := finderEntries[loc.UUID]

			ret = append(ret, storesResponseEntry{
				UUID:      uuid.Fmt(loc.UUID),
				URL:       loc.URL,
				Name:      loc.Name,
				Dead:      loc.Dead,
				Connected: connected,
				LastSeen:  time.Unix(loc.LastSeen, 0).UTC(),
				Free:      fe.Free,
			})
		}

		return nil
	})
	if err != nil {
		httputil.RespondJSONError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("content-type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(ret)
}
Example No. 6
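// scrubLocationsStep scrubs one batch of chunks on one location: it advances a
// cursor over all locations, compares the chunks the chosen store reports
// against the chunks the metadata says it should have, deletes chunks the
// metadata does not know about (unless a WAL entry protects them or the
// location turns out to legitimately hold them), and triggers m.rebuild for
// files with missing chunks as well as for files that still have chunks on a
// dead store. It returns true once the chosen location's listing is exhausted
// (or no locations exist).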
func (m *Multi) scrubLocationsStep() (bool, error) {
	uuidCmp := func(a, b [16]byte) int {
		for i := 0; i < 16; i++ {
			if a[i] < b[i] {
				return -1
			} else if a[i] > b[i] {
				return 1
			}
		}
		return 0
	}

	var thisLoc meta.Location
	var haveLoc bool
	var from string
	var wantFiles []string
	err := m.db.RunTx(func(ctx kvl.Ctx) error {
		from = ""
		wantFiles = nil
		haveLoc = false

		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		pos, err := layer.GetConfig("scrublocationpos")
		if err != nil {
			return err
		}

		// parse errors are fine; since will just be the zero UUID
		since, _ := uuid.Parse(string(pos))

		allLocations, err := layer.AllLocations()
		if err != nil {
			return err
		}

		if len(allLocations) == 0 {
			// no locations exist
			return nil
		}

		for {
			// get the Location with the lowest UUID greater than since
			haveLoc = false
			for _, loc := range allLocations {
				if uuidCmp(loc.UUID, since) >= 0 && (!haveLoc || uuidCmp(thisLoc.UUID, loc.UUID) > 0) {
					thisLoc = loc
					haveLoc = true
				}
			}

			if haveLoc {
				break
			}

			// all locations handled already, try again from the top
			if since == [16]byte{} {
				panic("locations exist but couldn't find one")
			}
			since = [16]byte{}
		}

		// set since to the next highest UUID
		since = thisLoc.UUID
		for i := 15; i >= 0; i-- {
			since[i]++
			if since[i] != 0 {
				break
			}
		}

		err = layer.SetConfig("scrublocationpos", []byte(uuid.Fmt(since)))
		if err != nil {
			return err
		}

		// now get the file list in the location we've chosen, resuming at the
		// saved position (per location)

		fromB, err := layer.GetConfig(
			"scrublocationpos-" + uuid.Fmt(thisLoc.UUID))
		if err != nil {
			return err
		}
		from = string(fromB)

		files, err := layer.GetLocationContents(thisLoc.UUID, from,
			scrubLocationsCount)
		if err != nil {
			return err
		}

		var newLocPos string
		if len(files) > 0 {
			newLocPos = files[len(files)-1]
		}
		err = layer.SetConfig(
			"scrublocationpos-"+uuid.Fmt(thisLoc.UUID), []byte(newLocPos))
		if err != nil {
			return err
		}

		wantFiles = files

		return nil
	})
	if err != nil {
		return false, err
	}

	if !haveLoc {
		// no chunk servers
		return true, nil
	}

	wantFilesMap := make(map[string]struct{}, len(wantFiles))
	for _, f := range wantFiles {
		wantFilesMap[f] = struct{}{}
	}

	st := m.finder.StoreFor(thisLoc.UUID)
	if st == nil {
		log.Printf("Couldn't scrubLocation %v, it is offline",
			uuid.Fmt(thisLoc.UUID))
		return false, nil
	}

	haveFiles, err := st.List(from, scrubLocationsCount, nil)
	if err != nil {
		log.Printf("Couldn't List from %v: %v", uuid.Fmt(thisLoc.UUID), err)
		return false, nil
	}

	haveFilesMap := make(map[string]struct{}, len(haveFiles))
	for _, f := range haveFiles {
		haveFilesMap[f] = struct{}{}
	}

	checkHaveFiles := make([]string, 0, len(haveFiles))
	for _, f := range haveFiles {
		if len(wantFiles) < scrubLocationsCount || f <= wantFiles[len(wantFiles)-1] {
			checkHaveFiles = append(checkHaveFiles, f)
		}
	}

	checkWantFiles := make([]string, 0, len(wantFiles))
	for _, f := range wantFiles {
		if len(haveFiles) < scrubLocationsCount || f <= haveFiles[len(haveFiles)-1] {
			checkWantFiles = append(checkWantFiles, f)
		}
	}

	for _, have := range checkHaveFiles {
		if _, ok := wantFilesMap[have]; !ok {
			pid, err := prefixIDFromLocalKey(have)
			if err != nil {
				log.Printf("Couldn't figure out PrefixID from localKey(%#v): %v",
					have, err)

				err = st.CAS(have, store.AnyV, store.MissingV, nil)
				if err != nil {
					log.Printf("Couldn't delete extraneous chunk %v from %v: %v",
						have, uuid.Fmt(thisLoc.UUID), err)
					continue
				}

				continue
			}

			var inWAL bool
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}

				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}

				if !inWAL {
					// check to see if the write finished
					inWAL, err = layer.LocationShouldHave(thisLoc.UUID, have)
					if err != nil {
						return err
					}
				}

				return nil
			})
			if err != nil {
				log.Printf("Couldn't check WAL for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("extraneous chunk %v on %v, but is in WAL, skipping",
					have, uuid.Fmt(thisLoc.UUID))
				continue
			}

			log.Printf("deleting extraneous chunk %v on %v",
				have, uuid.Fmt(thisLoc.UUID))
			err = st.CAS(have, store.AnyV, store.MissingV, nil)
			if err != nil {
				log.Printf("Couldn't delete extraneous chunk %v from %v: %v",
					have, uuid.Fmt(thisLoc.UUID), err)
				continue
			}
		}
	}

	for _, want := range checkWantFiles {
		if _, ok := haveFilesMap[want]; !ok {
			log.Printf("missing chunk %v on %v", want, uuid.Fmt(thisLoc.UUID))

			pid, err := prefixIDFromLocalKey(want)
			if err != nil {
				log.Printf("Couldn't figure out PrefixID from localKey(%#v): %v",
					want, err)
				continue
			}

			var inWAL bool
			var path string
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				path = ""

				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}

				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}

				if !inWAL {
					path, err = layer.PathForPrefixID(pid)
					if err != nil {
						return err
					}
				}

				return nil
			})
			if err != nil {
				log.Printf("Couldn't get path for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("skipping rebuild of %v, prefix in WAL", path)
				continue
			}

			err = m.rebuild(path)
			if err != nil {
				log.Printf("Couldn't rebuild %v: %v", path, err)
				continue
			}

			log.Printf("successfully rebuilt %v", path)
		}
	}

	if thisLoc.Dead && len(wantFiles) > 0 {
		// files that still have chunks on a dead store should be rebuilt elsewhere

		for _, want := range wantFiles {
			pid, err := prefixIDFromLocalKey(want)
			if err != nil {
				// already logged in a loop above
				continue
			}

			var inWAL bool
			var path string
			err = m.db.RunTx(func(ctx kvl.Ctx) error {
				path = ""

				layer, err := meta.Open(ctx)
				if err != nil {
					return err
				}

				inWAL, err = layer.WALCheck(pid)
				if err != nil {
					return err
				}

				if !inWAL {
					path, err = layer.PathForPrefixID(pid)
					if err != nil {
						return err
					}
				}

				return nil
			})
			if err != nil {
				log.Printf("Couldn't check wal/path for PrefixID %v: %v",
					uuid.Fmt(pid), err)
				continue
			}

			if inWAL {
				log.Printf("skipping rebuild of PrefixID %v on dead store, prefix in WAL", uuid.Fmt(pid))
				continue
			}

			err = m.rebuild(path)
			if err != nil {
				log.Printf("Couldn't rebuild %v: %v", path, err)
				continue
			}

			log.Printf("successfully rebuilt %v", path)
		}
	}

	return len(wantFiles) == 0, nil
}
Example No. 7
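// rebalanceStep scans files in small batches, resuming at the "rebalpos"
// cursor, and tries to move their chunks from nearly full stores to emptier
// ones; it stops after rebalanceFileCount chunks have been moved or
// rebalanceMaxScan files have been examined.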
func (m *Multi) rebalanceStep() error {
	finderEntries := m.finder.Stores()

	// bail out early if there's no rebalancing possible
	mostFree := int64(0)
	leastFree := int64(0)
	markedOne := false
	for _, fe := range finderEntries {
		if !markedOne {
			markedOne = true
			mostFree = fe.Free
			leastFree = fe.Free
		}
		if fe.Free > mostFree {
			mostFree = fe.Free
		}
		if fe.Free < leastFree {
			leastFree = fe.Free
		}
	}
	if mostFree-leastFree < rebalanceMinDifference {
		return nil
	}

	moved := 0
	scanned := 0
	defer func() {
		log.Printf("Rebalanced %v chunks out of %v scanned", moved, scanned)
	}()

	for moved < rebalanceFileCount && scanned < rebalanceMaxScan {
		var files []meta.File
		err := m.db.RunTx(func(ctx kvl.Ctx) error {
			files = nil

			l, err := meta.Open(ctx)
			if err != nil {
				return err
			}

			startkey, err := l.GetConfig("rebalpos")
			if err != nil {
				return err
			}

			files, err = l.ListFiles(string(startkey), 20)
			if err != nil {
				return err
			}

			if len(files) > 0 {
				err = l.SetConfig("rebalpos", []byte(files[len(files)-1].Path))
			} else {
				err = l.SetConfig("rebalpos", []byte{})
			}
			if err != nil {
				return err
			}

			return nil
		})
		if err != nil {
			return err
		}

		if len(files) == 0 {
			break
		}

		for _, f := range files {
			did, err := m.rebalanceFile(f, finderEntries)
			if err != nil {
				log.Printf("Failed to rebalance %v: %v", f.Path, err)
			}
			scanned++
			if did {
				moved++
				if moved >= rebalanceFileCount {
					break
				}
			}
			if scanned >= rebalanceMaxScan {
				break
			}
		}
	}

	return nil
}
Example No. 8
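// rebalanceFile moves one chunk of f from the store holding it with the least
// free space to the store not holding it with the most free space, provided
// the difference exceeds rebalanceMinDifference. The copy is protected by a
// WAL entry, and the metadata is only updated if the file was not modified
// concurrently; otherwise the newly written copy is rolled back.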
func (m *Multi) rebalanceFile(f meta.File, finderEntries map[[16]byte]FinderEntry) (bool, error) {
	// find the location storing this file that has the least free space
	var minI int
	var minF int64
	var minS store.Store
	for i, l := range f.Locations {
		fe, ok := finderEntries[l]
		if !ok {
			continue
		}

		if minS == nil || minF > fe.Free {
			minS = fe.Store
			minF = fe.Free
			minI = i
		}
	}

	if minS == nil {
		return false, nil
	}

	// find the location NOT storing this file that has the most free space
	var maxF int64
	var maxS store.Store
	for id, fe := range finderEntries {
		if fe.Dead {
			continue
		}

		found := false
		for _, locid := range f.Locations {
			if locid == id {
				found = true
				break
			}
		}
		if found {
			continue
		}

		if maxS == nil || maxF < fe.Free {
			maxF = fe.Free
			maxS = fe.Store
		}
	}

	if maxS == nil {
		return false, nil
	}

	if maxF-minF < rebalanceMinDifference {
		return false, nil
	}

	// we should move the chunk at index minI from minS to maxS

	err := m.db.RunTx(func(ctx kvl.Ctx) error {
		l, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		return l.WALMark(f.PrefixID)
	})
	if err != nil {
		return false, err
	}
	defer func() {
		// TODO: how to handle errors here?
		m.db.RunTx(func(ctx kvl.Ctx) error {
			l, err := meta.Open(ctx)
			if err != nil {
				return err
			}

			return l.WALClear(f.PrefixID)
		})
	}()

	localKey := localKeyFor(&f, minI)
	data, st, err := minS.Get(localKey, store.GetOptions{})
	if err != nil {
		return false, err
	}

	setCASV := store.CASV{
		Present: true,
		SHA256:  st.SHA256,
		Data:    data,
	}

	err = maxS.CAS(localKey, store.MissingV, setCASV, nil)
	if err != nil {
		return false, err
	}

	newF := f
	newF.Locations = make([][16]byte, len(f.Locations))
	copy(newF.Locations, f.Locations)
	newF.Locations[minI] = maxS.UUID()

	err = m.db.RunTx(func(ctx kvl.Ctx) error {
		l, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		f2, err := l.GetFile(f.Path)
		if err != nil {
			return err
		}

		if f2.PrefixID != f.PrefixID {
			return errModifiedDuringBalance
		}

		if len(f2.Locations) != len(f.Locations) {
			return errModifiedDuringBalance
		}

		for i, floc := range f.Locations {
			if floc != f2.Locations[i] {
				return errModifiedDuringBalance
			}
		}

		err = l.SetFile(&newF)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		maxS.CAS(localKey, setCASV, store.MissingV, nil) // ignore error
		return false, err
	}

	err = minS.CAS(localKey, setCASV, store.MissingV, nil)
	if err != nil {
		log.Printf("Couldn't remove rebalanced chunk from old location %v: %v",
			uuid.Fmt(minS.UUID()), err)
	}

	fe := finderEntries[minS.UUID()]
	fe.Free += int64(len(data))
	finderEntries[minS.UUID()] = fe

	fe = finderEntries[maxS.UUID()]
	fe.Free -= int64(len(data))
	finderEntries[maxS.UUID()] = fe

	return true, nil
}
Example No. 9
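// orderTargets returns the currently connected, non-dead stores in a weighted
// random order (weights from getStoreWeights), or ErrInsufficientStores if
// fewer than conf.Total stores are available.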
func (m *Multi) orderTargets() ([]store.Store, error) {
	m.mu.Lock()
	conf := m.config
	m.mu.Unlock()

	storesMap := make(map[[16]byte]store.Store)
	for id, fe := range m.finder.Stores() {
		storesMap[id] = fe.Store
	}

	var locs []meta.Location
	err := m.db.RunReadTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		locs, err = layer.AllLocations()
		return err
	})
	if err != nil {
		return nil, err
	}

	for _, loc := range locs {
		if loc.Dead {
			delete(storesMap, loc.UUID)
		}
	}

	if len(storesMap) < conf.Total {
		return nil, ErrInsufficientStores
	}

	weights := m.getStoreWeights()

	stores := make([]store.Store, 0, len(storesMap))
	for len(weights) > 0 {
		totalWeight := int64(0)
		for _, w := range weights {
			totalWeight += w
		}

		// weighted random selection: choose an id with probability
		// proportional to its weight
		r := rand.Int63n(totalWeight)
		var chosenID [16]byte
		for id, w := range weights {
			if r < w {
				chosenID = id
				break
			}
			r -= w
		}

		st := storesMap[chosenID]
		if st != nil {
			stores = append(stores, st)
		}

		delete(weights, chosenID)
	}

	return stores, nil
}
Example No. 10
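// CAS performs a compare-and-swap on the file at key: it checks the "from"
// condition, writes the new chunks under a WAL entry when "to" is present, and
// then re-checks the condition in a transaction before updating or removing
// the metadata record. Replaced or orphaned files are queued for asynchronous
// deletion.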
func (m *Multi) CAS(key string, from, to store.CASV, cancel <-chan struct{}) error {
	var file *meta.File
	prefixid := uuid.Gen4()

	if to.Present {
		// check before doing work; additionally, add a WAL entry
		err := m.db.RunTx(func(ctx kvl.Ctx) error {
			layer, err := meta.Open(ctx)
			if err != nil {
				return err
			}

			oldFile, err := layer.GetFile(key)
			if err != nil {
				return err
			}

			if !from.Any {
				if from.Present {
					if oldFile == nil {
						return store.ErrCASFailure
					}
					if oldFile.SHA256 != from.SHA256 {
						return store.ErrCASFailure
					}
				} else {
					if oldFile != nil {
						return store.ErrCASFailure
					}
				}
			}

			err = layer.WALMark(prefixid)
			if err != nil {
				return err
			}

			return nil
		})
		if err != nil {
			return err
		}

		file, err = m.writeChunks(key, to.Data, to.SHA256, prefixid)
		if err != nil {
			return err
		}
	}

	var oldFile *meta.File
	err := m.db.RunTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		oldFile, err = layer.GetFile(key)
		if err != nil {
			return err
		}

		if !from.Any {
			if from.Present {
				if oldFile == nil {
					return store.ErrCASFailure
				}
				if oldFile.SHA256 != from.SHA256 {
					return store.ErrCASFailure
				}
			} else {
				if oldFile != nil {
					return store.ErrCASFailure
				}
			}
		}

		if to.Present {
			err = layer.SetFile(file)
			if err != nil {
				return err
			}

			err = layer.WALClear(prefixid)
			if err != nil {
				return err
			}
		} else {
			if from.Present || from.Any {
				err = layer.RemoveFilePath(key)
				if err == kvl.ErrNotFound {
					if !from.Any {
						// internal inconsistency
						return store.ErrCASFailure
					}
				} else if err != nil {
					return err
				}
			}
		}

		return nil
	})
	if err != nil {
		if file != nil {
			m.asyncDeletions <- file
		}
		return err
	}

	if oldFile != nil {
		m.asyncDeletions <- oldFile
	}

	return nil
}
Example No. 11
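// TestFinderScan checks that Scan registers a reachable store, that a killed
// store is removed from the Finder but kept in the database, and that Rescan
// picks it up again once the store comes back.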
func TestFinderScan(t *testing.T) {
	db := ram.New()

	mock := storetests.NewMockStore(0)

	cs, err := chunkserver.New([]store.Store{mock})
	if err != nil {
		t.Fatalf("Couldn't create chunkserver: %v", err)
	}
	defer cs.Close()

	cs.WaitAllAvailable()

	killer := newKillHandler(cs)
	srv := httptest.NewServer(killer)
	defer srv.Close()

	f, err := NewFinder(db)
	if err != nil {
		t.Fatalf("Couldn't create new finder: %v", err)
	}
	defer f.Stop()

	err = f.Scan(srv.URL)
	if err != nil {
		t.Fatalf("Couldn't scan %v: %v", srv.URL, err)
	}

	// newly scanned store should be in the Finder
	stores := f.Stores()
	if _, ok := stores[mock.UUID()]; !ok {
		t.Fatalf("Finder did not find uuid of mock store")
	}

	// kill the store and update the Finder
	killer.setKilled(true)
	f.test(0)

	// should have been removed
	stores = f.Stores()
	if len(stores) > 0 {
		t.Fatalf("Finder did not remove dead store")
	}

	// but should stay in the DB
	err = db.RunReadTx(func(ctx kvl.Ctx) error {
		layer, err := meta.Open(ctx)
		if err != nil {
			return err
		}

		loc, err := layer.GetLocation(mock.UUID())
		if err != nil {
			return err
		}

		if loc == nil {
			return fmt.Errorf("No location in database")
		}

		return nil
	})
	if err != nil {
		t.Fatalf("Couldn't verify locations: %v", err)
	}

	// when the store comes back
	killer.setKilled(false)
	err = f.Rescan()
	if err != nil {
		t.Fatalf("Couldn't Rescan: %v", err)
	}

	// it should be there again
	stores = f.Stores()
	if _, ok := stores[mock.UUID()]; !ok {
		t.Fatalf("Finder did not find uuid of mock store after resurrection")
	}
}