Example #1
func main() {
	bytearray.EnableAutoGC(60, 74)

	var lockFile *lockfile.LockFile

	runtime.SetBlockProfileRate(1000)
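	// Serve the default HTTP mux on :6060 so profiling data is reachable
	// (this assumes a net/http/pprof import elsewhere in the package registers its handlers)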
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	defer func() {
		// Panic error handling
		if !DEBUG {
			if r := recover(); r != nil {
				fmt.Println(r)
				if lockFile != nil {
					lockFile.Unlock()
				}
				os.Exit(1)
			}
		}
	}()

	// Figure out where to load/save options
	{
		LocalStoragePath = filepath.Join(".", ".hashback")
		home, err := filepath.Abs(userHomeFolder())
		if err == nil {
			s, err := os.Stat(home)
			if err == nil && s.IsDir() {
				LocalStoragePath = filepath.Join(home, ".hashback")
			}
		}
		err = os.MkdirAll(filepath.Dir(LocalStoragePath), 0700)
		PanicOn(err)
	}

	session := NewBackupSession()

	cmd.Title = fmt.Sprintf("Hashback %s (Hashbox Backup Client)", Version)

	cmd.OptionsFile = filepath.Join(LocalStoragePath, "options.json")

	cmd.BoolOption("debug", "", "Debug output", &DEBUG, cmd.Hidden)

	var queueSizeMB int64
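	// The queue size is given in MiB on the command line and converted to bytes on change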
	cmd.IntOption("queuesize", "", "<MiB>", "Change sending queue size", &queueSizeMB, cmd.Hidden|cmd.Preference).OnChange(func() {
		SendingQueueSize = queueSizeMB * 1024 * 1024
	})

	cmd.StringOption("user", "", "<username>", "Username", &session.User, cmd.Preference|cmd.Required)
	var accesskey []byte
	cmd.ByteOption("accesskey", "", "", "Hashbox server accesskey", &accesskey, cmd.Preference|cmd.Hidden).OnChange(func() {
		var key core.Byte128
		copy(key[:16], accesskey[:])
		session.AccessKey = &key
	})
	var backupkey []byte
	cmd.ByteOption("backupkey", "", "", "Hashbox server backupkey", &backupkey, cmd.Preference|cmd.Hidden).OnChange(func() {
		var key core.Byte128
		copy(key[:16], backupkey[:])
		session.BackupKey = &key
	})
	var password string
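	// Setting a password re-derives both the access key and the backup key from user+password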
	cmd.StringOption("password", "", "<password>", "Password", &password, cmd.Standard).OnChange(func() {
		{
			key := GenerateAccessKey(session.User, password)
			session.AccessKey = &key
			accesskey = session.AccessKey[:]
		}
		{
			key := GenerateBackupKey(session.User, password)
			session.BackupKey = &key
			backupkey = session.BackupKey[:]
		}
	}).OnSave(func() {
		if session.User == "" {
			panic(errors.New("Unable to save login unless both user and password options are specified"))
		}
	})

	cmd.StringOption("server", "", "<ip>:<port>", "Hashbox server address", &session.ServerString, cmd.Preference|cmd.Required)
	cmd.BoolOption("full", "", "Force a non-incremental store", &session.FullBackup, cmd.Preference)
	cmd.BoolOption("verbose", "", "Show verbose output", &session.Verbose, cmd.Preference)
	cmd.BoolOption("progress", "", "Show progress during store", &session.ShowProgress, cmd.Preference)
	cmd.BoolOption("paint", "", "Paint!", &session.Paint, cmd.Preference).OnChange(func() {
		if session.Paint {
			session.Verbose = false
			session.ShowProgress = false
		}
	})

	cmd.Command("info", "", func() {
		session.Connect()
		defer session.Close(true)

		info := session.Client.GetAccountInfo()
		hashbackEnabled := false
		var dlist []core.Dataset
		for _, d := range info.DatasetList {
			if d.Name == "\x07HASHBACK_DEK" {
				hashbackEnabled = true
			}
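			// Internal datasets (like "\x07HASHBACK_DEK") begin with a control character;
			// only datasets with printable names are listed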
			if d.Name[0] > 32 {
				dlist = append(dlist, d)
			}
		}

		// fmt.Println("* TODO: Add quota and total size info")
		if hashbackEnabled {
			fmt.Println("Account is setup for Hashback")
		} else {
			fmt.Println("Account is NOT setup for Hashback")
		}
		fmt.Println()
		if len(dlist) == 0 {
			fmt.Println("No datasets")
		} else {
			fmt.Println("Size        Dataset")
			fmt.Println("--------    -------")
			for _, d := range dlist {
				fmt.Printf("%8s    %s\n", core.HumanSize(d.Size), d.Name)
			}
		}

	})
	cmd.Command("list", "<dataset> [(<backup id>|.) [\"<path>\"]]", func() {
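		// cmd.Args apparently includes the program name and the command name,
		// so the first user-supplied argument is cmd.Args[2]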
		if len(cmd.Args) < 3 {
			panic(errors.New("Missing dataset argument"))
		}

		session.Connect()
		defer session.Close(true)

		list := session.Client.ListDataset(cmd.Args[2])
		if len(list.States) > 0 {

			if len(cmd.Args) < 4 {

				fmt.Println("Backup id                           Backup start                 Total size     Diff prev")
				fmt.Println("--------------------------------    -------------------------    ----------    ----------")

				for _, e := range list.States {
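					// The first 8 bytes of the StateID encode the backup start time in nanoseconds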
					timestamp := binary.BigEndian.Uint64(e.State.StateID[:])
					date := time.Unix(0, int64(timestamp))

					if e.StateFlags&core.StateFlagInvalid == core.StateFlagInvalid {
						fmt.Printf("%-32x    %-25s    !!! INVALID DATASET\n", e.State.StateID, date.Format(time.RFC3339))
					} else {
						fmt.Printf("%-32x    %-25s    %10s    %10s\n", e.State.StateID, date.Format(time.RFC3339), core.HumanSize(e.State.Size), core.HumanSize(e.State.UniqueSize))
					}
				}
			} else {
				var state *core.DatasetState
				for i, e := range list.States {
					// "." matches only valid states; an explicit backup id matches regardless
					if (e.StateFlags&core.StateFlagInvalid != core.StateFlagInvalid && cmd.Args[3] == ".") || fmt.Sprintf("%x", e.State.StateID[:]) == cmd.Args[3] {
						state = &list.States[i].State
					}
				}
				if state == nil {
					panic(errors.New("Backup id not found"))
				}

				listpath := "*"
				if len(cmd.Args) > 4 {
					listpath = cmd.Args[4]
				}
				filelist, err := session.findPathMatch(state.BlockID, listpath)
				if err != nil {
					panic(err)
				}

				fmt.Printf("Listing %s\n", listpath)
				if len(filelist) > 0 {
					for _, f := range filelist {
						var niceDate, niceSize string
						date := time.Unix(0, int64(f.ModTime))

						if time.Since(date).Hours() > 24*300 { // 300 days
							niceDate = date.Format("Jan _2  2006")
						} else {
							niceDate = date.Format("Jan _2 15:04")
						}
						if f.ContentType != ContentTypeDirectory {
							niceSize = core.ShortHumanSize(f.FileSize)
						}
						fmt.Printf("%-10s  %6s   %-12s   %s\n", os.FileMode(f.FileMode), niceSize, niceDate, f.FileName)
					}
				} else {
					fmt.Println("No files matching")
				}
			}
		} else {
			fmt.Println("Dataset is empty or does not exist")
		}

	})

	var pidName string
	var retainWeeks, retainDays, intervalBackup int64
	cmd.StringOption("pid", "store", "<filename>", "Create a PID file (lock-file)", &pidName, cmd.Standard)
	cmd.StringListOption("ignore", "store", "<pattern>", "Ignore files matching pattern", &DefaultIgnoreList, cmd.Standard|cmd.Preference)
	cmd.IntOption("interval", "store", "<minutes>", "Keep running backups every <minutes> until interrupted", &intervalBackup, cmd.Standard)
	cmd.IntOption("retaindays", "store", "<days>", "Remove backups older than 24h but keep one per day for <days>, 0 = keep all daily", &retainDays, cmd.Standard|cmd.Preference)
	cmd.IntOption("retainweeks", "store", "<weeks>", "Remove backups older than 24h but keep one per week for <weeks>, 0 = keep all weekly", &retainWeeks, cmd.Standard|cmd.Preference)
	cmd.Command("store", "<dataset> (<folder> | <file>)...", func() {
		for _, d := range DefaultIgnoreList {
			ignore := ignoreEntry{pattern: d, match: core.ExpandEnv(d)} // Expand ignore patterns

			if ignore.match == "" {
				continue
			}
			if _, err := filepath.Match(ignore.match, "ignore"); err != nil {
				panic(fmt.Errorf("Invalid ignore pattern %s", ignore.pattern))
			}

			if os.IsPathSeparator(ignore.match[len(ignore.match)-1]) {
				ignore.match = ignore.match[:len(ignore.match)-1]
				ignore.dirmatch = true
			}
			if strings.IndexRune(ignore.match, os.PathSeparator) >= 0 { // path in pattern
				ignore.pathmatch = true
			}

			session.ignoreList = append(session.ignoreList, ignore)
		}

		if len(cmd.Args) < 3 {
			panic(errors.New("Missing dataset argument"))
		}
		if len(cmd.Args) < 4 {
			panic(errors.New("Missing source file or folder argument"))
		}

		if pidName != "" {
			var err error
			if lockFile, err = lockfile.Lock(pidName); err != nil {
				fmt.Println(err)
				os.Exit(0) // exit without error, presumably so an already-running instance is not treated as a failure
			} else {
				defer lockFile.Unlock()
			}
		}

		// latestBackup holds the nanosecond timestamp of the most recent backup and stays 0
		// until one is known; in interval mode the first iteration only looks up the last
		// completed backup so the schedule resumes from it instead of storing immediately
		var latestBackup uint64
		for latestBackup == 0 || intervalBackup > 0 { // keep looping when interval backups are enabled

			func() {
				session.Connect()
				defer session.Close(true)
				if latestBackup > 0 || intervalBackup == 0 {
					session.State = &core.DatasetState{StateID: session.Client.SessionNonce}
					session.Store(cmd.Args[2], cmd.Args[3:]...)
					if retainWeeks > 0 || retainDays > 0 {
						session.Retention(cmd.Args[2], int(retainDays), int(retainWeeks))
					}
					latestBackup = binary.BigEndian.Uint64(session.State.StateID[:])
					date := time.Unix(0, int64(latestBackup))
					fmt.Printf("Backup %s %x (%s) completed\n", cmd.Args[2], session.State.StateID[:], date.Format(time.RFC3339))
				} else {
					list := session.Client.ListDataset(cmd.Args[2])
					for i := len(list.States) - 1; i >= 0; i-- {
						if list.States[i].StateFlags&core.StateFlagInvalid != core.StateFlagInvalid {
							latestBackup = binary.BigEndian.Uint64(list.States[i].State.StateID[:])
							break
						}
					}
				}
			}()
			if intervalBackup > 0 {
				date := time.Unix(0, int64(latestBackup)).Add(time.Duration(intervalBackup) * time.Minute)
				if date.After(time.Now()) {
					fmt.Printf("Next backup scheduled for %s\n", date.Format(time.RFC3339))
					time.Sleep(time.Until(date))
				} else {
					latestBackup = 1 // the scheduled time has already passed; trigger a new backup immediately
				}
			}
		}
	})

	cmd.Command("restore", "<dataset> (<backup id>|.) [\"<path>\"...] <dest-folder>", func() {
		if len(cmd.Args) < 3 {
			panic(errors.New("Missing dataset argument"))
		}
		if len(cmd.Args) < 4 {
			panic(errors.New("Missing backup id (or \".\")"))
		}
		if len(cmd.Args) < 5 {
			panic(errors.New("Missing destination folder argument"))
		}

		session.Connect()
		defer session.Close(true)

		list := session.Client.ListDataset(cmd.Args[2])

		stateid := cmd.Args[3]
		restorepath := cmd.Args[len(cmd.Args)-1]
		restorelist := cmd.Args[4 : len(cmd.Args)-1]

		found := -1
		if stateid == "." {
			found = len(list.States) - 1 // "." selects the most recent backup
		} else {
			for i, e := range list.States {
				if stateid == fmt.Sprintf("%x", e.State.StateID) {
					found = i
					break
				}
			}
			if found < 0 {
				panic(errors.New("Backup id " + cmd.Args[3] + " not found in dataset " + cmd.Args[2]))
			}
		}
		if found < 0 {
			panic(errors.New("No backup found under dataset " + cmd.Args[2]))
		}

		timestamp := binary.BigEndian.Uint64(list.States[found].State.StateID[:])
		date := time.Unix(0, int64(timestamp))
		fmt.Printf("Restoring from %x (%s) to path %s\n", list.States[found].State.StateID, date.Format(time.RFC3339), restorepath)

		session.Restore(list.States[found].State.BlockID, restorepath, restorelist...)
	})
	cmd.Command("diff", "<dataset> (<backup id>|.) [\"<path>\"...] <local-folder>", func() {
		if len(cmd.Args) < 3 {
			panic(errors.New("Missing dataset argument"))
		}
		if len(cmd.Args) < 4 {
			panic(errors.New("Missing backup id (or \".\")"))
		}
		if len(cmd.Args) < 5 {
			panic(errors.New("Missing local folder argument"))
		}

		session.Connect()
		defer session.Close(true)

		list := session.Client.ListDataset(cmd.Args[2])

		stateid := cmd.Args[3]
		restorepath := cmd.Args[len(cmd.Args)-1]
		restorelist := cmd.Args[4 : len(cmd.Args)-1]

		found := -1
		if stateid == "." {
			found = len(list.States) - 1 // "." selects the most recent backup
		} else {
			for i, e := range list.States {
				if stateid == fmt.Sprintf("%x", e.State.StateID) {
					found = i
					break
				}
			}
			if found < 0 {
				panic(errors.New("Backup id " + cmd.Args[3] + " not found in dataset " + cmd.Args[2]))
			}
		}
		if found < 0 {
			panic(errors.New("No backup found under dataset " + cmd.Args[2]))
		}

		timestamp := binary.BigEndian.Uint64(list.States[found].State.StateID[:])
		date := time.Unix(0, int64(timestamp))
		fmt.Printf("Comparing %x (%s) to path %s\n", list.States[found].State.StateID, date.Format(time.RFC3339), restorepath)

		session.DiffRestore(list.States[found].State.BlockID, restorepath, restorelist...)
		if session.DifferentFiles > 0 {
			os.Exit(2) // a non-zero exit code signals that differences were found
		}
	})
	cmd.Command("remove", "<dataset> <backup id>", func() {
		if len(cmd.Args) < 3 {
			panic(errors.New("Missing dataset argument"))
		}
		if len(cmd.Args) < 4 {
			panic(errors.New("Missing backup id"))
		}

		session.Connect()
		defer session.Close(true)

		list := session.Client.ListDataset(cmd.Args[2])

		stateid := cmd.Args[3]
		found := -1
		for i, e := range list.States {
			if stateid == fmt.Sprintf("%x", e.State.StateID) {
				found = i
				break
			}
		}
		if found < 0 {
			panic(errors.New("Backup id " + cmd.Args[3] + " not found in dataset " + cmd.Args[2]))
		}

		fmt.Printf("Removing backup %x from %s\n", list.States[found].State.StateID, cmd.Args[2])
		session.Client.RemoveDatasetState(cmd.Args[2], list.States[found].State.StateID)
	})

	signalchan := make(chan os.Signal, 1)
	defer close(signalchan)
	signal.Notify(signalchan, os.Interrupt)
	signal.Notify(signalchan, os.Kill) // note: os.Kill (SIGKILL) cannot actually be trapped on most platforms
	go func() {
		for range signalchan {
			if session != nil {
				session.Close(false)
			}
			if lockFile != nil {
				lockFile.Unlock()
			}
			os.Exit(2)
		}
	}()

	if err := cmd.Parse(); err != nil {
		panic(err)
	}
}
Example #2
func run() int {
	bytearray.EnableAutoGC(60, 74)

	runtime.SetBlockProfileRate(1000)
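	// Expose profiling on :6060; the handlers are registered on the default mux
	// by a net/http/pprof import (assumed to be elsewhere in the package)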
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()

	/*defer func() {
		// Panic error handling; a deferred func cannot return a value for run(),
		// so exit directly instead
		if r := recover(); r != nil {
			fmt.Println(r)
			os.Exit(1)
		}
	}()*/

	var err error

	exeRoot, _ := osext.ExecutableFolder()

	serverPort := int64(DEFAULT_SERVER_IP_PORT)
	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")

	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)
	loglvl := int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace)", &loglvl, cmd.Hidden).OnChange(func() {
		core.LogLevel = int(loglvl)
	})

	// Note that datPath has not been set yet when the handlers are created; that is fine
	// because neither handler starts opening files on its own before arguments are parsed.
	// TODO: remove the datPath global and pass it into the handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()

	cmd.Command("", "", func() { // default command: run the server
		serverAddr := net.TCPAddr{Port: int(serverPort)}

		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(fmt.Errorf("Error listening: %v", err))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())

		done = make(chan bool)
		defer close(done)

		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		signal.Notify(signalchan, os.Kill) // note: os.Kill generally cannot be delivered to a handler
		go func() {
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()

		go connectionListener(listener)

		go func() {
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()

		// blocking channel read
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}

		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side.
	// It should be an interface to an administrative tool instead
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		if len(cmd.Args) < 4 {
			panic(errors.New("Missing username or password argument to adduser command"))
		}

		if !accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])}) {
			panic(errors.New("Error creating account"))
		}
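		// Bootstrap the account for hashback: generate a data encryption key, encrypt it with
		// the backup key derived from the password, and store it as a block referenced by the
		// internal "\x07HASHBACK_DEK" dataset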
		accountNameH := core.Hash([]byte(cmd.Args[2]))
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))

		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)

		block.Release()
		core.Log(core.LogInfo, "User added")
	})

	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true // a rebuild rewrites index and meta files, so repair mode is implied
		}

		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}

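		// Repairs modify storage files, so take the exclusive server lock first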
		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}

		start := time.Now()

		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}

		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}

		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}

		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()

			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c

				p := i * 100 / len(rootlist)
				fmt.Printf("%d%%\r", p)
			}
		}

		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})

	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		start := time.Now()
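		// Mark every index entry reachable from a dataset root block, then sweep
		// the entries that were never marked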
		if !skipSweep {
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})

	err = cmd.Parse()
	PanicOn(err)

	fmt.Println(core.MemoryStats())

	return 0
}