Example #1
func (session *BackupSession) storePath(path string, toplevel bool) (entry *FileEntry, err error) {
	session.PrintStoreProgress(PROGRESS_INTERVAL_SECS)

	Debug("storePath %s", path)
	// Get file info from disk
	var info os.FileInfo
	{
		var isDir bool

		if toplevel {
			info, err = os.Stat(path) // At top level we follow symbolic links
		} else {
			info, err = os.Lstat(path) // At all other levels we do not
		}
		if info != nil {
			isDir = info.IsDir() // Check ignore even if we cannot open the file (to avoid output errors on files we already ignore)
		}
		if match, pattern := session.ignoreMatch(path, isDir); match {
			session.LogVerbose("Skipping (ignore %s) %s", pattern, path)
			return nil, nil
		}
		if err != nil {
			return nil, err
		}
	}

	entry = &FileEntry{
		FileName:    core.String(info.Name()),
		FileSize:    info.Size(), // os.FileInfo.Size() is already int64
		FileMode:    uint32(info.Mode()),
		ModTime:     info.ModTime().UnixNano(),
		ReferenceID: session.State.StateID,
	}

	if entry.FileMode&uint32(os.ModeTemporary) > 0 {
		session.LogVerbose("Skipping (temporary file) %s", path)
		return nil, nil
	} else if entry.FileMode&uint32(os.ModeDevice) > 0 {
		session.LogVerbose("Skipping (device file) %s", path)
		return nil, nil
	} else if entry.FileMode&uint32(os.ModeNamedPipe) > 0 {
		session.LogVerbose("Skipping (named pipe file) %s", path)
		return nil, nil
	} else if entry.FileMode&uint32(os.ModeSocket) > 0 {
		session.LogVerbose("Skipping (socket file) %s", path)
		return nil, nil
	} else if entry.FileMode&uint32(os.ModeSymlink) > 0 {
		entry.ContentType = ContentTypeSymLink
		entry.FileSize = 0

		sym, err := os.Readlink(path)
		if err != nil {
			return nil, err
		}
		entry.FileLink = core.String(sym)

		refEntry := session.reference.findReference(path)
		if refEntry != nil && refEntry.FileName == entry.FileName && refEntry.FileMode == entry.FileMode && refEntry.ModTime == entry.ModTime && refEntry.FileLink == entry.FileLink {
			// It's the same!
			entry = refEntry
			if entry.ReferenceID.Compare(session.State.StateID) != 0 {
				session.UnchangedFiles++
			}
		} else {
			session.LogVerbose("SYMLINK %s -> %s", path, sym)
		}

		session.Files++
		session.reference.storeReference(entry)

	} else if entry.FileMode&uint32(os.ModeDir) > 0 {
		entry.ContentType = ContentTypeDirectory
		entry.FileSize = 0

		reservation := session.reference.reserveReference(entry) // Directories need to be written before the files they contain, but ContentBlockID is only known after storeDir runs, so reserve the slot now and fill it in via the deferred store
		defer session.reference.storeReferenceDir(entry, reservation)

		refEntry := session.reference.findReference(path)

		if entry.ContentBlockID, err = session.storeDir(path, refEntry); err != nil {
			return nil, err
		}
		if refEntry != nil && bytes.Equal(refEntry.ContentBlockID[:], entry.ContentBlockID[:]) {
			entry.ReferenceID = refEntry.ReferenceID
		}
		session.Directories++
	} else {
		refEntry := session.reference.findReference(path)
		if refEntry != nil && refEntry.FileName == entry.FileName && refEntry.FileSize == entry.FileSize && refEntry.FileMode == entry.FileMode && refEntry.ModTime == entry.ModTime {
			// It's the same!
			entry = refEntry

			if entry.ReferenceID.Compare(session.State.StateID) != 0 {
				session.Client.Paint(" ")
				session.UnchangedFiles++

				if !session.reference.loaded { // UniqueSize doubles as the diff size, so the first backup (with no reference) counts everything as unique
					// TODO: UniqueSize is here calculated by the backup routine; should it be calculated by the server instead?
					session.State.UniqueSize += entry.FileSize
				}
			} else {
				// Resuming backup, still count it as unique
				session.State.UniqueSize += entry.FileSize
			}
		} else {
			if entry.FileSize > 0 {
				session.LogVerbose("%s", path)
				if err = session.storeFile(path, entry); err != nil {
					if e, ok := err.(*os.PathError); ok && runtime.GOOS == "windows" && e.Err == syscall.Errno(0x20) { // Windows ERROR_SHARING_VIOLATION
						return refEntry, err // Returning refEntry here in case this file existed and could be opened in a previous backup
					}
					return nil, err
				}
				// TODO: UniqueSize is here calculated by the backup routine; should it be calculated by the server instead?
				session.State.UniqueSize += entry.FileSize
			}
		}
		session.Files++
		session.State.Size += entry.FileSize

		session.reference.storeReference(entry)
	}
	return
}
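A minimal sketch of how storePath might be driven from the backup roots. backupRoots is a hypothetical helper, not part of this codebase; it only illustrates that top-level paths are stored with toplevel=true, so symbolic links named directly by the user are followed (os.Stat) while links found during recursion are not (os.Lstat).

// backupRoots is a hypothetical driver for storePath (an assumption for
// illustration, not part of the excerpt above).
func backupRoots(session *BackupSession, roots []string) error {
	for _, root := range roots {
		// A nil entry with a nil error means the path was skipped
		// (ignore pattern or special file type); that is not a failure.
		if _, err := session.storePath(root, true); err != nil {
			return err
		}
	}
	return nil
}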
Example #2
func run() int {
	bytearray.EnableAutoGC(60, 74)

	runtime.SetBlockProfileRate(1000)
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()

	/*defer func() {
		// Panic error handling
		if r := recover(); r != nil {
			fmt.Println(r)
			return 1
		}
	}()*/

	var err error

	exeRoot, _ := osext.ExecutableFolder()

	var serverPort int64 = int64(DEFAULT_SERVER_IP_PORT)
	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")

	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)
	var loglvl int64 = int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace", &loglvl, cmd.Hidden).OnChange(func() {
		core.LogLevel = int(loglvl)
	})

	// Please note that datPath has not been set until we have parsed the arguments; that is fine because neither of the
	// handlers starts opening files on its own.
	// TODO: remove the datPath global and pass the paths into the handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()

	cmd.Command("", "", func() { // Default
		serverAddr := net.TCPAddr{Port: int(serverPort)}

		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(fmt.Errorf("Error listening: %v", err))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())

		done = make(chan bool)
		defer close(done)

		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		signal.Notify(signalchan, os.Kill) // Note: os.Kill (SIGKILL) cannot be caught on most platforms, so this registration is usually a no-op
		go func() {
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()

		go connectionListener(listener)

		go func() {
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()

		// blocking channel read
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}

		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side.
	// It should be an interface to an administrative tool instead.
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		if len(cmd.Args) < 4 {
			panic(errors.New("Missing argument to adduser command"))
		}

		if !accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])}) {
			panic(errors.New("Error creating account"))
		}
		accountNameH := core.Hash([]byte(cmd.Args[2]))
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))

		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)

		block.Release()
		core.Log(core.LogInfo, "User added")
	})

	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true
		}

		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}

		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}

		start := time.Now()

		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}

		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}

		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}

		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()

			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c

				p := i * 100 / len(rootlist)
				fmt.Printf("%d%%\r", p)
			}
		}

		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})

	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		start := time.Now()
		if !skipSweep {
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})

	err = cmd.Parse()
	PanicOn(err)

	fmt.Println(core.MemoryStats())

	return 0
}
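run returns an exit code instead of calling os.Exit itself, which lets its deferred cleanup (accountHandler.Close, storageHandler.Close, the lock releases) run before the process exits. A minimal sketch of the expected wiring in main:

func main() {
	// os.Exit skips deferred functions, so all defers live inside run()
	// and main only translates its result into a process exit status.
	os.Exit(run())
}

Note also that the commented-out recover block near the top of run would not compile as written: a deferred closure cannot return a value on behalf of the enclosing function. The working pattern assigns to a named result instead (runSafe is a hypothetical name for illustration):

func runSafe() (code int) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println(r) // report the panic value
			code = 1       // assign the named result; `return 1` here would not compile
		}
	}()
	// ... body ...
	return 0
}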