// ZlibCompress deflates the full contents of src into a new ByteArray using a pooled zlib writer.
func ZlibCompress(src bytearray.ByteArray) (dst bytearray.ByteArray) {
	src.ReadSeek(0, bytearray.SEEK_SET)
	zw := zpool.GetWriter(&dst)
	CopyOrPanic(zw, &src)
	zw.Close()
	zpool.PutWriter(zw)
	return dst
}
// ZlibUncompress inflates the full contents of src into a new ByteArray.
func ZlibUncompress(src bytearray.ByteArray) (dst bytearray.ByteArray) {
	src.ReadSeek(0, bytearray.SEEK_SET)
	zr, err := zlib.NewReader(&src)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	CopyOrPanic(&dst, zr)
	return dst
}
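// A minimal round-trip sketch for the two helpers above, following the same
// read pattern as the block tests below (a single ReadSlice, which is enough
// for a payload this small). The function name and payload are illustrative,
// and "bytes" and "fmt" are assumed to be imported.
func zlibRoundTripSketch() {
	var src bytearray.ByteArray
	src.Write([]byte("payload"))

	compressed := ZlibCompress(src)        // deflate into a fresh ByteArray
	restored := ZlibUncompress(compressed) // inflate it back

	rdata, _ := restored.ReadSlice()
	fmt.Println(bytes.Equal(rdata, []byte("payload"))) // expected: true
}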
func TestClientServerHashboxBlocks(t *testing.T) {
	var err error
	conn, err := net.Dial("tcp", "127.0.0.1:1248")
	if err != nil {
		panic(err)
	}
	conn.SetDeadline(time.Now().Add(10 * time.Minute))
	client := core.NewClient(conn, "test account", core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password"))))
	defer client.Close(true)

	{
		var data bytearray.ByteArray
		data.Write([]byte("HELLO!"))

		blockID := client.StoreData(core.BlockDataTypeRaw, data, nil)
		client.Commit()
		block := client.ReadBlock(blockID)
		if !reflect.DeepEqual(blockID, block.BlockID) {
			t.Error("Received block has wrong ID")
		}
		rdata, _ := block.Data.ReadSlice()
		if !bytes.Equal(rdata, []byte("HELLO!")) {
			t.Error("Block contains wrong data, expected \"HELLO!\" received \"" + string(rdata) + "\"")
		}
	}

	{
		var data bytearray.ByteArray
		data.Write([]byte("Bet it all on black?"))

		blockID := client.StoreData(core.BlockDataTypeRaw, data, nil)
		client.Commit()
		block := client.ReadBlock(blockID)
		if !reflect.DeepEqual(blockID, block.BlockID) {
			t.Error("Received block has wrong ID")
		}
		rdata, _ := block.Data.ReadSlice()
		if !bytes.Equal(rdata, []byte("Bet it all on black?")) {
			t.Error("Block contains wrong data, expected \"Bet it all on black?\" received \"" + string(rdata) + "\"")
		}
	}
}
func (session *BackupSession) storeFile(path string, entry *FileEntry) (err error) {
	defer func() {
		// Panic error handling: some obscure files on OSX do open but then
		// generate "bad file descriptor" on read, so we recover that case
		// and return it as a normal error.
		if r := recover(); r != nil {
			if e, ok := r.(*os.PathError); ok && e.Err == syscall.EBADF {
				err = e.Err
			} else {
				panic(r) // Any other error is not normal and should panic
			}
		}
	}()

	var links []core.Byte128
	chain := FileChainBlock{}

	var file *os.File
	if file, err = os.Open(path); err != nil {
		return err
	}
	defer file.Close()

	var maxSum rollsum.Rollsum
	maxSum.Init()

	var fileData bytearray.ByteArray
	defer fileData.Release()

	for offset := int64(0); offset < int64(entry.FileSize); {
		Debug("storeFile(%s) offset %d", path, offset)
		session.PrintStoreProgress(PROGRESS_INTERVAL_SECS)

		var left int64 = int64(entry.FileSize) - offset
		var maxBlockSize int = MAX_BLOCK_SIZE
		if left < int64(maxBlockSize) {
			maxBlockSize = int(left)
		}

		var blockData bytearray.ByteArray

		// Fill the fileData buffer
		core.CopyNOrPanic(&fileData, file, maxBlockSize-fileData.Len())
		fileData.ReadSeek(0, os.SEEK_CUR) // TODO: figure out why this line is here because I do not remember

		var splitPosition int = fileData.Len()
		if fileData.Len() > MIN_BLOCK_SIZE*2 { // Candidate for rolling sum split
			rollIn, rollOut := fileData, fileData // Shallow copy the file data
			rollInBase, rollOutBase := 0, 0
			rollInPos, rollOutPos := 0, 0
			rollInSlice, _ := rollIn.ReadSlice()
			rollOutSlice, _ := rollOut.ReadSlice()

			partSum := maxSum
			var maxd = uint32(0)

			for rollInPos < fileData.Len() {
				if rollInPos-rollInBase >= len(rollInSlice) { // Next slice please
					rollInBase, _ = rollIn.ReadSeek(len(rollInSlice), os.SEEK_CUR)
					rollInSlice, _ = rollIn.ReadSlice()
				}
				if rollOutPos-rollOutBase >= len(rollOutSlice) { // Next slice please
					rollOutBase, _ = rollOut.ReadSeek(len(rollOutSlice), os.SEEK_CUR)
					rollOutSlice, _ = rollOut.ReadSlice()
				}

				if rollInPos >= MIN_BLOCK_SIZE {
					partSum.Rollout(rollOutSlice[rollOutPos-rollOutBase])
					rollOutPos++
				}
				partSum.Rollin(rollInSlice[rollInPos-rollInBase])
				rollInPos++

				if rollInPos >= MIN_BLOCK_SIZE {
					d := partSum.Digest()
					if d >= maxd {
						maxd = d
						splitPosition = rollInPos
						maxSum = partSum // Keep the sum so we can continue from here
					}
				}
			}
		}

		// Split and swap
		right := fileData.Split(splitPosition)
		blockData = fileData
		fileData = right

		offset += int64(blockData.Len())
		session.ReadData += int64(blockData.Len())

		// TODO: add encryption and custom compression here
		var datakey core.Byte128

		id := session.Client.StoreData(core.BlockDataTypeZlib, blockData, nil)
		links = append(links, id)
		chain.ChainBlocks = append(chain.ChainBlocks, id)
		chain.DecryptKeys = append(chain.DecryptKeys, datakey)
	}

	if len(chain.ChainBlocks) > 1 {
		id := session.Client.StoreData(core.BlockDataTypeZlib, SerializeToByteArray(chain), links)
		entry.ContentType = ContentTypeFileChain
		entry.ContentBlockID = id
	} else {
		entry.ContentType = ContentTypeFileData
		entry.ContentBlockID = chain.ChainBlocks[0]
		entry.DecryptKey = chain.DecryptKeys[0]
	}
	return nil
}
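// An isolated sketch of the split-point selection loop in storeFile above,
// operating on a flat []byte instead of a chunked ByteArray so the windowing
// is easier to follow. findSplitPosition and minBlock are illustrative names
// (minBlock standing in for MIN_BLOCK_SIZE), and unlike storeFile the rolling
// sum starts fresh here instead of carrying maxSum over from the previous
// block. The buffer splits where the checksum of the trailing minBlock-byte
// window peaks.
func findSplitPosition(data []byte, minBlock int) int {
	splitPosition := len(data)
	if len(data) <= minBlock*2 { // too small to be a candidate for splitting
		return splitPosition
	}

	var sum rollsum.Rollsum
	sum.Init()

	maxd := uint32(0)
	for in := 0; in < len(data); in++ {
		if in >= minBlock {
			sum.Rollout(data[in-minBlock]) // drop the byte leaving the window
		}
		sum.Rollin(data[in]) // add the byte entering the window
		if in+1 >= minBlock {
			if d := sum.Digest(); d >= maxd { // keep the strongest split point
				maxd = d
				splitPosition = in + 1
			}
		}
	}
	return splitPosition
}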
func run() int {
	bytearray.EnableAutoGC(60, 74)

	runtime.SetBlockProfileRate(1000)
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()
	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()

	/*defer func() {
		// Panic error handling
		if r := recover(); r != nil {
			fmt.Println(r)
			return 1
		}
	}()*/

	var err error

	exeRoot, _ := osext.ExecutableFolder()

	var serverPort int64 = int64(DEFAULT_SERVER_IP_PORT)

	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")

	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)

	var loglvl int64 = int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace)", &loglvl, cmd.Hidden).OnChange(func() {
		core.LogLevel = int(loglvl)
	})

	// Please note that datPath has not been set until we have parsed the arguments; that is ok because
	// neither of the handlers starts opening files on its own
	// TODO: remove datPath global and send it into the handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()

	cmd.Command("", "", func() { // Default
		serverAddr := net.TCPAddr{Port: int(serverPort)}

		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(fmt.Errorf("Error listening: %v", err.Error()))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())

		done = make(chan bool)
		defer close(done)

		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		signal.Notify(signalchan, os.Kill)
		go func() {
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()

		go connectionListener(listener)

		go func() {
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()

		// blocking channel read
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}

		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side
	// It should be an interface to an administrative tool instead
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		if len(cmd.Args) < 4 {
			panic(errors.New("Missing argument to adduser command"))
		}

		if !accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])}) {
			panic(errors.New("Error creating account"))
		}

		accountNameH := core.Hash([]byte(cmd.Args[2]))
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))

		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)

		block.Release()
		core.Log(core.LogInfo, "User added")
	})

	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true
		}

		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}

		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}

		start := time.Now()

		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}

		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}
		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}
		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()
			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c

				p := int(i * 100 / len(rootlist))
				fmt.Printf("%d%%\r", p)
			}
		}

		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})
	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		start := time.Now()
		if !skipSweep {
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})

	err = cmd.Parse()
	PanicOn(err)

	fmt.Println(core.MemoryStats())

	return 0
}
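// Illustrative invocations of the commands registered above. The binary name
// and the exact option syntax depend on the build and on the cmd package;
// the arguments follow the registrations in run():
//
//	hashbox-server                                 start the server (default command)
//	hashbox-server -port=1248 -data=/var/hashbox   override listening port and dat file path
//	hashbox-server adduser <username> <password>   create an account
//	hashbox-server check-storage -repair           check storage and repair non-fatal errors
//	hashbox-server check-storage -rebuild          rebuild index and meta files from data
//	hashbox-server gc -compact -threshold=5        mark-and-sweep, then compact files with >=5% dead space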
// NewHashboxBlock wraps uncompressed data and its links in a HashboxBlock and computes its BlockID.
func NewHashboxBlock(dataType byte, data bytearray.ByteArray, links []Byte128) *HashboxBlock {
	block := HashboxBlock{Links: links, DataType: dataType, Data: data, Compressed: false, CompressedSize: -1, UncompressedSize: data.Len()}

	block.BlockID = block.HashData()
	return &block
}
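// A minimal sketch of constructing a block, mirroring the adduser flow in
// run() above; the payload is illustrative and "fmt" is assumed to be imported.
func newHashboxBlockSketch() {
	var blockData bytearray.ByteArray
	blockData.Write([]byte("example payload"))

	block := NewHashboxBlock(BlockDataTypeRaw, blockData, nil) // raw data, no links
	fmt.Printf("created block %x\n", block.BlockID)
	block.Release() // release buffers when done, as the adduser command does
}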