func (session *BackupSession) storeDir(path string, entry *FileEntry) (id core.Byte128, err error) {
	var links []core.Byte128

	dir := DirectoryBlock{}

	var file *os.File
	if file, err = os.Open(path); err != nil {
		return
	}
	defer file.Close()

	var filelist []os.FileInfo
	if filelist, err = file.Readdir(-1); err != nil {
		return
	}

	sort.Sort(FileInfoSlice(filelist))
	for _, info := range filelist {
		e, err := session.storePath(filepath.Join(path, info.Name()), false)
		if err != nil {
			session.Log("Skipping (ERROR) %v", err)
		}
		if e != nil {
			dir.File = append(dir.File, e)
			if e.HasContentBlockID() {
				links = append(links, e.ContentBlockID)
			}
		}
	}

	block := core.NewHashboxBlock(core.BlockDataTypeZlib, SerializeToByteArray(dir), links)
	id = block.BlockID
	if entry == nil || entry.ContentBlockID.Compare(id) != 0 {
		if id.Compare(session.Client.StoreBlock(block)) != 0 {
			panic(errors.New("ASSERT, server blockID != local blockID"))
		}
	} else {
		block.Release()
	}
	return
}
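// FileInfoSlice is used above to give the Readdir results a stable order, so that the
// serialized DirectoryBlock (and therefore its block ID) is deterministic for an
// unchanged directory. The type is defined elsewhere in the package; a minimal sketch
// of an implementation consistent with how it is used here (sort.Interface ordered by
// file name) would look like this:
//
//	type FileInfoSlice []os.FileInfo
//
//	func (s FileInfoSlice) Len() int           { return len(s) }
//	func (s FileInfoSlice) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
//	func (s FileInfoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }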
func run() int {
	bytearray.EnableAutoGC(60, 74)

	runtime.SetBlockProfileRate(1000)
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()

	/*defer func() {
		// Panic error handling
		if r := recover(); r != nil {
			fmt.Println(r)
			return 1
		}
	}()*/

	var err error

	exeRoot, _ := osext.ExecutableFolder()

	var serverPort int64 = int64(DEFAULT_SERVER_IP_PORT)

	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")

	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)

	var loglvl int64 = int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace)", &loglvl, cmd.Hidden).OnChange(func() {
		core.LogLevel = int(loglvl)
	})

	// Please note that datPath has not been set until we have parsed arguments; that is ok because neither of the handlers
	// starts opening files on its own
	// TODO: remove datPath global and send them into handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()

	cmd.Command("", "", func() { // Default
		serverAddr := net.TCPAddr{Port: int(serverPort)}

		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(fmt.Errorf("Error listening: %v", err.Error()))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())

		done = make(chan bool)
		defer close(done)

		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		signal.Notify(signalchan, os.Kill)
		go func() {
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()

		go connectionListener(listener)

		go func() {
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()

		// blocking channel read
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}
		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side
	// It should be an interface to an administrative tool instead
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		if len(cmd.Args) < 4 {
			panic(errors.New("Missing argument to adduser command"))
		}

		if !accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])}) {
			panic(errors.New("Error creating account"))
		}

		accountNameH := core.Hash([]byte(cmd.Args[2]))
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))

		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)

		block.Release()
		core.Log(core.LogInfo, "User added")
	})

	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true
		}

		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}

		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}

		start := time.Now()

		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}

		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}
		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}
		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()
			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c

				p := int(i * 100 / len(rootlist))
				fmt.Printf("%d%%\r", p)
			}
		}

		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})
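	// Note on the check-storage command registered above: the optional trailing arguments
	// are the first and last data file numbers to check. They are parsed with
	// strconv.ParseInt in base 0, so both decimal and 0x-prefixed hex are accepted.
	// For example (binary name and flag syntax are assumptions and depend on the cmd package):
	//
	//	hashbox check-storage 16 0x2f
	//
	// would limit the data file check to files #16 (0x0010) through #47 (0x002f).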
	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}

		start := time.Now()
		if !skipSweep {
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})

	err = cmd.Parse()
	PanicOn(err)

	fmt.Println(core.MemoryStats())

	return 0
}
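// Hypothetical entry point (not shown in this listing): run returns an exit code rather
// than calling os.Exit itself, so the deferred accountHandler.Close() and
// storageHandler.Close() above execute before the process exits. A wrapper along these
// lines, assuming the standard "os" import, is expected elsewhere in the package:
//
//	func main() {
//		os.Exit(run())
//	}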
func TestClientServerDataset(t *testing.T) {
	var err error

	conn, err := net.Dial("tcp", "127.0.0.1:1248")
	if err != nil {
		panic(err)
	}
	conn.SetDeadline(time.Now().Add(10 * time.Minute))
	client := core.NewClient(conn, "test account", core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password"))))
	defer client.Close(true)

	// First make sure there is some data to reference
	var data bytearray.ByteArray
	block := core.NewHashboxBlock(core.BlockDataTypeZlib, data, nil)
	var blockID core.Byte128
	blockID = client.StoreBlock(block)
	client.Commit()

	var array core.DatasetStateArray
	var stateID core.Byte128 // randomByte128()

	copy(stateID[:], []byte("testC"))
	keepstate := core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42}
	client.AddDatasetState("testset", keepstate)
	array = append(array, core.DatasetStateEntry{State: keepstate})

	copy(stateID[:], []byte("testD"))
	keepstate = core.DatasetState{StateID: stateID, BlockID: blockID, Size: 125, UniqueSize: 33342}
	client.AddDatasetState("testset", keepstate)
	array = append(array, core.DatasetStateEntry{State: keepstate})

	copy(stateID[:], []byte("testA"))
	client.AddDatasetState("testset", core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42})
	copy(stateID[:], []byte("testB"))
	client.AddDatasetState("testset", core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42})

	copy(stateID[:], []byte("testB"))
	client.RemoveDatasetState("testset", stateID)
	copy(stateID[:], []byte("testA"))
	client.RemoveDatasetState("testset", stateID)
	client.RemoveDatasetState("testset", stateID)

	hash := md5.New()
	array[0].Serialize(hash)
	array[1].Serialize(hash)
	var localHash core.Byte128
	copy(localHash[:], hash.Sum(nil)[:16])

	info := client.GetAccountInfo()
	if reflect.TypeOf(info).String() != "*core.MsgServerAccountInfo" {
		t.Error("Invalid response to client.GetAccountInfo")
	}
	if len(info.DatasetList) != 1 {
		t.Error("Expected exactly one dataset under the test account")
	} else {
		if info.DatasetList[0].Name != "testset" {
			t.Error("First dataset is not named \"testset\"")
		}
		if fmt.Sprintf("%x", info.DatasetList[0].ListH) != fmt.Sprintf("%x", localHash) {
			t.Error(fmt.Sprintf("%x != %x", info.DatasetList[0].ListH, localHash))
			t.Error("List hash could not be verified")
		}
		if info.DatasetList[0].Size != 33509 {
			t.Error(fmt.Sprintf("Wrong total size for the dataset %d != %d", info.DatasetList[0].Size, 33509))
		}
	}

	list := client.ListDataset("testset")
	if list == nil {
		t.Error("Dataset \"testset\" was not found")
	} else if !reflect.DeepEqual(array, list.States) {
		fmt.Println(array)
		fmt.Println(list.States)
		t.Error("Local list and remote list not equal")
	}
}
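// Note: the test above assumes a Hashbox server is already listening on 127.0.0.1:1248
// and that a "test account" user exists with the password "password" (see the server's
// adduser command). A hypothetical TestMain could make that precondition explicit and
// skip cleanly instead of panicking when no server is running:
//
//	func TestMain(m *testing.M) {
//		if conn, err := net.Dial("tcp", "127.0.0.1:1248"); err != nil {
//			fmt.Println("skipping client/server tests: no Hashbox server on 127.0.0.1:1248")
//			os.Exit(0)
//		} else {
//			conn.Close()
//		}
//		os.Exit(m.Run())
//	}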