func TestServerTestAccount(t *testing.T) { A := AccountInfo{ AccountName: "test account", AccessKey: core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password"))), Datasets: core.DatasetArray{}, } accountHandler.SetInfo(A) JA, _ := json.Marshal(A) B := accountHandler.GetInfo(core.Hash([]byte(A.AccountName))) JB, _ := json.Marshal(*B) if B == nil { t.Error("Account not found") } else if string(JA) != string(JB) { fmt.Println(string(JA)) fmt.Println(string(JB)) t.Error("Accounts not equal") } C := accountHandler.GetInfo(core.Hash([]byte(""))) if C != nil { t.Error("Account without name found") } }
func getDatasetNameFromFile(filename string) core.String { // Open the file, read and check the file headers fil, err := os.Open(filepath.Join(datDirectory, "account", filename)) PanicOn(err) defer fil.Close() var header dbFileHeader header.Unserialize(fil) if header.filetype != dbFileTypeTransaction { panic(errors.New("File " + filename + " is not a valid transaction file")) } datasetName := header.datasetName var datasetNameH core.Byte128 { d, err := base64.RawURLEncoding.DecodeString(filename[23:45]) PanicOn(err) datasetNameH.Set(d) } datasetHashB := core.Hash([]byte(datasetName)) if datasetHashB.Compare(datasetNameH) != 0 { panic(errors.New("Header for " + filename + " does not contain the correct dataset name")) } return datasetName }
// Store backs up the given paths into the named dataset: it prepares the
// reference engine (used to diff against the previous backup), stores every
// path, waits for all pending writes to finish, and finally records the new
// dataset state on the server.
func (session *BackupSession) Store(datasetName string, path ...string) {
	var err error
	// Setup the reference backup engine
	session.reference = NewReferenceEngine(session, core.Hash([]byte(datasetName)))
	defer session.reference.Close()
	// Convert relative paths to absolute paths
	// (conversion failures are ignored and the original path kept as-is)
	for i := 0; i < len(path); i++ {
		p, err := filepath.Abs(path[i])
		if err == nil {
			path[i] = p
		}
	}
	sort.Sort(ByBase(path))
	// Do we need a virtual root folder?
	// A virtual root is used when storing multiple paths, or a single path
	// that is not a directory; otherwise the single directory is the root.
	var virtualRootDir *DirectoryBlock
	{
		info, err := os.Lstat(path[0])
		if err != nil {
			panic(err)
		}
		if !info.IsDir() || len(path) > 1 {
			virtualRootDir = &DirectoryBlock{}
			// Map each base name to its full path so the reference engine can
			// resolve entries under the synthetic root.
			session.reference.virtualRoot = make(map[string]string)
			for _, s := range path {
				session.reference.virtualRoot[filepath.Base(s)] = s
			}
		} else {
			session.reference.path = append(session.reference.path, path[0])
		}
	}
	// Load up last backup into the reference engine
	if !session.FullBackup {
		list := session.Client.ListDataset(datasetName)
		// Walk states from newest to oldest and stop at the first one that is
		// NOT flagged invalid; that state becomes the diff reference.
		found := len(list.States) - 1
		for ; found >= 0; found-- {
			if list.States[found].StateFlags&core.StateFlagInvalid != core.StateFlagInvalid {
				break
			}
		}
		if found >= 0 {
			session.reference.start(&list.States[found].State.BlockID)
		} else {
			session.reference.start(nil)
		}
	}
	if virtualRootDir != nil {
		// Store each path individually and collect the entries (and their
		// content block links) under the synthetic root directory block.
		var links []core.Byte128
		var entry *FileEntry
		for _, s := range path {
			entry, err = session.storePath(s, true)
			if err != nil {
				panic(err)
			} else if entry == nil {
				panic(errors.New(fmt.Sprintf("Unable to store %s", s)))
			} else if virtualRootDir != nil { // NOTE(review): always true here; redundant guard
				virtualRootDir.File = append(virtualRootDir.File, entry)
				if entry.ContentType != ContentTypeEmpty {
					links = append(links, entry.ContentBlockID)
				}
			}
		}
		session.State.BlockID = session.Client.StoreData(core.BlockDataTypeZlib, SerializeToByteArray(virtualRootDir), links)
	} else {
		// Single directory backup: store it directly as the root.
		session.State.BlockID, err = session.storeDir(path[0], nil)
		PanicOn(err)
	}
	// Commit all pending writes
	// session.Client.Commit()
	// Poll until the client has flushed everything, printing progress.
	for !session.Client.Done() {
		session.PrintStoreProgress(PROGRESS_INTERVAL_SECS)
		time.Sleep(100 * time.Millisecond)
	}
	session.Client.AddDatasetState(datasetName, *session.State)
	// Close and rename the current reference cache file for future use
	session.reference.Commit(session.State.BlockID)
	fmt.Println()
	session.PrintStoreProgress(0)
}
// dispatcher is the AccountHandler's single worker goroutine: it serializes
// all account/dataset queries received on handler.query, so file access needs
// no further locking. On panic, the error is logged and forwarded on
// handler.signal; pending queries have their result channels closed so
// callers do not hang.
func (handler *AccountHandler) dispatcher() {
	defer func() {
		// query cleanup
		// Close the query channel, then drain any queued requests and close
		// their result channels so blocked callers are released.
		close(handler.query)
		for q := range handler.query {
			close(q.result)
		}
		// did this goroutine panic?
		switch r := recover().(type) {
		case error:
			core.Log(core.LogError, "%v", r)
			handler.signal <- r
		}
		handler.wg.Done()
	}()
	for {
		select {
		// Command type priority queue, top commands get executed first
		case q := <-handler.query:
			// Each query runs in its own closure so the deferred close fires
			// per-request, even if the handler below panics.
			func() {
				defer close(q.result) // Always close the result channel after returning
				switch q.query {
				case accounthandler_getinfo:
					// Read the account info file keyed by the hashed account name.
					accountNameH := q.data.(core.Byte128)
					q.result <- readInfoFile(accountNameH)
				case accounthandler_setinfo:
					// Write (create or replace) the account info file.
					accountInfo := q.data.(AccountInfo)
					accountNameH := core.Hash([]byte(accountInfo.AccountName))
					writeInfoFile(accountNameH, accountInfo)
					q.result <- true
				case accounthandler_listset:
					// Return the state collection for one dataset.
					list := q.data.(queryListDataset)
					q.result <- readDBFile(list.AccountNameH, list.DatasetName)
				case accounthandler_addset:
					// Append an "add" transaction, then rebuild the cached db
					// file: any existing entry with the same StateID is removed
					// (swap-with-last delete) before the new state is appended
					// and the collection re-sorted.
					add := q.data.(queryAddDatasetState)
					result := appendDatasetTx(add.AccountNameH, add.DatasetName, dbTx{timestamp: time.Now().UnixNano(), txType: dbTxTypeAdd, data: add.State})
					{ // update the db file
						collection := readDBFile(add.AccountNameH, add.DatasetName)
						if collection != nil {
							for i, s := range collection.States {
								if s.State.StateID.Compare(add.State.StateID) == 0 {
									collection.States[i] = collection.States[len(collection.States)-1]
									collection.States = collection.States[:len(collection.States)-1]
									break
								}
							}
						} else {
							collection = &dbStateCollection{}
						}
						collection.States = append(collection.States, core.DatasetStateEntry{State: add.State})
						sort.Sort(collection.States)
						writeDBFile(add.AccountNameH, add.DatasetName, collection)
					}
					q.result <- result
				case accounthandler_removeset:
					// Append a "delete" transaction, then remove the matching
					// state from the cached db file (order-preserving delete).
					del := q.data.(queryRemoveDatasetState)
					result := appendDatasetTx(del.AccountNameH, del.DatasetName, dbTx{timestamp: time.Now().UnixNano(), txType: dbTxTypeDel, data: del.StateID})
					{ // update the db file
						collection := readDBFile(del.AccountNameH, del.DatasetName)
						if collection != nil {
							for i, s := range collection.States {
								if s.State.StateID.Compare(del.StateID) == 0 {
									copy(collection.States[i:], collection.States[i+1:])
									collection.States = collection.States[:len(collection.States)-1]
									break
								}
							}
						} else {
							collection = &dbStateCollection{}
						}
						writeDBFile(del.AccountNameH, del.DatasetName, collection)
					}
					q.result <- result
				default:
					panic(errors.New(fmt.Sprintf("Unknown query in AccountHandler causing hangup: %d", q.query)))
				}
			}()
		case _, ok := <-handler.signal: // Signal is closed?
			// TODO: remove this check
			if ok {
				panic(errors.New("We should not reach this point, it means someone outside this goroutine sent a signal on the channel"))
			}
			return
		}
	}
}
func datasetFilename(aH core.Byte128, dName string) string { dNameH := core.Hash([]byte(dName)) return accountFilename(aH) + "." + base64filename(dNameH[:]) }
// GenerateBackupKey derives the account's backup (encryption) key from the
// account name and password using 20000 rounds of DeepHmac over a padded seed.
func GenerateBackupKey(account string, password string) core.Byte128 {
	seed := append([]byte(account), []byte("*ENCRYPTION*PAD*")...)
	return core.DeepHmac(20000, seed, core.Hash([]byte(password)))
}
// GenerateAccessKey derives the account's server access key from the account
// name and password using 20000 rounds of DeepHmac over a padded seed.
func GenerateAccessKey(account string, password string) core.Byte128 {
	seed := append([]byte(account), []byte("*ACCESS*KEY*PAD*")...)
	return core.DeepHmac(20000, seed, core.Hash([]byte(password)))
}
// run is the server entry point: it wires up profiling, parses command-line
// options, registers the sub-commands (default serve loop, "adduser",
// "check-storage", "gc"), executes the selected command and returns the
// process exit code.
func run() int {
	bytearray.EnableAutoGC(60, 74)
	runtime.SetBlockProfileRate(1000)
	// Expose net/http/pprof profiling on :6060 for diagnostics.
	go func() { log.Println(http.ListenAndServe(":6060", nil)) }()
	//defer profile.Start(&profile.Config{CPUProfile: false, MemProfile: true, ProfilePath: ".", NoShutdownHook: true}).Stop()
	/*defer func() { // Panic error handling
		if r := recover(); r != nil {
			fmt.Println(r)
			return 1
		}
	}()*/
	var err error
	exeRoot, _ := osext.ExecutableFolder()
	var serverPort int64 = int64(DEFAULT_SERVER_IP_PORT)
	// Default data/index locations live next to the executable; both can be
	// overridden by the options registered below.
	datDirectory = filepath.Join(exeRoot, "data")
	idxDirectory = filepath.Join(exeRoot, "index")
	cmd.Title = fmt.Sprintf("Hashbox Server %s", Version)
	cmd.IntOption("port", "", "<port>", "Server listening port", &serverPort, cmd.Standard)
	cmd.StringOption("data", "", "<path>", "Full path to dat files", &datDirectory, cmd.Standard)
	cmd.StringOption("index", "", "<path>", "Full path to idx and meta files", &idxDirectory, cmd.Standard)
	var loglvl int64 = int64(core.LogInfo)
	cmd.IntOption("loglevel", "", "<level>", "Set log level (0=errors, 1=warnings, 2=info, 3=debug, 4=trace", &loglvl, cmd.Hidden).OnChange(func() { core.LogLevel = int(loglvl) })
	// Please note that datPath has not been set until we have parsed arguments, that is ok because neither of the handlers
	// start opening files on their own
	// TODO: remove datPath global and send them into handlers on creation instead
	accountHandler = NewAccountHandler()
	defer accountHandler.Close()
	storageHandler = NewStorageHandler()
	defer storageHandler.Close()
	cmd.Command("", "", func() { // Default
		// Default command: take the lock file, listen for TCP connections and
		// serve until a shutdown signal or a handler error arrives.
		serverAddr := net.TCPAddr{nil, int(serverPort), ""}
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}
		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", &serverAddr); err != nil {
			panic(errors.New(fmt.Sprintf("Error listening: %v", err.Error())))
		}
		core.Log(core.LogInfo, "%s is listening on %s", cmd.Title, listener.Addr().String())
		done = make(chan bool)
		defer close(done)
		signalchan := make(chan os.Signal, 1)
		defer close(signalchan)
		signal.Notify(signalchan, os.Interrupt)
		// NOTE(review): os.Kill (SIGKILL) cannot be caught on POSIX systems,
		// so this Notify registration likely has no effect there.
		signal.Notify(signalchan, os.Kill)
		go func() {
			// On the first OS signal, close the listener to unblock accept
			// and let the server wind down.
			for s := range signalchan {
				core.Log(core.LogInfo, "Received OS signal: %v", s)
				listener.Close()
				// done <- true
				return
			}
		}()
		go connectionListener(listener)
		go func() {
			// Periodically print memory statistics, but only when they change.
			var lastStats string
			for { // ever
				time.Sleep(10 * time.Second)
				s := core.MemoryStats()
				if s != lastStats {
					fmt.Println(s)
					lastStats = s
				}
			}
		}()
		// blocking channel read
		// Wait for shutdown (done) or an error signaled by either handler.
		select {
		case <-done:
		case <-accountHandler.signal:
		case <-storageHandler.signal:
		}
		core.Log(core.LogInfo, "Hashbox Server terminating")
	})

	// TODO: This is a temporary hack to allow creation of hashback users on the server side
	// It should be an interface to an adminsitrative tool instead
	cmd.Command("adduser", "<username> <password>", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}
		if len(cmd.Args) < 4 {
			panic(errors.New("Missing argument to adduser command"))
		}
		// Create the account with an access key derived from name+password.
		if (!accountHandler.SetInfo(AccountInfo{AccountName: core.String(cmd.Args[2]), AccessKey: core.GenerateAccessKey(cmd.Args[2], cmd.Args[3])})) {
			panic(errors.New("Error creating account"))
		}
		accountNameH := core.Hash([]byte(cmd.Args[2]))
		// Generate the data encryption key, encrypt it with the backup key and
		// store it as a block referenced by a reserved dataset state.
		dataEncryptionKey := core.GenerateDataEncryptionKey()
		core.Log(core.LogDebug, "DataEncryptionKey is: %x", dataEncryptionKey)
		core.EncryptDataInPlace(dataEncryptionKey[:], core.GenerateBackupKey(cmd.Args[2], cmd.Args[3]))
		var blockData bytearray.ByteArray
		blockData.Write(dataEncryptionKey[:])
		block := core.NewHashboxBlock(core.BlockDataTypeRaw, blockData, nil)
		if !storageHandler.writeBlock(block) {
			panic(errors.New("Error writing key block"))
		}
		err := accountHandler.AddDatasetState(accountNameH, core.String("\x07HASHBACK_DEK"), core.DatasetState{BlockID: block.BlockID})
		PanicOn(err)
		block.Release()
		core.Log(core.LogInfo, "User added")
	})
	var doRepair bool
	var doRebuild bool
	var skipData, skipMeta, skipIndex bool
	cmd.BoolOption("repair", "check-storage", "Repair non-fatal errors", &doRepair, cmd.Standard)
	cmd.BoolOption("rebuild", "check-storage", "Rebuild index and meta files from data", &doRebuild, cmd.Standard)
	cmd.BoolOption("skipdata", "check-storage", "Skip checking data files", &skipData, cmd.Standard)
	cmd.BoolOption("skipmeta", "check-storage", "Skip checking meta files", &skipMeta, cmd.Standard)
	cmd.BoolOption("skipindex", "check-storage", "Skip checking index files", &skipIndex, cmd.Standard)
	cmd.Command("check-storage", "", func() {
		// Rebuild implies repair and is incompatible with a start/end range.
		if doRebuild {
			if len(cmd.Args) > 2 {
				panic("Start and end file arguments are not valid in combination with rebuild")
			}
			doRepair = true
		}
		// Optional positional arguments select a data-file range to check.
		startfile := int32(0)
		endfile := int32(-1)
		if len(cmd.Args) > 2 {
			i, err := strconv.ParseInt(cmd.Args[2], 0, 32)
			if err != nil {
				panic(err)
			}
			startfile = int32(i)
			core.Log(core.LogInfo, "Starting from file #%d (%04x)", startfile, startfile)
		}
		if len(cmd.Args) > 3 {
			i, err := strconv.ParseInt(cmd.Args[3], 0, 32)
			if err != nil {
				panic(err)
			}
			endfile = int32(i)
			core.Log(core.LogInfo, "Stopping after file #%d (%04x)", endfile, endfile)
		}
		// Only take the exclusive lock when we may modify files.
		if doRepair {
			if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
				panic(err)
			} else {
				defer lock.Unlock()
			}
		}
		start := time.Now()
		if doRebuild {
			core.Log(core.LogInfo, "Removing index files")
			storageHandler.RemoveFiles(storageFileTypeIndex)
			core.Log(core.LogInfo, "Removing meta files")
			storageHandler.RemoveFiles(storageFileTypeMeta)
		}
		core.Log(core.LogInfo, "Checking all storage files")
		repaired, critical := storageHandler.CheckFiles(doRepair)
		// Each subsequent phase runs only if repairing, or no critical errors yet.
		if !skipData && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking data files")
			r, c := storageHandler.CheckData(doRepair, startfile, endfile)
			repaired += r
			critical += c
		}
		if !skipMeta && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking meta files")
			storageHandler.CheckMeta()
		}
		if !skipIndex && (doRepair || critical == 0) {
			core.Log(core.LogInfo, "Checking index files")
			storageHandler.CheckIndexes(doRepair)
		}
		if doRepair || critical == 0 {
			core.Log(core.LogInfo, "Checking dataset transactions")
			rootlist := accountHandler.RebuildAccountFiles()
			core.Log(core.LogInfo, "Checking block chain integrity")
			verified := make(map[core.Byte128]bool) // Keep track of verified blocks
			for i, r := range rootlist {
				tag := fmt.Sprintf("%s.%s.%x", r.AccountName, r.DatasetName, r.StateID[:])
				core.Log(core.LogDebug, "CheckChain on %s", tag)
				c := storageHandler.CheckChain(r.BlockID, tag, verified)
				if c > 0 {
					// Broken chain: mark the dataset state invalid so backups
					// will not reference it.
					accountHandler.InvalidateDatasetState(r.AccountNameH, r.DatasetName, r.StateID)
				}
				critical += c
				p := int(i * 100 / len(rootlist))
				fmt.Printf("%d%%\r", p)
			}
		}
		if critical > 0 {
			core.Log(core.LogError, "Detected %d critical errors, DO NOT start the server unless everything is repaired", critical)
		}
		if repaired > 0 {
			core.Log(core.LogWarning, "Performed %d repairs, please run again to verify repairs", repaired)
		}
		if critical == 0 && repaired == 0 {
			core.Log(core.LogInfo, "All checks completed successfully in %.1f minutes", time.Since(start).Minutes())
		}
	})
	var doCompact bool
	var deadSkip int64 = 5
	var skipSweep bool
	var doForce bool
	cmd.BoolOption("compact", "gc", "Compact data files to free space", &doCompact, cmd.Standard)
	cmd.BoolOption("skipsweep", "gc", "Skip sweeping indexes", &skipSweep, cmd.Standard)
	cmd.BoolOption("force", "gc", "Ignore broken datasets and force a garbage collect", &doForce, cmd.Standard)
	cmd.IntOption("threshold", "gc", "<percentage>", "Compact minimum dead space threshold", &deadSkip, cmd.Standard)
	cmd.Command("gc", "", func() {
		if lock, err := lockfile.Lock(filepath.Join(datDirectory, "hashbox.lck")); err != nil {
			panic(err)
		} else {
			defer lock.Unlock()
		}
		start := time.Now()
		if !skipSweep {
			// Mark phase: collect all reachable root blocks, then sweep
			// everything unmarked from the indexes.
			core.Log(core.LogInfo, "Marking index entries")
			var roots []core.Byte128
			for _, r := range accountHandler.CollectAllRootBlocks(doForce) {
				roots = append(roots, r.BlockID)
			}
			storageHandler.MarkIndexes(roots, true)
			storageHandler.SweepIndexes(true)
			core.Log(core.LogInfo, "Mark and sweep duration %.1f minutes", time.Since(start).Minutes())
			storageHandler.ShowStorageDeadSpace()
		}
		if doCompact {
			// Compact phase: reclaim dead space above the threshold percentage.
			storageHandler.CompactIndexes(true)
			storageHandler.CompactAll(storageFileTypeMeta, int(deadSkip))
			storageHandler.CompactAll(storageFileTypeData, int(deadSkip))
		}
		core.Log(core.LogInfo, "Garbage collection completed in %.1f minutes", time.Since(start).Minutes())
	})
	// Parse arguments and dispatch to the selected command.
	err = cmd.Parse()
	PanicOn(err)
	fmt.Println(core.MemoryStats())
	return 0
}
func TestClientServerAuthentication(t *testing.T) { var err error conn, err := net.Dial("tcp", "127.0.0.1:1248") if err != nil { panic(err) } conn.SetDeadline(time.Now().Add(15 * time.Second)) client := core.NewClient(conn, "test account", core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password")))) defer client.Close(true) }
func TestClientServerHashboxBlocks(t *testing.T) { var err error conn, err := net.Dial("tcp", "127.0.0.1:1248") if err != nil { panic(err) } conn.SetDeadline(time.Now().Add(10 * time.Minute)) client := core.NewClient(conn, "test account", core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password")))) defer client.Close(true) { var data bytearray.ByteArray data.Write([]byte("HELLO!")) blockID := client.StoreData(core.BlockDataTypeRaw, data, nil) client.Commit() block := client.ReadBlock(blockID) if !reflect.DeepEqual(blockID, block.BlockID) { t.Error("Received block has wrong ID") } rdata, _ := block.Data.ReadSlice() if !bytes.Equal(rdata, []byte("HELLO!")) { t.Error("Block contains wrong data, expected \"HELLO!\" received \"" + string(rdata) + "\"") } } { var data bytearray.ByteArray data.Write([]byte("Bet it all on black?")) blockID := client.StoreData(core.BlockDataTypeRaw, data, nil) client.Commit() block := client.ReadBlock(blockID) if !reflect.DeepEqual(blockID, block.BlockID) { t.Error("Received block has wrong ID") } rdata, _ := block.Data.ReadSlice() if !bytes.Equal(rdata, []byte("Bet it all on black?")) { t.Error("Block contains wrong data, expected \"Bet it all on black?\" received \"" + string(rdata) + "\"") } } }
// TestClientServerDataset exercises dataset state management against a locally
// running server: it adds and removes states in "testset", then verifies the
// server-reported dataset list hash and total size against values computed
// locally from the two states expected to remain (testC and testD).
func TestClientServerDataset(t *testing.T) {
	var err error
	conn, err := net.Dial("tcp", "127.0.0.1:1248")
	if err != nil {
		panic(err)
	}
	conn.SetDeadline(time.Now().Add(10 * time.Minute))
	client := core.NewClient(conn, "test account", core.DeepHmac(20000, append([]byte("test account"), []byte("*ACCESS*KEY*PAD*")...), core.Hash([]byte("password"))))
	defer client.Close(true)
	// First make sure there is some data to reference
	var data bytearray.ByteArray
	block := core.NewHashboxBlock(core.BlockDataTypeZlib, data, nil)
	var blockID core.Byte128
	blockID = client.StoreBlock(block)
	client.Commit()
	// array accumulates the states we expect the server to keep (testC, testD).
	var array core.DatasetStateArray
	var stateID core.Byte128 // randomByte128()
	copy(stateID[:], []byte("testC"))
	keepstate := core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42}
	client.AddDatasetState("testset", keepstate)
	array = append(array, core.DatasetStateEntry{State: keepstate})
	copy(stateID[:], []byte("testD"))
	keepstate = core.DatasetState{StateID: stateID, BlockID: blockID, Size: 125, UniqueSize: 33342}
	client.AddDatasetState("testset", keepstate)
	array = append(array, core.DatasetStateEntry{State: keepstate})
	// Add testA and testB, then remove them again (testA is removed twice to
	// exercise removal of an already-removed state).
	copy(stateID[:], []byte("testA"))
	client.AddDatasetState("testset", core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42})
	copy(stateID[:], []byte("testB"))
	client.AddDatasetState("testset", core.DatasetState{StateID: stateID, BlockID: blockID, Size: 5, UniqueSize: 42})
	copy(stateID[:], []byte("testB"))
	client.RemoveDatasetState("testset", stateID)
	copy(stateID[:], []byte("testA"))
	client.RemoveDatasetState("testset", stateID)
	client.RemoveDatasetState("testset", stateID)
	// Compute the expected list hash: MD5 over the serialized remaining states.
	hash := md5.New()
	array[0].Serialize(hash)
	array[1].Serialize(hash)
	var localHash core.Byte128
	copy(localHash[:], hash.Sum(nil)[:16])
	info := client.GetAccountInfo()
	if reflect.TypeOf(info).String() != "*core.MsgServerAccountInfo" {
		t.Error("Invalid response to client.GetAccountInfo")
	}
	if len(info.DatasetList) != 1 {
		t.Error("More than one dataset was found under test account")
	} else {
		if info.DatasetList[0].Name != "testset" {
			t.Error("First dataset is not named \"testset\"")
		}
		if fmt.Sprintf("%x", info.DatasetList[0].ListH) != fmt.Sprintf("%x", localHash) {
			t.Error(fmt.Sprintf("%x != %x", info.DatasetList[0].ListH, localHash))
			t.Error("List hash could not be verified")
		}
		// 33509 = 125 + 42 + 33342: expected total from the two kept states.
		if info.DatasetList[0].Size != 33509 {
			t.Error(fmt.Sprintf("Wrong total size for the dataset %d != %d", info.DatasetList[0].Size, 33509))
		}
	}
	// Finally, the listed states must match our local expectation exactly.
	list := client.ListDataset("testset")
	if list == nil {
		t.Error("Dataset \"testset\" was not found")
	} else if !reflect.DeepEqual(array, list.States) {
		fmt.Println(array)
		fmt.Println(list.States)
		t.Error("Local list and remote list not equal")
	}
}