// SETUP AND TEARDOWN /////////////////////////////////////////////// func (s *XLSuite) setUpHashTest() { var err error found, err := xf.PathExists(dataPath) if !found { // MODE SUSPECT if err = os.MkdirAll(dataPath, 0775); err != nil { fmt.Printf("error creating %s: %v\n", dataPath, err) } } found, err = xf.PathExists(uPath) if !found { // MODE SUSPECT if err = os.MkdirAll(uPath, 0775); err != nil { fmt.Printf("error creating %s: %v\n", uPath, err) } } found, err = xf.PathExists(uInDir) if !found { // MODE SUSPECT if err = os.MkdirAll(uInDir, 0775); err != nil { fmt.Printf("error creating %s: %v\n", uInDir, err) } } found, err = xf.PathExists(uTmpDir) if !found { // MODE SUSPECT if err = os.MkdirAll(uTmpDir, 0775); err != nil { fmt.Printf("error creating %s: %v\n", uTmpDir, err) } } }
func (u *U16x16) PutData2(data []byte, key string) (length int64, hash string, err error) { s := sha256.New() s.Write(data) hash = hex.EncodeToString(s.Sum(nil)) if hash != key { fmt.Printf("expected data to have key %s, but content key is %s", key, hash) err = errors.New("content/key mismatch") return } length = int64(len(data)) topSubDir := hash[0:1] lowerDir := hash[1:2] targetDir := filepath.Join(u.path, topSubDir, lowerDir) found, err := xf.PathExists(targetDir) if err == nil && !found { err = os.MkdirAll(targetDir, 0775) } fullishPath := filepath.Join(targetDir, key[2:]) found, err = xf.PathExists(fullishPath) if !found { var dest *os.File dest, err = os.Create(fullishPath) if err == nil { var count int defer dest.Close() count, err = dest.Write(data) if err == nil { length = int64(count) } } } return }
// doTestLoadEntries round-trips K randomly generated log entries:
// it writes them line-by-line to a scratch file under tmp/, reloads
// them with loadEntries into a fresh ID map, and verifies each entry
// can be found again by its key.
func (s *XLSuite) doTestLoadEntries(c *C, rng *xr.PRNG, whichSHA int) {
	K := 16 + rng.Intn(16) // 16..31 entries

	// create a unique name for a scratch file
	pathToFile := filepath.Join("tmp", rng.NextFileName(16))
	found, err := xf.PathExists(pathToFile)
	c.Assert(err, IsNil)
	for found {
		pathToFile = filepath.Join("tmp", rng.NextFileName(16))
		found, err = xf.PathExists(pathToFile)
		c.Assert(err, IsNil)
	}
	f, err := os.OpenFile(pathToFile, os.O_CREATE|os.O_WRONLY, 0600)
	c.Assert(err, IsNil)

	// create K entries, saving them in a slice while writing them
	// to disk, one serialized entry per line
	var entries []*LogEntry
	for i := 0; i < K; i++ {
		t, key, nodeID, src, path := s.makeEntryData(c, rng, whichSHA)
		entry, err := NewLogEntry(t, key, nodeID, src, path)
		c.Assert(err, IsNil)
		strEntry := entry.String()
		entries = append(entries, entry)
		var count int
		count, err = f.WriteString(strEntry + "\n")
		c.Assert(err, IsNil)
		// the write must cover the entry plus its newline
		c.Assert(count, Equals, len(strEntry)+1)
	}
	f.Close()
	c.Assert(len(entries), Equals, K)

	// use UpaxServer.LoadEntries to load the stuff in the file.
	m, err := xi.NewNewIDMap()
	c.Assert(err, IsNil)
	count, err := loadEntries(pathToFile, m, whichSHA)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, K) // K entries loaded.

	// every entry written must be retrievable from the map by key
	for i := 0; i < K; i++ {
		var entry, eInMap *LogEntry
		var whatever interface{}
		entry = entries[i]
		key := entry.key
		whatever, err = m.Find(key)
		c.Assert(err, IsNil)
		c.Assert(whatever, NotNil)
		eInMap = whatever.(*LogEntry)
		// DEBUG
		// XXX NEED LogEntry.Equal()
		// END
		// only the keys are compared; a full Equal() would be stronger
		c.Assert(bytes.Equal(key, eInMap.key), Equals, true)
	}
}
// doTestExists verifies the store's existence checks: a random data
// file is hashed, copied into the store, then looked up via
// HexKeyExists (string key) and ByteKeyExists (binary key), before
// and after the stored file is removed.
// NOTE(review): whichSHA is not a parameter here — it appears to be a
// package-level variable shared by these tests; confirm against the
// rest of the file.
func (s *XLSuite) doTestExists(c *C, u UI, digest hash.Hash) {
	rng := u.GetRNG()
	// random file up to 16 KB long under dataPath
	dLen, dPath := rng.NextDataFile(dataPath, 16*1024, 1)
	var dKey string
	var err error
	// compute the expected content key with the selected hash
	switch whichSHA {
	case xu.USING_SHA1:
		dKey, err = FileHexSHA1(dPath)
	case xu.USING_SHA2:
		dKey, err = FileHexSHA2(dPath)
	case xu.USING_SHA3:
		dKey, err = FileHexSHA3(dPath)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil)
	var uLen int64
	var uKey string
	// copy the file into the store using the matching CopyAndPut variant
	switch whichSHA {
	case xu.USING_SHA1:
		uLen, uKey, err = u.CopyAndPut1(dPath, dKey)
	case xu.USING_SHA2:
		uLen, uKey, err = u.CopyAndPut2(dPath, dKey)
	case xu.USING_SHA3:
		uLen, uKey, err = u.CopyAndPut3(dPath, dKey)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil)
	c.Assert(dLen, Equals, uLen)
	// the stored copy must exist on disk at the path derived from the key
	kPath, err := u.GetPathForHexKey(uKey)
	c.Assert(err, Equals, nil)
	found, err := xf.PathExists(kPath)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
	bKey, err := hex.DecodeString(uKey)
	c.Assert(err, IsNil)
	found, err = u.HexKeyExists(uKey) // string version of key
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
	found, err = u.ByteKeyExists(bKey) // binary version of key
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
	// after removal, both existence checks must report false
	os.Remove(kPath)
	found, err = xf.PathExists(kPath) // string version
	c.Assert(err, IsNil)
	c.Assert(found, Equals, false)
	found, err = u.ByteKeyExists(bKey) // binary version of key
	c.Assert(err, IsNil)
	c.Assert(found, Equals, false)
}
// UNIT TESTS ///////////////////////////////////////////////////////

// doTestCopyAndPut creates a random file, hashes it with the selected
// SHA, copies it into the store via the matching CopyAndPutN variant,
// and verifies that the returned length/key match and that both the
// original and the stored copy exist on disk.
// NOTE(review): whichSHA is not a parameter — presumably a
// package-level variable; confirm against the rest of the file.
func (s *XLSuite) doTestCopyAndPut(c *C, u UI, digest hash.Hash) {
	// create a random file
	rng := u.GetRNG()
	dLen, dPath := rng.NextDataFile(dataPath, 16*1024, 1) // maxLen, minLen
	var dKey string
	var err error
	// expected content key for the selected hash algorithm
	switch whichSHA {
	case xu.USING_SHA1:
		dKey, err = FileHexSHA1(dPath)
	case xu.USING_SHA2:
		dKey, err = FileHexSHA2(dPath)
	case xu.USING_SHA3:
		dKey, err = FileHexSHA3(dPath)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil) // actual, Equals, expected

	// invoke function under test
	var uLen int64
	var uKey string
	switch whichSHA {
	case xu.USING_SHA1:
		uLen, uKey, err = u.CopyAndPut1(dPath, dKey)
	case xu.USING_SHA2:
		uLen, uKey, err = u.CopyAndPut2(dPath, dKey)
	case xu.USING_SHA3:
		uLen, uKey, err = u.CopyAndPut3(dPath, dKey)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil)
	c.Assert(dLen, Equals, uLen)
	c.Assert(dKey, Equals, uKey)

	// verify that original and copy both exist
	found, err := xf.PathExists(dPath)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
	xPath, err := u.GetPathForHexKey(uKey)
	c.Assert(err, IsNil)
	found, err = xf.PathExists(xPath)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)

	// HACK - SIMPLEST Keccak TEST VECTOR
	// NOTE(review): FileHexSHA3 is given "abc" as a *path*, not as
	// data — this prints an error/garbage rather than the Keccak
	// test vector for the string "abc"; left as-is, it is marked HACK.
	if whichSHA == xu.USING_SHA3 {
		dKey, err = FileHexSHA3("abc")
		fmt.Printf("SHA3-256 for 'abc' is %s\n", dKey)
	}
	// END HACK
}
// These are operations on the file system. Directory depth is at least 1 // and no more than 'depth'. Likewise for width, the number of // files in a directory, where a file is either a data file or a subdirectory. // The number of bytes in a file is at least minLen and less than maxLen. // Subdirectory names may be random // // XXX Changed to return an int64 length. // func (p *PRNG) NextDataFile(dirName string, maxLen int, minLen int) ( int64, string) { // silently convert parameters to reasonable values if minLen < 0 { minLen = 0 } if maxLen < minLen+1 { maxLen = minLen + 1 } // create the data directory if it does not exist dirExists, err := xf.PathExists(dirName) if err != nil { panic(err) } if !dirExists { os.MkdirAll(dirName, 0755) } // loop until the file does not exist pathToFile := dirName + "/" + p.NextFileName(16) pathExists, err := xf.PathExists(pathToFile) if err != nil { panic(err) } for pathExists { pathToFile := dirName + "/" + p.NextFileName(16) pathExists, err = xf.PathExists(pathToFile) if err != nil { panic(err) } } count := minLen + int(p.NextFloat32()*float32((maxLen-minLen))) data := make([]byte, count) p.NextBytes(data) // fill with random bytes fo, err := os.Create(pathToFile) if err != nil { panic(err) } defer func() { if err := fo.Close(); err != nil { panic(err) } }() // XXX wakaranai // XXX this should be chunked // XXX data should be slice if _, err := fo.Write(data); err != nil { panic(err) } // XXX respec to also return err return int64(count), pathToFile }
// tmp is the path to a local file which will be renamed into U (or deleted
// if it is already present in U)
// u.path is an absolute or relative path to a U directory organized 16x16
// key is an sha1 content hash.
// If the operation succeeds we return the length of the file (which must
// not be zero.  Otherwise we return 0.
// we don't do much checking.
//
// Put1 hashes inFile with SHA-1, verifies the digest matches key, and
// then either renames inFile into the store (making it read-only) or,
// if the key is already present, simply deletes the temporary input.
func (u *U16x16) Put1(inFile, key string) (
	length int64, hash string, err error) {

	var (
		found                           bool
		fullishPath                     string
		topSubDir, lowerDir, targetDir  string
	)
	hash, err = FileHexSHA1(inFile)
	if err != nil {
		fmt.Printf("DEBUG: FileHexSHA1 returned error %v\n", err)
		return
	}
	// the caller-supplied key must equal the actual content hash
	if hash != key {
		fmt.Printf("expected %s to have key %s, but the content key is %s\n",
			inFile, key, hash)
		err = errors.New("IllegalArgument: Put1: key does not match content")
		return
	}
	info, err := os.Stat(inFile)
	if err != nil {
		return
	}
	length = info.Size()
	// first two hex digits of the key select the two directory levels
	topSubDir = hash[0:1]
	lowerDir = hash[1:2]
	targetDir = filepath.Join(u.path, topSubDir, lowerDir)
	found, err = xf.PathExists(targetDir)
	if err == nil && !found {
		// XXX MODE IS SUSPECT
		err = os.MkdirAll(targetDir, 0775)
	}
	if err == nil {
		// remaining hex digits form the file name within targetDir
		fullishPath = filepath.Join(targetDir, key[2:])
		found, err = xf.PathExists(fullishPath)
	}
	if err == nil {
		if found {
			// drop the temporary input file
			err = os.Remove(inFile)
		} else {
			// rename the temporary file into U; rename keeps the
			// operation atomic when tmp is on the same device
			err = os.Rename(inFile, fullishPath)
			if err == nil {
				// stored files are immutable
				err = os.Chmod(fullishPath, 0444)
			}
		}
	}
	return
}
func (u2 *U256x256) CopyAndPut2(path, key string) ( written int64, hash string, err error) { // the temporary file MUST be created on the same device tmpFileName := filepath.Join(u2.tmpDir, u2.rng.NextFileName(16)) found, _ := xf.PathExists(tmpFileName) // XXX error ignored for found { tmpFileName = filepath.Join(u2.tmpDir, u2.rng.NextFileName(16)) found, _ = xf.PathExists(tmpFileName) } written, err = CopyFile(tmpFileName, path) // dest <== src if err == nil { written, hash, err = u2.Put2(tmpFileName, key) } return }
func (u *U16x16) ByteKeyExists(key []byte) (found bool, err error) { path, err := u.GetPathForByteKey(key) if err == nil { found, err = xf.PathExists(path) } return }
// verifyLeafSHA checks that node is a leaf and that its stored hash
// equals the digest (SHA-1/SHA-2/SHA-3 per whichSHA) of the file at
// pathToFile, which must exist.
func (s *XLSuite) verifyLeafSHA(c *C, rng *xr.PRNG, node MerkleNodeI,
	pathToFile string, whichSHA int) {

	c.Assert(node.IsLeaf(), Equals, true)
	found, err := xf.PathExists(pathToFile)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
	data, err := ioutil.ReadFile(pathToFile)
	c.Assert(err, IsNil)
	c.Assert(data, NotNil)
	// pick the digest implementation matching the leaf's algorithm
	var sha hash.Hash
	switch whichSHA {
	case xu.USING_SHA1:
		sha = sha1.New()
	case xu.USING_SHA2:
		sha = sha256.New()
	case xu.USING_SHA3:
		sha = sha3.New256()
		// XXX DEFAULT = ERROR
	}
	sha.Write(data)
	sum := sha.Sum(nil)
	// the leaf's recorded hash must equal the freshly computed digest
	c.Assert(node.GetHash(), DeepEquals, sum)
}
func (u *U16x16) GetData2(key string) (data []byte, err error) { var ( found bool path string ) path, err = u.GetPathForKey(key) if err == nil { found, err = xf.PathExists(path) } if err == nil && !found { err = FileNotFound } if err == nil { var src *os.File if src, err = os.Open(path); err != nil { return } defer src.Close() var count int // XXX THIS WILL NOT WORK FOR LARGER FILES! It will ignore // anything over 128 KB data = make([]byte, DEFAULT_BUFFER_SIZE) count, err = src.Read(data) // XXX COUNT IS IGNORED _ = count } return }
func (u *U16x16) HexKeyExists(key string) (found bool, err error) { path, err := u.GetPathForHexKey(key) if err == nil { found, err = xf.PathExists(path) } return }
// PutData2 --------------------------------------------------------- func (u *UFlat) PutData2(data []byte, key string) ( length int64, hash string, err error) { var fullishPath string var found bool s := sha256.New() s.Write(data) hash = hex.EncodeToString(s.Sum(nil)) if hash != key { fmt.Printf("expected data to have key %s, but content key is %s", key, hash) err = errors.New("content/key mismatch") return } length = int64(len(data)) if err == nil { fullishPath = filepath.Join(u.path, key) found, err = xf.PathExists(fullishPath) if err == nil && !found { var dest *os.File dest, err = os.Create(fullishPath) if err == nil { var count int defer dest.Close() count, err = dest.Write(data) if err == nil { length = int64(count) } } } } return }
func (u2 *U256x256) CopyAndPut1(path, key string) ( written int64, hash string, err error) { // the temporary file MUST be created on the same device // xxx POSSIBLE RACE CONDITION tmpFileName := filepath.Join(u2.tmpDir, u2.rng.NextFileName(16)) found, err := xf.PathExists(tmpFileName) for found { tmpFileName = filepath.Join(u2.tmpDir, u2.rng.NextFileName(16)) found, err = xf.PathExists(tmpFileName) } written, err = CopyFile(tmpFileName, path) // dest <== src if err == nil { written, hash, err = u2.Put1(tmpFileName, key) } return }
// Copy the file at path to a randomly-named temporary file under U/tmp. // If that operation succeeds, we then attempt to rename the file into // the appropriate U data subdirectory. If the file is already present, // we silently discard the copy. Returns the length of the file in bytes, // its actual content hash, and any error. // func (u *UFlat) CopyAndPut3(path, key string) ( written int64, hash string, err error) { // the temporary file MUST be created on the same device // xxx POSSIBLE RACE CONDITION tmpFileName := filepath.Join(u.tmpDir, u.rng.NextFileName(16)) found, _ := xf.PathExists(tmpFileName) // XXX error ignored for found { tmpFileName = filepath.Join(u.tmpDir, u.rng.NextFileName(16)) found, _ = xf.PathExists(tmpFileName) } written, err = CopyFile(tmpFileName, path) // dest <== src if err == nil { written, hash, err = u.Put3(tmpFileName, key) } return }
// PutData1 stores data in the 256x256 store under key, which must be
// the hex-encoded SHA-1 digest of data.  Returns the number of bytes
// written, the computed content hash, and any error.  If the key is
// already present the existing file is left untouched.
func (u2 *U256x256) PutData1(data []byte, key string) (
	length int64, hash string, err error) {

	var fullishPath string
	var found bool
	s := sha1.New()
	s.Write(data)
	hash = hex.EncodeToString(s.Sum(nil))
	// the caller-supplied key must equal the actual content hash
	if hash != key {
		fmt.Printf("expected data to have key %s, but content key is %s",
			key, hash)
		err = errors.New("content/key mismatch")
		return
	}
	length = int64(len(data))
	// first four hex digits select the two directory levels (256x256)
	topSubDir := hash[0:2]
	lowerDir := hash[2:4]
	targetDir := filepath.Join(u2.path, topSubDir, lowerDir)
	found, err = xf.PathExists(targetDir)
	if err == nil && !found {
		// MODE QUESTIONABLE
		err = os.MkdirAll(targetDir, 0775)
	}
	if err == nil {
		// remaining hex digits form the file name within targetDir
		fullishPath = filepath.Join(targetDir, key[4:])
		found, err = xf.PathExists(fullishPath)
		if err == nil && !found {
			var dest *os.File
			dest, err = os.Create(fullishPath)
			if err == nil {
				var count int
				defer dest.Close()
				count, err = dest.Write(data)
				if err == nil {
					length = int64(count)
				}
			}
		}
	}
	return
}
func setUpMB3(c *C, rng *xr.PRNG) ( filter *MappedBloomSHA, m, k uint, keys [][]byte, backingFile string) { m = 20 k = 8 keys = make([][]byte, 100) for i := 0; i < 100; i++ { keys[i] = make([]byte, 20) } backingFile = "tmp/" + rng.NextFileName(8) // make sure the file does not already exist found, err := xf.PathExists(backingFile) c.Assert(err, IsNil) for found { backingFile = "tmp/" + rng.NextFileName(8) found, err = xf.PathExists(backingFile) c.Assert(err, IsNil) } return }
func CreateMerkleDocFromFileSystem(pathToDir string, whichSHA int, exclusions, matches []string) (md *MerkleDoc, err error) { if len(pathToDir) == 0 { err = NilPath } if err == nil { var found bool found, err = xf.PathExists(pathToDir) if err == nil && !found { err = FileNotFound } } // get the path to the directory, excluding the directory name var ( path string // dirName string exRE, matchRE *re.Regexp tree *MerkleTree ) if strings.HasSuffix(pathToDir, "/") { pathToDir = pathToDir[:len(pathToDir)-1] // drop trailing slash } parts := strings.Split(pathToDir, "/") if len(parts) == 1 { path = "." // dirName = pathToDir } else { partCount := len(parts) // dirName = parts[partCount - 1] parts = parts[:partCount-1] path = strings.Join(parts, "/") } if exclusions != nil { exRE, err = MakeExRE(exclusions) if err == nil && matches != nil { matchRE, err = MakeMatchRE(matches) } } if err == nil { tree, err = CreateMerkleTreeFromFileSystem( pathToDir, whichSHA, exRE, matchRE) if err == nil { // "creates the hash" md, err = NewMerkleDoc(path, whichSHA, false, tree, exRE, matchRE) if err == nil { md.bound = true } } } return }
func SHA3File(pathToFile string) (hash []byte, err error) { var data []byte found, err := xf.PathExists(pathToFile) if err == nil && !found { err = FileNotFound } if err == nil { data, err = ioutil.ReadFile(pathToFile) if err == nil { digest := sha3.New256() digest.Write(data) hash = digest.Sum(nil) } } return }
// returns the SHA1 binHash of the contents of a file func FileBinSHA1(path string) (binHash []byte, err error) { var data2 []byte binHash = xu.SHA1_BIN_NONE found, err := xf.PathExists(path) if err == nil && !found { err = errors.New("IllegalArgument: empty path or non-existent file") } if err == nil { data2, err = ioutil.ReadFile(path) } if err == nil { d2 := sha1.New() d2.Write(data2) binHash = d2.Sum(nil) } return }
// NextDataDir creates a directory tree populated with data filep.
//
// BUGS
// * on at least one occasion with width = 4 only 3 files/directories
//   were created at the top level (2 were subdirs)
// DEFICIENCIES:
// * no control over percentage of directories
// * no guarantee that depth will be reached
//
// depth is the number of directory levels (1 means no subdirectories);
// width is the number of members (files or subdirectories) per level.
// File sizes fall in [minLen, maxLen).
func (p *PRNG) NextDataDir(pathToDir string, depth int, width int,
	maxLen int, minLen int) {

	// number of directory levels; 1 means no subdirectories
	if depth < 1 {
		depth = 1
	}
	// number of members (files, subdirectories) at each level
	if width < 1 {
		width = 1
	}
	// XXX may panic
	pathExists, err := xf.PathExists(pathToDir)
	if err != nil {
		panic(err)
	}
	if !pathExists {
		os.MkdirAll(pathToDir, 0755)
	}
	subdirSoFar := 0
	for i := 0; i < width; i++ {
		if depth > 1 {
			// roughly 75% of members become data files, the rest
			// subdirectories; the extra clause tries to force at
			// least one subdirectory before the last slot
			if (p.NextFloat32() > 0.25) &&
				((i < width-1) || (subdirSoFar > 0)) { // 25% are subdirs
				// data file i
				// SPECIFICATION ERROR: file name may not be unique
				// count, pathToFile
				p.NextDataFile(pathToDir, maxLen, minLen)
			} else {
				// directory
				subdirSoFar += 1
				// create unique name
				fileName := p.NextFileName(16)
				pathToSubdir := pathToDir + "/" + fileName
				// recurse one level shallower
				p.NextDataDir(pathToSubdir, depth-1, width, maxLen, minLen)
			}
		} else {
			// data file
			// XXX SPECIFICATION ERROR: file name may not be unique
			// count, pathToFile
			p.NextDataFile(pathToDir, maxLen, minLen)
		}
	}
}
// doTestPutData creates a random data file, hashes it with the
// selected SHA, stores the bytes via the matching PutDataN variant,
// and verifies length, key, and on-disk existence of the stored copy.
// NOTE(review): whichSHA is not a parameter — presumably a
// package-level variable; confirm against the rest of the file.
func (s *XLSuite) doTestPutData(c *C, u UI, digest hash.Hash) {
	// we are testing (len,hash) = putData3(data, key)
	var dPath, dKey, uKey string
	var dLen, uLen int64
	var err error
	rng := u.GetRNG()
	dLen, dPath = rng.NextDataFile(dataPath, 16*1024, 1)
	// expected content key for the selected hash algorithm
	switch whichSHA {
	case xu.USING_SHA1:
		dKey, err = FileHexSHA1(dPath)
	case xu.USING_SHA2:
		dKey, err = FileHexSHA2(dPath)
	case xu.USING_SHA3:
		dKey, err = FileHexSHA3(dPath)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil)
	data, err := ioutil.ReadFile(dPath)
	c.Assert(err, Equals, nil)
	c.Assert(int64(len(data)), Equals, dLen)
	// store the bytes using the matching PutData variant
	switch whichSHA {
	case xu.USING_SHA1:
		uLen, uKey, err = u.PutData1(data, dKey)
	case xu.USING_SHA2:
		uLen, uKey, err = u.PutData2(data, dKey)
	case xu.USING_SHA3:
		uLen, uKey, err = u.PutData3(data, dKey)
		// XXX DEFAULT = ERROR
	}
	c.Assert(err, Equals, nil)
	c.Assert(dLen, Equals, uLen)
	c.Assert(dKey, Equals, uKey)
	// the stored item must be visible both via the store API and on disk
	found, err := u.HexKeyExists(uKey)
	c.Assert(err, Equals, nil)
	c.Assert(found, Equals, true)
	xPath, err := u.GetPathForHexKey(uKey)
	c.Assert(err, IsNil)
	found, err = xf.PathExists(xPath)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)
}
func (s *XLSuite) doNextDataDirTest(c *C, rng *PRNG, width int, depth int) { dirName := rng.NextFileName(8) dirPath := TMP_DIR + "/" + dirName pathExists, err := xf.PathExists(dirPath) if err != nil { panic("error invoking xf.PathExists on " + dirPath) } if pathExists { if strings.HasPrefix(dirPath, "/") { panic("attempt to remove absolute path " + dirPath) } if strings.Contains(dirPath, "..") { panic("attempt to remove path containing ..: " + dirPath) } os.RemoveAll(dirPath) } rng.NextDataDir(dirPath, width, depth, 32, 0) }
// inFile is the path to a local file which will be renamed into U (or deleted
// if it is already present in U)
// u.path is an absolute or relative path to a U directory organized _FLAT
// key is an sha3 content hash.
// If the operation succeeds we return the length of the file (which must
// not be zero.  Otherwise we return 0.
// We don't do much checking.
//
// Put3 hashes inFile with SHA3-256, verifies the digest matches key,
// and either renames inFile into the flat store or, if the key is
// already present, deletes the temporary input.
func (u *UFlat) Put3(inFile, key string) (
	length int64, hash string, err error) {

	var fullishPath string
	hash, err = FileHexSHA3(inFile)
	if err != nil {
		fmt.Printf("DEBUG: FileHexSHA3 returned error %v\n", err)
		return
	}
	// the caller-supplied key must equal the actual content hash
	if hash != key {
		fmt.Printf("expected %s to have key %s, but the content key is %s\n",
			inFile, key, hash)
		err = errors.New("IllegalArgument: Put3: key does not match content")
		return
	}
	info, err := os.Stat(inFile)
	if err != nil {
		return
	}
	length = info.Size()
	if err == nil {
		var found bool
		// flat store: the key itself is the file name
		fullishPath = filepath.Join(u.path, key)
		found, err = xf.PathExists(fullishPath)
		if err == nil {
			if found {
				// drop the temporary input file
				err = os.Remove(inFile)
			} else {
				// rename the temporary file into U
				err = os.Rename(inFile, fullishPath)
			}
		}
	}
	// NOTE(review): unlike Put1, this chmods the stored file even on
	// the already-present branch — presumably harmless since the file
	// should already be 0444, but confirm this is intentional.
	if err == nil {
		err = os.Chmod(fullishPath, 0444)
	}
	return
}
// Create just the Node for this member and write it to the conventional // place in the file system. func (mm *MemberMaker) PersistNode() (err error) { var ( config string ) // XXX check attrs, etc lfs := mm.ClusterMember.Node.GetLFS() pathToCfgDir := path.Join(lfs, ".xlattice") pathToCfgFile := path.Join(pathToCfgDir, "node.config") found, err := xf.PathExists(pathToCfgDir) if err == nil && !found { err = xf.CheckLFS(pathToCfgDir, 0750) } if err == nil { //mm.Node = *node config = mm.Node.String() } if err == nil { err = ioutil.WriteFile(pathToCfgFile, []byte(config), 0600) } return }
func NewMerkleDoc(pathToDir string, whichSHA int, binding bool, tree *MerkleTree, exRE, matchRE *re.Regexp) (m *MerkleDoc, err error) { if pathToDir == "" { err = EmptyPath } if err == nil { if strings.HasSuffix(pathToDir, "/") { pathToDir = pathToDir[:len(pathToDir)-1] } self := MerkleDoc{ exRE: exRE, matchRE: matchRE, path: pathToDir, whichSHA: whichSHA, } p := &self if tree != nil { err = p.SetTree(tree) } else if !binding { err = NilTreeButNotBinding } if err == nil && binding { var whether bool fullerPath := path.Join(pathToDir, tree.name) whether, err = xf.PathExists(fullerPath) if err == nil && !whether { err = DirectoryNotFound } } if err == nil { m = p } } return }
// CreateMerkleTreeFromFileSystem recursively builds a MerkleTree
// mirroring the directory at pathToDir.  Entries matching exRE are
// skipped, and when matchRE is non-nil only matching names are kept.
// Symlinks are skipped.  The tree's hash is the digest (per whichSHA)
// of its children's hashes in directory order.
func CreateMerkleTreeFromFileSystem(pathToDir string, whichSHA int,
	exRE, matchRE *re.Regexp) (tree *MerkleTree, err error) {

	var (
		dirName string
		files   []os.FileInfo
	)
	found, err := xf.PathExists(pathToDir)
	if err == nil && !found {
		err = FileNotFound
	}
	if err == nil {
		// the tree's name is the last path component
		parts := strings.Split(pathToDir, "/")
		if len(parts) == 1 {
			dirName = pathToDir
		} else {
			dirName = parts[len(parts)-1]
		}
		tree, err = NewMerkleTree(dirName, whichSHA, exRE, matchRE)
	}
	if err == nil {
		var shaX hash.Hash
		// we are promised that this is sorted
		files, err = ioutil.ReadDir(pathToDir)
		switch whichSHA {
		case xu.USING_SHA1:
			shaX = sha1.New()
		case xu.USING_SHA2:
			shaX = sha256.New()
		case xu.USING_SHA3:
			shaX = sha3.New256()
			// XXX DEFAULT = ERROR
		}
		shaXCount := 0
		for i := 0; i < len(files); i++ {
			var node MerkleNodeI
			file := files[i]
			name := file.Name()
			// XXX should continue if any exRE matches
			if exRE != nil && exRE.MatchString(name) {
				continue
			}
			// XXX should NOT continue if any matchRE match
			if matchRE != nil && !matchRE.MatchString(name) {
				continue
			}
			pathToFile := path.Join(pathToDir, name)
			mode := file.Mode()
			if mode&os.ModeSymlink != 0 {
				// DEBUG
				fmt.Printf("    LINK: %s, skipping\n", name)
				// END
				continue
			} else if mode.IsDir() {
				// recurse into subdirectory
				node, err = CreateMerkleTreeFromFileSystem(
					pathToFile, whichSHA, exRE, matchRE)
			} else if mode.IsRegular() {
				// XXX will this ignore symlinks?
				node, err = CreateMerkleLeafFromFileSystem(
					pathToFile, name, whichSHA)
			}
			if err != nil {
				break
			}
			if node != nil {
				// update tree-level hash
				if node.GetHash() != nil { // IS THIS POSSIBLE?
					shaXCount++
					shaX.Write(node.GetHash())
					tree.nodes = append(tree.nodes, node)
				}
			}
		}
		// only set a hash if at least one child contributed
		if err == nil && shaXCount > 0 {
			tree.SetHash(shaX.Sum(nil))
		}
	}
	return
}
func NewUpaxServer(ckPriv, skPriv *rsa.PrivateKey, cm *xcl.ClusterMember, whichSHA int) (us *UpaxServer, err error) { var ( count int lfs string // path to local file system f *os.File // file for debugging log pathToLog string logger *log.Logger uDir u.UI pathToU string entries *xi.IDMap ftLogFile *os.File pathToFTLog string // conventionally lfs/U/L ) if ckPriv == nil || ckPriv == nil { err = NilRSAKey } else if cm == nil { err = NilClusterMember } if err == nil { serverVersion, err = xu.ParseDecimalVersion(VERSION) } if err == nil { // whatever created cm should have created the local file system // and written the node configuration to // LFS/.xlattice/cluster.member.config. Let's make sure that // that exists before proceeding. lfs = cm.GetLFS() // This should be passed in opt.Logger pathToLog = filepath.Join(lfs, "log") f, err = os.OpenFile(pathToLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640) if err == nil { logger = log.New(f, "", log.Ldate|log.Ltime) } pathToCfg := filepath.Join( filepath.Join(lfs, ".xlattice"), "cluster.member.config") var found bool found, err = xf.PathExists(pathToCfg) if err == nil && found == false { err = ClusterConfigNotFound } } if f != nil { defer f.Close() } if err == nil { // DEBUG fmt.Printf("creating directory tree in %s\n", lfs) // END pathToU = filepath.Join(lfs, "U") uDir, err = u.New(pathToU, u.DIR16x16, 0) } if err == nil { entries, err = xi.NewNewIDMap() // with default depth } if err == nil { var found bool pathToFTLog = filepath.Join(pathToU, "L") found, err = xf.PathExists(pathToFTLog) if err == nil { if found { fmt.Printf("ftLog file exists\n") count, err = loadEntries(pathToFTLog, entries, whichSHA) if err == nil { // reopen it 0600 for appending ftLogFile, err = os.OpenFile(pathToFTLog, os.O_WRONLY|os.O_APPEND, 0600) } } else { // open it for appending ftLogFile, err = os.OpenFile(pathToFTLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) } } } if err == nil { us = &UpaxServer{ DoneCh: make(chan bool, 2), 
PathToDebugLog: pathToLog, Logger: logger, uDir: uDir, entries: entries, ftLogFile: ftLogFile, pathToFTLog: pathToFTLog, entryCount: count, ckPriv: ckPriv, skPriv: skPriv, ClusterMember: *cm, } } return }
// This was copied from cluster_test.go and minimal changes have been
// made.
//
// doTestPair is an end-to-end integration test: it registers a
// 2-server cluster with a running xlReg registry, converts the
// registered members into UpaxServers, runs them briefly, and (per
// the XXX STUBs below) is intended to eventually verify replicated
// storage between them.  Requires regCred.dat and a live registry.
func (s *XLSuite) doTestPair(c *C, rng *xr.PRNG, whichSHA int) {
	if VERBOSITY > 0 {
		fmt.Printf("TEST_PAIR whichSHA = %v\n", whichSHA)
	}
	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey
	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.
	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	found, err := xf.PathExists(clusterPath)
	c.Assert(err, IsNil)
	for found {
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
		found, err = xf.PathExists(clusterPath)
		c.Assert(err, IsNil)
	}
	// Set the test size in various senses --------------------------
	// K1 is the number of upax servers, and so the cluster size.  K2 is
	// the number of upax clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(2)
	K2 := 1
	M := 16 + rng.Intn(16) // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)
	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	cn := &an.MemberMaker
	<-cn.DoneCh
	clusterID := cn.ClusterID
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil) // FAILS 2016-11-13
	clusterSize := cn.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := cn.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))
	// DEBUG
	// fmt.Printf("cluster %s: %s\n", clusterName, clusterID.String())
	// END
	// Create names and LFSs for the K1 servers ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	serverNames := make([]string, K1)
	serverPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		serverNames[i] = rng.NextFileName(8)
		serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
		found, err = xf.PathExists(serverPaths[i])
		c.Assert(err, IsNil)
		for found {
			serverNames[i] = rng.NextFileName(8)
			serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
			found, err = xf.PathExists(serverPaths[i])
			c.Assert(err, IsNil)
		}
		err = os.MkdirAll(serverPaths[i], 0750)
		c.Assert(err, IsNil)
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}
	// create K1 reg client nodes -----------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep *xt.TcpEndPoint
		ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep}
		uc[i], err = reg.NewUserMember(serverNames[i], serverPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd,
			regServerCK, regServerSK,
			clusterName, cn.ClusterAttrs, cn.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
	}
	// Start the K1 reg client nodes running ------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}
	// wait until all reg clientNodes are done ----------------------
	for i := uint32(0); i < K1; i++ {
		err := <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
	}
	// verify that all clientNodes have meaningful baseNodes --------
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, serverNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}
	// verify that all clientNode members have meaningful baseNodes -
	for i := uint32(0); i < K1; i++ {
		// fmt.Printf("  server %s\n", serverNames[i]) // DEBUG
		memberCount := uint32(len(uc[i].Members))
		c.Assert(memberCount, Equals, K1)
		for j := uint32(0); j < memberCount; j++ {
			c.Assert(uc[i].Members[j], NotNil)
			// DEBUG
			// fmt.Printf("    other server[%d] is %s\n", j, serverNames[j])
			// END
			// doesn't work because reg server does not necessarily see
			// members in serverName order.
			// c.Assert(uc[i].Members[j].GetName(), Equals, serverNames[j])
			c.Assert(uc[i].Members[j].Peer.GetName() == "", Equals, false)
			c.Assert(uc[i].Members[j].Peer.GetNodeID(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetCommsPublicKey(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetSigPublicKey(), NotNil)
		}
	}
	// convert the reg client nodes to UpaxServers ------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember() // sometimes panics
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}
	// verify files are present and then start the servers ----------
	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at a 50 ms interval specified as a Run() argument and then sends
	// on DoneCh.  Second parameter is lifetime of the server in
	// keep-alives, say 20 (so 1 sec in total).  When this time has
	// passed, the server will send again on DoneCh, and then shut down.
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}
	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("pair_test: both servers have sent first DONE")
	// END
	// When all UpaxServers are ready, create K2 clients.--
	// Each upax client creates K3 separate datums of different
	// length (L1..L2) and content.  Each client signals
	// when done.
	// XXX STUB

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.
	// XXX STUB

	// After a reasonable deltaT, verify that both servers--
	// have a copy of each and every datum.
	// XXX STUB

	// silence "declared but not used" for the still-stubbed sizes
	_, _, _, _ = K2, M, LMin, LMax
}
func (s *XLSuite) TestChunkListAssyDisassy(c *C) { if VERBOSITY > 0 { fmt.Println("TEST_CHUNK_LIST_ASSY_DISASSY") } rng := xr.MakeSimpleRNG() // make a slice 3 to 7 chunks long, fill with random-ish data --- chunkCount := 3 + rng.Intn(5) // so 3 to 7, inclusive lastChunkLen := 1 + rng.Intn(MAX_DATA_BYTES-1) dataLen := (chunkCount-1)*MAX_DATA_BYTES + lastChunkLen data := make([]byte, dataLen) rng.NextBytes(data) // calculate datum, the SHA hash of the data -------------------- //d := sha3.NewKeccak256() d := sha1.New() d.Write(data) hash := d.Sum(nil) datum, err := xi.NewNodeID(hash) c.Assert(err, IsNil) // create tmp if it doesn't exist ------------------------------- found, err := xf.PathExists("tmp") c.Assert(err, IsNil) if !found { err = os.MkdirAll("tmp", 0755) c.Assert(err, IsNil) } // create scratch subdir with unique name ----------------------- var pathToU string for { dirName := rng.NextFileName(8) pathToU = path.Join("tmp", dirName) found, err = xf.PathExists(pathToU) c.Assert(err, IsNil) if !found { break } } // create a FLAT uDir at that point ----------------------------- myU, err := u.New(pathToU, u.DIR_FLAT, 0) // 0 means default perm c.Assert(err, IsNil) // write the test data into uDir -------------------------------- bytesWritten, key, err := myU.PutData(data, datum.Value()) c.Assert(err, IsNil) c.Assert(bytes.Equal(datum.Value(), key), Equals, true) c.Assert(bytesWritten, Equals, int64(dataLen)) skPriv, err := rsa.GenerateKey(rand.Reader, 1024) // cheap key sk := &skPriv.PublicKey c.Assert(err, IsNil) c.Assert(skPriv, NotNil) // Verify the file is present in uDir --------------------------- // (yes this is a test of uDir logic but these are early days --- // XXX uDir.Exist(arg) - arg should be []byte, no string keyStr := hex.EncodeToString(key) found, err = myU.HexKeyExists(keyStr) c.Assert(err, IsNil) c.Assert(found, Equals, true) // use the data file to build a chunkList, writing the chunks --- title := rng.NextFileName(8) now := 
xu.Timestamp(time.Now().UnixNano()) // make a reader -------------------------------------- pathToData, err := myU.GetPathForHexKey(keyStr) c.Assert(err, IsNil) reader, err := os.Open(pathToData) // open for read only c.Assert(err, IsNil) defer reader.Close() chunkList, err := NewChunkList(sk, title, now, reader, int64(dataLen), key, myU) c.Assert(err, IsNil) err = chunkList.Sign(skPriv) c.Assert(err, IsNil) digSig, err := chunkList.GetDigSig() c.Assert(err, IsNil) c.Assert(bytes.Equal(digSig, chunkList.digSig), Equals, true) err = chunkList.Verify() c.Assert(err, IsNil) // REBUILD AND CHECK -------------------------------------------- // rebuild the complete file from the chunkList and files present // in myU u2, err := u.New(pathToU, u.DIR_FLAT, 0) // 0 means default perm c.Assert(err, IsNil) var data2 []byte // should become copy of original count := chunkList.Size() for i := uint(0); i < count; i++ { chunkHash, err := chunkList.HashItem(i) c.Assert(err, IsNil) c.Assert(chunkHash, NotNil) var raw []byte // THIS IS A CHUNK, and so has a header, possibly some // padding, and then its own hash :-). Need to verify // and discard the last, then drop the padding. // CORRECTION: hash should NOT have been written to disk raw, err = u2.GetData(chunkHash) c.Assert(err, IsNil) chunk := &Chunk{packet: raw} ndx := chunk.GetIndex() c.Assert(ndx, Equals, uint32(i)) rawLen := uint32(len(raw)) dataLen := chunk.GetDataLen() //fmt.Printf("chunk %2d: index is %8d\n", i, ndx) //fmt.Printf(" len raw is %6d (%4x)\n", rawLen, rawLen) //fmt.Printf(" dataLen is %6d (%4x)\n", dataLen, dataLen) // if this isn't true, we get a panic c.Assert(dataLen < rawLen, Equals, true) payload := chunk.GetData() data2 = append(data2, payload...) 
} // verify that the content key of the rebuilt file is identical to // that of the original //d2D := sha3.NewKeccak256() d2D := sha1.New() d2D.Write(data2) hash2 := d2D.Sum(nil) datum2, err := xi.NewNodeID(hash2) c.Assert(err, IsNil) // DEBUG //fmt.Printf("datum: %x\ndatum2: %x\n", datum.Value(), datum2.Value()) //fmt.Printf("data: %x\ndata2: %x\n", data, data2) // END c.Assert(bytes.Equal(datum.Value(), datum2.Value()), Equals, true) // presumably pure pedantry: verify that the file contents are // also equal c.Assert(bytes.Equal(data, data2), Equals, true) }