func (t *rpcType) cleanup(request sub.CleanupRequest, reply *sub.CleanupResponse) error { t.disableScannerFunc(true) defer t.disableScannerFunc(false) t.rwLock.Lock() defer t.rwLock.Unlock() t.logger.Printf("Cleanup(): %d objects\n", len(request.Hashes)) if t.fetchInProgress { t.logger.Println("Error: fetch in progress") return errors.New("fetch in progress") } if t.updateInProgress { t.logger.Println("Error: update progress") return errors.New("update in progress") } for _, hash := range request.Hashes { pathname := path.Join(t.objectsDir, objectcache.HashToFilename(hash)) err := fsutil.ForceRemove(pathname) if err == nil { t.logger.Printf("Deleted: %s\n", pathname) } else { t.logger.Println(err) } } return nil }
func (objSrv *ObjectServer) addObject(reader io.Reader, length uint64, expectedHash *hash.Hash) (hash.Hash, bool, error) { hashVal, data, err := objectcache.ReadObject(reader, length, expectedHash) if err != nil { return hashVal, false, err } length = uint64(len(data)) filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hashVal)) // Check for existing object and collision. fi, err := os.Lstat(filename) if err == nil { if !fi.Mode().IsRegular() { return hashVal, false, errors.New("Existing non-file: " + filename) } if err := collisionCheck(data, filename, fi.Size()); err != nil { return hashVal, false, errors.New( "Collision detected: " + err.Error()) } // No collision and no error: it's the same object. Go home early. return hashVal, false, nil } if err = os.MkdirAll(path.Dir(filename), syscall.S_IRWXU); err != nil { return hashVal, false, err } if err := fsutil.CopyToFile(filename, filePerms, bytes.NewReader(data), length); err != nil { return hashVal, false, err } objSrv.rwLock.Lock() objSrv.sizesMap[hashVal] = uint64(len(data)) objSrv.rwLock.Unlock() return hashVal, true, nil }
func makeRegularInode(fullPathname string, inode *filesystem.RegularInode, multiplyUsedObjects map[hash.Hash]uint64) { var err error if inode.Size > 0 { objectPathname := path.Join(objectsDir, objectcache.HashToFilename(inode.Hash)) numCopies := multiplyUsedObjects[inode.Hash] if numCopies > 1 { numCopies-- objectPathname += strings.Repeat("~", int(numCopies)) if numCopies < 2 { delete(multiplyUsedObjects, inode.Hash) } } err = os.Rename(objectPathname, fullPathname) } else { _, err = os.Create(fullPathname) } if err != nil { logger.Println(err) return } if err := inode.WriteMetadata(fullPathname); err != nil { logger.Println(err) } else { if inode.Size > 0 { logger.Printf("Made inode: %s from: %x\n", fullPathname, inode.Hash) } else { logger.Printf("Made empty inode: %s\n", fullPathname) } } }
func readOne(objectsDir string, hash hash.Hash, length uint64, reader io.Reader) error { filename := path.Join(objectsDir, objectcache.HashToFilename(hash)) dirname := path.Dir(filename) if err := os.MkdirAll(dirname, syscall.S_IRWXU); err != nil { return err } return fsutil.CopyToFile(filename, reader, int64(length)) }
func (objSrv *ObjectServer) deleteObject(hashVal hash.Hash) error { filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hashVal)) if err := os.Remove(filename); err != nil { return err } objSrv.rwLock.Lock() delete(objSrv.sizesMap, hashVal) objSrv.rwLock.Unlock() return nil }
func copyFilesToCache(filesToCopyToCache []sub.FileToCopyToCache, rootDirectoryName string) { for _, fileToCopy := range filesToCopyToCache { sourcePathname := path.Join(rootDirectoryName, fileToCopy.Name) destPathname := path.Join(objectsDir, objectcache.HashToFilename(fileToCopy.Hash)) if copyFile(destPathname, sourcePathname) { logger.Printf("Copied: %s to cache\n", sourcePathname) } } }
func (t *rpcType) copyFilesToCache(filesToCopyToCache []sub.FileToCopyToCache, rootDirectoryName string) { for _, fileToCopy := range filesToCopyToCache { sourcePathname := path.Join(rootDirectoryName, fileToCopy.Name) destPathname := path.Join(t.objectsDir, objectcache.HashToFilename(fileToCopy.Hash)) if err := copyFile(destPathname, sourcePathname); err != nil { t.logger.Println(err) } else { t.logger.Printf("Copied: %s to cache\n", sourcePathname) } } }
func (objSrv *ObjectServer) addObject(data []byte, expectedHash *hash.Hash) ( hash.Hash, bool, error) { var hash hash.Hash if len(data) < 1 { return hash, false, errors.New("zero length object cannot be added") } hasher := sha512.New() if hasher.Size() != len(hash) { return hash, false, errors.New("Incompatible hash size") } _, err := hasher.Write(data) if err != nil { return hash, false, err } copy(hash[:], hasher.Sum(nil)) if expectedHash != nil { if hash != *expectedHash { return hash, false, errors.New(fmt.Sprintf( "Hash mismatch. Computed=%x, expected=%x", hash, *expectedHash)) } } filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hash)) // Check for existing object and collision. fi, err := os.Lstat(filename) if err == nil { if !fi.Mode().IsRegular() { return hash, false, errors.New("Existing non-file: " + filename) } err := collisionCheck(data, filename, fi.Size()) if err != nil { return hash, false, errors.New("Collision detected: " + err.Error()) } // No collision and no error: it's the same object. Go home early. return hash, false, nil } err = os.MkdirAll(path.Dir(filename), 0755) if err != nil { return hash, false, err } file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0660) if err != nil { return hash, false, err } defer file.Close() _, err = file.Write(data) if err != nil { return hash, false, err } objSrv.sizesMap[hash] = uint64(len(data)) return hash, true, nil }
func makeObjectCopies(multiplyUsedObjects map[hash.Hash]uint64) { for hash, numCopies := range multiplyUsedObjects { if numCopies < 2 { continue } objectPathname := path.Join(objectsDir, objectcache.HashToFilename(hash)) for numCopies--; numCopies > 0; numCopies-- { ext := strings.Repeat("~", int(numCopies)) if copyFile(objectPathname+ext, objectPathname) { logger.Printf("Copied object: %x%s\n", hash, ext) } } } }
func (objSrv *objectServer) AddObject(reader io.Reader, length uint64, expectedHash *hash.Hash) (hash.Hash, bool, error) { hashVal, data, err := objectcache.ReadObject(reader, length, expectedHash) if err != nil { return hashVal, false, err } filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hashVal)) if err = os.MkdirAll(path.Dir(filename), dirPerms); err != nil { return hashVal, false, err } if err := fsutil.CopyToFile(filename, filePerms, bytes.NewReader(data), length); err != nil { return hashVal, false, err } return hashVal, true, nil }
func (t *rpcType) makeObjectCopies(multiplyUsedObjects map[hash.Hash]uint64) { for hash, numCopies := range multiplyUsedObjects { if numCopies < 2 { continue } objectPathname := path.Join(t.objectsDir, objectcache.HashToFilename(hash)) for numCopies--; numCopies > 0; numCopies-- { ext := strings.Repeat("~", int(numCopies)) if err := copyFile(objectPathname+ext, objectPathname); err != nil { t.logger.Println(err) } else { t.logger.Printf("Copied object: %x%s\n", hash, ext) } } } }
func (or *ObjectsReader) nextObject() (uint64, io.ReadCloser, error) { or.nextIndex++ if or.nextIndex >= int64(len(or.hashes)) { return 0, nil, errors.New("all objects have been consumed") } filename := path.Join(or.objectServer.baseDir, objectcache.HashToFilename(or.hashes[or.nextIndex])) file, err := os.Open(filename) if err != nil { return 0, nil, err } fi, err := file.Stat() if err != nil { file.Close() return 0, nil, err } return uint64(fi.Size()), file, nil }
func (t *rpcType) copyFilesToCache(filesToCopyToCache []sub.FileToCopyToCache, rootDirectoryName string) { for _, fileToCopy := range filesToCopyToCache { sourcePathname := path.Join(rootDirectoryName, fileToCopy.Name) destPathname := path.Join(t.objectsDir, objectcache.HashToFilename(fileToCopy.Hash)) prefix := "Copied" if fileToCopy.DoHardlink { prefix = "Hardlinked" } if err := copyFile(destPathname, sourcePathname, fileToCopy.DoHardlink); err != nil { t.lastUpdateError = err t.logger.Println(err) } else { t.logger.Printf("%s: %s to cache\n", prefix, sourcePathname) } } }
// makeRegularInode materialises a regular file at fullPathname by renaming
// its object (or one of the object's "~N~"-suffixed copies) from objectsDir
// into place, then applies the inode metadata. For zero-length inodes a
// unique scratch file is created so ForceRename has something to move.
// Errors are logged and returned.
func makeRegularInode(fullPathname string,
	inode *filesystem.RegularInode, multiplyUsedObjects map[hash.Hash]uint64,
	objectsDir string, logger *log.Logger) error {
	var objectPathname string
	if inode.Size > 0 {
		objectPathname = path.Join(objectsDir,
			objectcache.HashToFilename(inode.Hash))
		numCopies := multiplyUsedObjects[inode.Hash]
		if numCopies > 1 {
			// Consume one of the extra copies of this multiply-used object,
			// keeping the remaining-copy count current in the map.
			numCopies--
			objectPathname += fmt.Sprintf("~%d~", numCopies)
			if numCopies < 2 {
				delete(multiplyUsedObjects, inode.Hash)
			} else {
				multiplyUsedObjects[inode.Hash] = numCopies
			}
		}
	} else {
		// Zero-length inode: create a PID-unique empty scratch file to be
		// renamed into place below. O_EXCL guards against reusing a stale one.
		objectPathname = fmt.Sprintf("%s.empty.%d", fullPathname, os.Getpid())
		if file, err := os.OpenFile(objectPathname,
			os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600); err != nil {
			return err
		} else {
			file.Close()
		}
	}
	if err := fsutil.ForceRename(objectPathname, fullPathname); err != nil {
		logger.Println(err)
		return err
	}
	if err := inode.WriteMetadata(fullPathname); err != nil {
		logger.Println(err)
		return err
	} else {
		if inode.Size > 0 {
			logger.Printf("Made inode: %s from: %x\n", fullPathname, inode.Hash)
		} else {
			logger.Printf("Made empty inode: %s\n", fullPathname)
		}
	}
	return nil
}
func (objSrv *ObjectServer) checkObject(hash hash.Hash) (uint64, error) { if size, ok := objSrv.sizesMap[hash]; ok { return size, nil } filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hash)) fi, err := os.Lstat(filename) if err != nil { return 0, nil } if fi.Mode().IsRegular() { if fi.Size() < 1 { return 0, errors.New(fmt.Sprintf("zero length file: %s", filename)) } size := uint64(fi.Size()) objSrv.sizesMap[hash] = size return size, nil } return 0, nil }
func convertToObject(pathname, objectsDir string) error { file, err := os.Open(pathname) if err != nil { return err } defer file.Close() hasher := sha512.New() _, err = io.Copy(hasher, file) if err != nil { return err } var hashVal hash.Hash copy(hashVal[:], hasher.Sum(nil)) objPathname := path.Join(objectsDir, objectcache.HashToFilename(hashVal)) if err = os.MkdirAll(path.Dir(objPathname), 0755); err != nil { return err } return fsutil.ForceRename(pathname, objPathname) }
func readOne(hash hash.Hash, reader io.Reader) error { filename := path.Join(objectsDir, objectcache.HashToFilename(hash)) dirname := path.Dir(filename) err := os.MkdirAll(dirname, syscall.S_IRUSR|syscall.S_IWUSR) if err != nil { return err } file, err := os.Create(filename) if err != nil { return err } defer file.Close() writer := bufio.NewWriter(file) defer writer.Flush() _, err = io.Copy(writer, reader) if err != nil { return errors.New(fmt.Sprintf("error copying: %s", err.Error())) } return nil }
func (objSrv *ObjectServer) addObject(reader io.Reader, length uint64, expectedHash *hash.Hash) (hash.Hash, bool, error) { var hash hash.Hash if length < 1 { return hash, false, errors.New("zero length object cannot be added") } data := make([]byte, length) nRead, err := io.ReadFull(reader, data) if err != nil { return hash, false, err } if uint64(nRead) != length { return hash, false, errors.New(fmt.Sprintf( "failed to read data, wanted: %d, got: %d bytes", length, nRead)) } hasher := sha512.New() if hasher.Size() != len(hash) { return hash, false, errors.New("Incompatible hash size") } if _, err := hasher.Write(data); err != nil { return hash, false, err } copy(hash[:], hasher.Sum(nil)) if expectedHash != nil { if hash != *expectedHash { return hash, false, errors.New(fmt.Sprintf( "Hash mismatch. Computed=%x, expected=%x", hash, *expectedHash)) } } filename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hash)) // Check for existing object and collision. fi, err := os.Lstat(filename) if err == nil { if !fi.Mode().IsRegular() { return hash, false, errors.New("Existing non-file: " + filename) } if err := collisionCheck(data, filename, fi.Size()); err != nil { return hash, false, errors.New("Collision detected: " + err.Error()) } // No collision and no error: it's the same object. Go home early. return hash, false, nil } if err = os.MkdirAll(path.Dir(filename), 0755); err != nil { return hash, false, err } tmpFilename := filename + "~" file, err := os.OpenFile(tmpFilename, os.O_CREATE|os.O_WRONLY, 0660) if err != nil { return hash, false, err } defer os.Remove(tmpFilename) defer file.Close() nWritten, err := file.Write(data) if err != nil { return hash, false, err } if nWritten != len(data) { return hash, false, errors.New(fmt.Sprintf( "expected length: %d, got: %d for: %s\n", len(data), nWritten, tmpFilename)) } objSrv.sizesMap[hash] = uint64(len(data)) return hash, true, os.Rename(tmpFilename, filename) }