// Returns true if there is a failure due to missing computed files. func (sub *Sub) buildUpdateRequest(image *image.Image, request *subproto.UpdateRequest, deleteMissingComputedFiles bool, logger *log.Logger) bool { sub.requiredFS = image.FileSystem sub.filter = image.Filter request.Triggers = image.Triggers sub.requiredInodeToSubInode = make(map[uint64]uint64) sub.inodesChanged = make(map[uint64]bool) sub.inodesCreated = make(map[uint64]string) sub.subObjectCacheUsage = make(map[hash.Hash]uint64, len(sub.ObjectCache)) // Populate subObjectCacheUsage. for _, hash := range sub.ObjectCache { sub.subObjectCacheUsage[hash] = 0 } if !filesystem.CompareDirectoriesMetadata(&sub.FileSystem.DirectoryInode, &sub.requiredFS.DirectoryInode, nil) { makeDirectory(request, &sub.requiredFS.DirectoryInode, "/", false) } if sub.compareDirectories(request, &sub.FileSystem.DirectoryInode, &sub.requiredFS.DirectoryInode, "/", deleteMissingComputedFiles, logger) { return true } // Look for multiply used objects and tell the sub. for obj, useCount := range sub.subObjectCacheUsage { if useCount > 1 { if request.MultiplyUsedObjects == nil { request.MultiplyUsedObjects = make(map[hash.Hash]uint64) } request.MultiplyUsedObjects[obj] = useCount } } return false }
// compareDirectories compares one directory level of the sub against the
// required directory, queueing deletions for unwanted sub entries, a
// directory-make when the sub directory is absent or has differing metadata,
// and delegating per-entry comparison to compareEntries.
// Filtered pathnames are skipped entirely.
func compareDirectories(request *subproto.UpdateRequest, state *state,
	subDirectory, requiredDirectory *filesystem.Directory,
	parentName string, filter *filter.Filter) {
	requiredPathName := path.Join(parentName, requiredDirectory.Name)
	// First look for entries that should be deleted.
	makeSubDirectory := false
	if subDirectory == nil {
		// No corresponding directory on the sub: it must be created.
		makeSubDirectory = true
	} else {
		subPathName := path.Join(parentName, subDirectory.Name)
		for name, subEntry := range subDirectory.EntriesByName {
			pathname := path.Join(subPathName, entryName(subEntry))
			if filter.Match(pathname) {
				continue
			}
			// Present on the sub but not required: queue for deletion.
			if _, ok := requiredDirectory.EntriesByName[name]; !ok {
				request.PathsToDelete = append(request.PathsToDelete, pathname)
				fmt.Printf("Delete: %s\n", pathname) // HACK
			}
		}
		if !filesystem.CompareDirectoriesMetadata(subDirectory,
			requiredDirectory, os.Stdout) {
			fmt.Printf("Different directory: %s...\n", requiredPathName) // HACK
			makeSubDirectory = true
			// TODO(rgooch): Update metadata.
		}
	}
	if makeSubDirectory {
		// Queue creation of the directory with the required ownership/mode.
		var newdir subproto.Directory
		newdir.Name = requiredPathName
		newdir.Mode = requiredDirectory.Mode
		newdir.Uid = requiredDirectory.Uid
		newdir.Gid = requiredDirectory.Gid
		request.DirectoriesToMake = append(request.DirectoriesToMake, newdir)
	}
	// Compare each required entry against the sub (nil when absent).
	for name, requiredEntry := range requiredDirectory.EntriesByName {
		pathname := path.Join(requiredPathName, entryName(requiredEntry))
		if filter.Match(pathname) {
			continue
		}
		if subDirectory == nil {
			compareEntries(request, state, nil, requiredEntry,
				requiredPathName, filter)
		} else {
			if subEntry, ok := subDirectory.EntriesByName[name]; ok {
				compareEntries(request, state, subEntry, requiredEntry,
					requiredPathName, filter)
			} else {
				compareEntries(request, state, nil, requiredEntry,
					requiredPathName, filter)
			}
		}
	}
}
// addInode queues the creation of the inode for requiredEntry at myPathName.
// It prefers, in order: hardlinking to an inode already created during this
// request; hardlinking to an existing sibling inode with identical metadata
// and data; hardlinking to a sibling with identical data after queueing a
// metadata fix. Otherwise the inode is created outright, ensuring the file
// data is in the sub object cache first.
func (sub *Sub) addInode(request *subproto.UpdateRequest,
	requiredEntry *filesystem.DirectoryEntry, myPathName string) {
	requiredInode := requiredEntry.Inode()
	// Already created this inode elsewhere in this request: just link to it.
	if name, ok := sub.inodesCreated[requiredEntry.InodeNumber]; ok {
		makeHardlink(request, myPathName, name)
		return
	}
	// Try to find a sibling inode.
	names := sub.requiredFS.InodeToFilenamesTable()[requiredEntry.InodeNumber]
	subFS := sub.FileSystem
	if len(names) > 1 {
		var sameDataInode filesystem.GenericInode
		var sameDataName string
		for _, name := range names {
			if inum, found := subFS.FilenameToInodeTable()[name]; found {
				subInode := sub.FileSystem.InodeTable[inum]
				_, sameMetadata, sameData := filesystem.CompareInodes(
					subInode, requiredInode, nil)
				if sameMetadata && sameData {
					// Perfect match: link and stop searching.
					makeHardlink(request, myPathName, name)
					return
				}
				if sameData {
					// Remember a data-only match in case no perfect match
					// turns up.
					sameDataInode = subInode
					sameDataName = name
				}
			}
		}
		if sameDataInode != nil {
			// Fix the sibling's metadata, then link to it.
			sub.updateMetadata(request, requiredEntry, sameDataName)
			makeHardlink(request, myPathName, sameDataName)
			return
		}
	}
	if inode, ok := requiredEntry.Inode().(*filesystem.RegularInode); ok {
		if inode.Size > 0 {
			if _, ok := sub.subObjectCacheUsage[inode.Hash]; ok {
				// Object already in cache: count another use.
				sub.subObjectCacheUsage[inode.Hash]++
			} else {
				// Not in object cache: grab it from file-system.
				request.FilesToCopyToCache = append(
					request.FilesToCopyToCache,
					sub.getFileToCopy(myPathName, inode.Hash))
				sub.subObjectCacheUsage[inode.Hash] = 1
			}
		}
	}
	var inode subproto.Inode
	inode.Name = myPathName
	inode.GenericInode = requiredEntry.Inode()
	request.InodesToMake = append(request.InodesToMake, inode)
	// Record so later entries for the same inode become hardlinks.
	sub.inodesCreated[requiredEntry.InodeNumber] = myPathName
}
// Returns true if no update needs to be performed. func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) bool { var state state state.subFS = &sub.fileSystem.FileSystem requiredImage := sub.herd.getImage(sub.requiredImage) state.requiredFS = requiredImage.FileSystem filter := requiredImage.Filter request.Triggers = requiredImage.Triggers state.requiredInodeToSubInode = make(map[uint64]uint64) state.inodesChanged = make(map[uint64]bool) state.inodesCreated = make(map[uint64]string) state.subObjectCacheUsage = make(map[hash.Hash]uint64, len(sub.fileSystem.ObjectCache)) var rusageStart, rusageStop syscall.Rusage syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStart) // Populate subObjectCacheUsage. for _, hash := range sub.fileSystem.ObjectCache { state.subObjectCacheUsage[hash] = 0 } compareDirectories(request, &state, &state.subFS.DirectoryInode, &state.requiredFS.DirectoryInode, "/", filter) // Look for multiply used objects and tell the sub. for obj, useCount := range state.subObjectCacheUsage { if useCount > 1 { if request.MultiplyUsedObjects == nil { request.MultiplyUsedObjects = make(map[hash.Hash]uint64) } request.MultiplyUsedObjects[obj] = useCount } } syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStop) sub.lastComputeUpdateCpuDuration = time.Duration( rusageStop.Utime.Sec)*time.Second + time.Duration(rusageStop.Utime.Usec)*time.Microsecond - time.Duration(rusageStart.Utime.Sec)*time.Second - time.Duration(rusageStart.Utime.Usec)*time.Microsecond computeCpuTimeDistribution.Add(sub.lastComputeUpdateCpuDuration) if len(request.FilesToCopyToCache) > 0 || len(request.InodesToMake) > 0 || len(request.HardlinksToMake) > 0 || len(request.PathsToDelete) > 0 || len(request.DirectoriesToMake) > 0 || len(request.InodesToChange) > 0 { sub.herd.logger.Printf( "buildUpdateRequest(%s) took: %s user CPU time\n", sub.hostname, sub.lastComputeUpdateCpuDuration) return false } return true }
func pushFile(getSubClient getSubClientFunc, source, dest string) error { var sourceStat wsyscall.Stat_t if err := wsyscall.Stat(source, &sourceStat); err != nil { return err } sourceFile, err := os.Open(source) if err != nil { return err } defer sourceFile.Close() srpcClient := getSubClient() objClient := objclient.AttachObjectClient(srpcClient) defer objClient.Close() if err != nil { return err } hashVal, _, err := objClient.AddObject(sourceFile, uint64(sourceStat.Size), nil) if err != nil { return err } newRegularInode := &filesystem.RegularInode{ Mode: filesystem.FileMode(sourceStat.Mode), Uid: sourceStat.Uid, Gid: sourceStat.Gid, MtimeNanoSeconds: int32(sourceStat.Mtim.Nsec), MtimeSeconds: sourceStat.Mtim.Sec, Size: uint64(sourceStat.Size), Hash: hashVal} newInode := sub.Inode{Name: dest, GenericInode: newRegularInode} var updateRequest sub.UpdateRequest var updateReply sub.UpdateResponse updateRequest.Wait = true updateRequest.InodesToMake = append(updateRequest.InodesToMake, newInode) if *triggersFile != "" { updateRequest.Triggers, err = triggers.Load(*triggersFile) if err != nil { return err } } else if *triggersString != "" { updateRequest.Triggers, err = triggers.Decode([]byte(*triggersString)) if err != nil { return err } } startTime := showStart("Subd.Update()") err = client.CallUpdate(srpcClient, updateRequest, &updateReply) showTimeTaken(startTime) return err }
func makeDirectory(request *subproto.UpdateRequest, requiredInode *filesystem.DirectoryInode, pathName string, create bool) { var newInode subproto.Inode newInode.Name = pathName var newDirectoryInode filesystem.DirectoryInode newDirectoryInode.Mode = requiredInode.Mode newDirectoryInode.Uid = requiredInode.Uid newDirectoryInode.Gid = requiredInode.Gid newInode.GenericInode = &newDirectoryInode if create { request.DirectoriesToMake = append(request.DirectoriesToMake, newInode) } else { request.InodesToChange = append(request.InodesToChange, newInode) } }
// Returns (idle, missing), idle=true if no update needs to be performed. func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) ( bool, bool) { waitStartTime := time.Now() sub.herd.computeSemaphore <- struct{}{} computeSlotWaitTimeDistribution.Add(time.Since(waitStartTime)) defer func() { <-sub.herd.computeSemaphore }() request.ImageName = sub.requiredImageName request.Triggers = sub.requiredImage.Triggers var rusageStart, rusageStop syscall.Rusage computeStartTime := time.Now() syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStart) subObj := lib.Sub{ Hostname: sub.mdb.Hostname, FileSystem: sub.fileSystem, ComputedInodes: sub.computedInodes, ObjectCache: sub.objectCache} if lib.BuildUpdateRequest(subObj, sub.requiredImage, request, false, sub.herd.logger) { return false, true } syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStop) computeTimeDistribution.Add(time.Since(computeStartTime)) sub.lastComputeUpdateCpuDuration = time.Duration( rusageStop.Utime.Sec)*time.Second + time.Duration(rusageStop.Utime.Usec)*time.Microsecond - time.Duration(rusageStart.Utime.Sec)*time.Second - time.Duration(rusageStart.Utime.Usec)*time.Microsecond computeCpuTimeDistribution.Add(sub.lastComputeUpdateCpuDuration) if len(request.FilesToCopyToCache) > 0 || len(request.InodesToMake) > 0 || len(request.HardlinksToMake) > 0 || len(request.PathsToDelete) > 0 || len(request.DirectoriesToMake) > 0 || len(request.InodesToChange) > 0 || sub.lastSuccessfulImageName != sub.requiredImageName { sub.herd.logger.Printf( "buildUpdateRequest(%s) took: %s user CPU time\n", sub, sub.lastComputeUpdateCpuDuration) return false, false } return true, false }
func (sub *Sub) updateMetadata(request *subproto.UpdateRequest, requiredEntry *filesystem.DirectoryEntry, myPathName string) { if sub.inodesChanged[requiredEntry.InodeNumber] { return } var inode subproto.Inode inode.Name = myPathName inode.GenericInode = requiredEntry.Inode() request.InodesToChange = append(request.InodesToChange, inode) sub.inodesChanged[requiredEntry.InodeNumber] = true }
func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) { fmt.Println("buildUpdateRequest()") // TODO(rgooch): Delete debugging. subFS := sub.fileSystem requiredImage := sub.herd.getImage(sub.requiredImage) requiredFS := requiredImage.FileSystem filter := requiredImage.Filter request.Triggers = requiredImage.Triggers var state state state.subInodeToRequiredInode = make(map[uint64]uint64) compareDirectories(request, &state, &subFS.Directory, &requiredFS.Directory, "", filter) // TODO(rgooch): Implement this. }
func compareDirectories(request *subproto.UpdateRequest, state *state, subDirectory, requiredDirectory *filesystem.DirectoryInode, myPathName string, filter *filter.Filter) { // First look for entries that should be deleted. if subDirectory != nil { for name := range subDirectory.EntriesByName { pathname := path.Join(myPathName, name) if filter.Match(pathname) { continue } if _, ok := requiredDirectory.EntriesByName[name]; !ok { request.PathsToDelete = append(request.PathsToDelete, pathname) } } } for name, requiredEntry := range requiredDirectory.EntriesByName { pathname := path.Join(myPathName, name) if filter.Match(pathname) { continue } var subEntry *filesystem.DirectoryEntry if subDirectory != nil { if se, ok := subDirectory.EntriesByName[name]; ok { subEntry = se } } if subEntry == nil { addEntry(request, state, requiredEntry, pathname) } else { compareEntries(request, state, subEntry, requiredEntry, pathname, filter) } // If a directory: descend (possibly with the directory for the sub). requiredInode := requiredEntry.Inode() if requiredInode, ok := requiredInode.(*filesystem.DirectoryInode); ok { var subInode *filesystem.DirectoryInode if subEntry != nil { if si, ok := subEntry.Inode().(*filesystem.DirectoryInode); ok { subInode = si } } compareDirectories(request, state, subInode, requiredInode, pathname, filter) } } }
// compareDirectories recursively compares the sub tree against the required
// tree, substituting computed files for ComputedRegularInode placeholders.
// Returns true if there is a failure due to missing computed files.
func (sub *Sub) compareDirectories(request *subproto.UpdateRequest,
	subDirectory, requiredDirectory *filesystem.DirectoryInode,
	myPathName string, deleteMissingComputedFiles bool,
	logger *log.Logger) bool {
	// First look for entries that should be deleted.
	// Deletions are only computed when a filter is present.
	if sub.filter != nil && subDirectory != nil {
		for name := range subDirectory.EntriesByName {
			pathname := path.Join(myPathName, name)
			if sub.filter.Match(pathname) {
				continue
			}
			if _, ok := requiredDirectory.EntriesByName[name]; !ok {
				request.PathsToDelete = append(request.PathsToDelete, pathname)
			}
		}
	}
	for name, requiredEntry := range requiredDirectory.EntriesByName {
		pathname := path.Join(myPathName, name)
		if sub.filter != nil && sub.filter.Match(pathname) {
			continue
		}
		// subEntry is nil when the entry is absent on the sub.
		var subEntry *filesystem.DirectoryEntry
		if subDirectory != nil {
			if se, ok := subDirectory.EntriesByName[name]; ok {
				subEntry = se
			}
		}
		requiredInode := requiredEntry.Inode()
		if _, ok := requiredInode.(*filesystem.ComputedRegularInode); ok {
			// Replace with computed file.
			inode, ok := sub.ComputedInodes[pathname]
			if !ok {
				// Computed file unavailable: either delete the sub's copy
				// (best-effort mode) or report failure.
				if deleteMissingComputedFiles {
					if subEntry != nil {
						request.PathsToDelete = append(request.PathsToDelete,
							pathname)
					}
					continue
				}
				logger.Printf(
					"compareDirectories(%s): missing computed file: %s\n",
					sub, pathname)
				return true
			}
			setComputedFileMtime(inode, subEntry)
			// Substitute a synthetic entry carrying the computed inode so the
			// comparison below uses the computed content.
			newEntry := new(filesystem.DirectoryEntry)
			newEntry.Name = name
			newEntry.InodeNumber = requiredEntry.InodeNumber
			newEntry.SetInode(inode)
			requiredEntry = newEntry
		}
		if subEntry == nil {
			sub.addEntry(request, requiredEntry, pathname)
		} else {
			sub.compareEntries(request, subEntry, requiredEntry, pathname)
		}
		// If a directory: descend (possibly with the directory for the sub).
		if requiredInode, ok := requiredInode.(*filesystem.DirectoryInode); ok {
			var subInode *filesystem.DirectoryInode
			if subEntry != nil {
				if si, ok := subEntry.Inode().(*filesystem.DirectoryInode); ok {
					subInode = si
				}
			}
			sub.compareDirectories(request, subInode, requiredInode, pathname,
				deleteMissingComputedFiles, logger)
		}
	}
	return false
}
func makeHardlink(request *subproto.UpdateRequest, newLink, target string) { var hardlink subproto.Hardlink hardlink.NewLink = newLink hardlink.Target = target request.HardlinksToMake = append(request.HardlinksToMake, hardlink) }
// updateAndUnlock applies the update request to the file-system rooted at
// rootDirectoryName. The sequence is order-critical: "stop" triggers matched
// against the previous update's trigger set run first (after a dry pass that
// only marks matches), then the real file-system changes are applied, then
// the new trigger set is saved and its "start" triggers run.
// Always clears the update-in-progress flag and re-enables the scanner.
func (t *rpcType) updateAndUnlock(request sub.UpdateRequest,
	rootDirectoryName string) error {
	defer t.clearUpdateInProgress()
	// Pause scanning while the file-system is mutated.
	t.disableScannerFunc(true)
	defer t.disableScannerFunc(false)
	startTime := time.Now()
	if request.Triggers == nil {
		request.Triggers = triggers.New()
	}
	// Load the trigger set saved by the previous update (best-effort).
	var oldTriggers triggers.Triggers
	file, err := os.Open(t.oldTriggersFilename)
	if err == nil {
		decoder := json.NewDecoder(file)
		var trig triggers.Triggers
		err = decoder.Decode(&trig.Triggers)
		file.Close()
		if err == nil {
			oldTriggers = trig
		} else {
			t.logger.Printf("Error decoding old triggers: %s", err.Error())
		}
	}
	t.copyFilesToCache(request.FilesToCopyToCache, rootDirectoryName)
	t.makeObjectCopies(request.MultiplyUsedObjects)
	t.lastUpdateHadTriggerFailures = false
	if len(oldTriggers.Triggers) > 0 {
		// Dry pass (final arg false): mark which old triggers the pending
		// changes would match, without modifying the file-system.
		t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
			&oldTriggers, false)
		t.makeInodes(request.InodesToMake, rootDirectoryName,
			request.MultiplyUsedObjects, &oldTriggers, false)
		t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
			&oldTriggers, "", false)
		t.doDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,
			false)
		t.changeInodes(request.InodesToChange, rootDirectoryName, &oldTriggers,
			false)
		matchedOldTriggers := oldTriggers.GetMatchedTriggers()
		// Run "stop" actions for matched old triggers before changing files.
		if runTriggers(matchedOldTriggers, "stop", t.logger) {
			t.lastUpdateHadTriggerFailures = true
		}
	}
	// Real pass (final arg true): apply the file-system changes.
	fsChangeStartTime := time.Now()
	t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
		request.Triggers, true)
	t.makeInodes(request.InodesToMake, rootDirectoryName,
		request.MultiplyUsedObjects, request.Triggers, true)
	t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
		request.Triggers, t.objectsDir, true)
	t.doDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,
		true)
	t.changeInodes(request.InodesToChange, rootDirectoryName, request.Triggers,
		true)
	fsChangeDuration := time.Since(fsChangeStartTime)
	matchedNewTriggers := request.Triggers.GetMatchedTriggers()
	// Persist the new trigger set for the next update's "stop" pass.
	file, err = os.Create(t.oldTriggersFilename)
	if err == nil {
		writer := bufio.NewWriter(file)
		if err := jsonlib.WriteWithIndent(writer, " ",
			request.Triggers.Triggers); err != nil {
			t.logger.Printf("Error marshaling triggers: %s", err)
		}
		writer.Flush()
		file.Close()
	}
	if runTriggers(matchedNewTriggers, "start", t.logger) {
		t.lastUpdateHadTriggerFailures = true
	}
	timeTaken := time.Since(startTime)
	if t.lastUpdateError != nil {
		t.logger.Printf("Update(): last error: %s\n", t.lastUpdateError)
	} else {
		// Record the successful image name under the write lock.
		t.rwLock.Lock()
		t.lastSuccessfulImageName = request.ImageName
		t.rwLock.Unlock()
	}
	t.logger.Printf("Update() completed in %s (change window: %s)\n",
		timeTaken, fsChangeDuration)
	return t.lastUpdateError
}
// addInode queues creation of the inode for requiredEntry at myPathName.
// It prefers, in order: hardlinking to an inode already created during this
// request; hardlinking to an existing sibling with identical metadata and
// data; hardlinking to a sibling with identical data after queueing a
// metadata fix. Otherwise the inode is created outright, ensuring its data
// is copied into the object cache from an existing file first.
// Panics if a needed object is in neither the cache nor the file-system.
func addInode(request *subproto.UpdateRequest, state *state,
	requiredEntry *filesystem.DirectoryEntry, myPathName string) {
	requiredInode := requiredEntry.Inode()
	// Already created this inode in this request: just link to it.
	if name, ok := state.inodesCreated[requiredEntry.InodeNumber]; ok {
		makeHardlink(request, myPathName, name)
		return
	}
	// Try to find a sibling inode.
	names := state.requiredFS.InodeToFilenamesTable[requiredEntry.InodeNumber]
	if len(names) > 1 {
		var sameDataInode filesystem.GenericInode
		var sameDataName string
		for _, name := range names {
			if inum, found := state.getSubInodeFromFilename(name); found {
				subInode := state.subFS.InodeTable[inum]
				_, sameMetadata, sameData := filesystem.CompareInodes(
					subInode, requiredInode, nil)
				if sameMetadata && sameData {
					// Perfect match: link and stop.
					makeHardlink(request, myPathName, name)
					return
				}
				if sameData {
					// Remember a data-only match as a fallback.
					sameDataInode = subInode
					sameDataName = name
				}
			}
		}
		if sameDataInode != nil {
			// Fix the sibling's metadata, then link to it.
			updateMetadata(request, state, requiredEntry, sameDataName)
			makeHardlink(request, myPathName, sameDataName)
			return
		}
	}
	if inode, ok := requiredEntry.Inode().(*filesystem.RegularInode); ok {
		if inode.Size > 0 {
			if _, ok := state.subObjectCacheUsage[inode.Hash]; ok {
				// Object already in cache: count another use.
				state.subObjectCacheUsage[inode.Hash]++
			} else {
				// Not in object cache: grab it from file-system.
				if state.subFS.HashToInodesTable == nil {
					state.subFS.BuildHashToInodesTable()
				}
				if ilist, ok := state.subFS.HashToInodesTable[inode.Hash]; ok {
					// Copy from the first filename holding this hash.
					var fileToCopy subproto.FileToCopyToCache
					fileToCopy.Name = state.subFS.InodeToFilenamesTable[ilist[0]][0]
					fileToCopy.Hash = inode.Hash
					request.FilesToCopyToCache = append(
						request.FilesToCopyToCache, fileToCopy)
					state.subObjectCacheUsage[inode.Hash] = 1
				} else {
					// Invariant violation: data must exist somewhere on the
					// sub before an inode referencing it can be made.
					panic("No object in cache for: " + myPathName)
				}
			}
		}
	}
	var inode subproto.Inode
	inode.Name = myPathName
	inode.GenericInode = requiredEntry.Inode()
	request.InodesToMake = append(request.InodesToMake, inode)
	// Record so later entries for the same inode become hardlinks.
	state.inodesCreated[requiredEntry.InodeNumber] = myPathName
}
// pushImage pushes the image named imageName to the sub: it fetches the
// image (in the background), scans local computed files if configured,
// optionally overrides the image filter/triggers from flags, fetches any
// missing objects to the sub, builds the update request and issues it.
// Timing for each phase is reported via showStart/showTimeTaken.
func pushImage(getSubClient getSubClientFunc, imageName string) error {
	logger := log.New(os.Stderr, "", log.LstdFlags)
	computedInodes := make(map[string]*filesystem.RegularInode)
	// Start querying the imageserver for the image.
	imageServerAddress := fmt.Sprintf("%s:%d",
		*imageServerHostname, *imageServerPortNum)
	imgChannel := getImageChannel(imageServerAddress, imageName, timeoutTime)
	startTime := showStart("getSubClient()")
	srpcClient := getSubClient()
	showTimeTaken(startTime)
	subObj := lib.Sub{
		Hostname:       *subHostname,
		Client:         srpcClient,
		ComputedInodes: computedInodes}
	if *computedFilesRoot == "" {
		// No computed files configured: use a getter that provides nothing.
		subObj.ObjectGetter = nullObjectGetterType{}
	} else {
		// Scan the computed files tree and index its regular inodes by path.
		fs, err := scanner.ScanFileSystem(*computedFilesRoot, nil, nil, nil,
			nil, nil)
		if err != nil {
			return err
		}
		subObj.ObjectGetter = fs
		for filename, inum := range fs.FilenameToInodeTable() {
			if inode, ok := fs.InodeTable[inum].(*filesystem.RegularInode); ok {
				computedInodes[filename] = inode
			}
		}
	}
	// Wait for the background image fetch to complete.
	startTime = showStart("<-imgChannel")
	imageResult := <-imgChannel
	showTimeTaken(startTime)
	fmt.Fprintf(os.Stderr, "Background image fetch took %s\n",
		format.Duration(imageResult.duration))
	img := imageResult.image
	var err error
	// Optional flag overrides for the image's filter and triggers.
	if *filterFile != "" {
		img.Filter, err = filter.Load(*filterFile)
		if err != nil {
			return err
		}
	}
	if *triggersFile != "" {
		img.Triggers, err = triggers.Load(*triggersFile)
		if err != nil {
			return err
		}
	} else if *triggersString != "" {
		img.Triggers, err = triggers.Decode([]byte(*triggersString))
		if err != nil {
			return err
		}
	}
	// Ensure the sub has all objects needed for the image.
	if err := pollFetchAndPush(&subObj, img, imageServerAddress, timeoutTime,
		logger); err != nil {
		return err
	}
	var updateRequest sub.UpdateRequest
	var updateReply sub.UpdateResponse
	startTime = showStart("lib.BuildUpdateRequest()")
	if lib.BuildUpdateRequest(subObj, img, &updateRequest, true, logger) {
		showBlankLine()
		return errors.New("missing computed file(s)")
	}
	showTimeTaken(startTime)
	updateRequest.ImageName = imageName
	updateRequest.Wait = true
	startTime = showStart("Subd.Update()")
	err = client.CallUpdate(srpcClient, updateRequest, &updateReply)
	if err != nil {
		showBlankLine()
		return err
	}
	showTimeTaken(startTime)
	return nil
}