// Returns true if there is a failure due to missing computed files. func (sub *Sub) buildUpdateRequest(image *image.Image, request *subproto.UpdateRequest, deleteMissingComputedFiles bool, logger *log.Logger) bool { sub.requiredFS = image.FileSystem sub.filter = image.Filter request.Triggers = image.Triggers sub.requiredInodeToSubInode = make(map[uint64]uint64) sub.inodesChanged = make(map[uint64]bool) sub.inodesCreated = make(map[uint64]string) sub.subObjectCacheUsage = make(map[hash.Hash]uint64, len(sub.ObjectCache)) // Populate subObjectCacheUsage. for _, hash := range sub.ObjectCache { sub.subObjectCacheUsage[hash] = 0 } if !filesystem.CompareDirectoriesMetadata(&sub.FileSystem.DirectoryInode, &sub.requiredFS.DirectoryInode, nil) { makeDirectory(request, &sub.requiredFS.DirectoryInode, "/", false) } if sub.compareDirectories(request, &sub.FileSystem.DirectoryInode, &sub.requiredFS.DirectoryInode, "/", deleteMissingComputedFiles, logger) { return true } // Look for multiply used objects and tell the sub. for obj, useCount := range sub.subObjectCacheUsage { if useCount > 1 { if request.MultiplyUsedObjects == nil { request.MultiplyUsedObjects = make(map[hash.Hash]uint64) } request.MultiplyUsedObjects[obj] = useCount } } return false }
func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) { fmt.Println("buildUpdateRequest()") // TODO(rgooch): Delete debugging. subFS := sub.fileSystem requiredImage := sub.herd.getImage(sub.requiredImage) requiredFS := requiredImage.FileSystem filter := requiredImage.Filter request.Triggers = requiredImage.Triggers var state state state.subInodeToRequiredInode = make(map[uint64]uint64) compareDirectories(request, &state, &subFS.Directory, &requiredFS.Directory, "", filter) // TODO(rgooch): Implement this. }
// Returns true if no update needs to be performed. func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) bool { var state state state.subFS = &sub.fileSystem.FileSystem requiredImage := sub.herd.getImage(sub.requiredImage) state.requiredFS = requiredImage.FileSystem filter := requiredImage.Filter request.Triggers = requiredImage.Triggers state.requiredInodeToSubInode = make(map[uint64]uint64) state.inodesChanged = make(map[uint64]bool) state.inodesCreated = make(map[uint64]string) state.subObjectCacheUsage = make(map[hash.Hash]uint64, len(sub.fileSystem.ObjectCache)) var rusageStart, rusageStop syscall.Rusage syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStart) // Populate subObjectCacheUsage. for _, hash := range sub.fileSystem.ObjectCache { state.subObjectCacheUsage[hash] = 0 } compareDirectories(request, &state, &state.subFS.DirectoryInode, &state.requiredFS.DirectoryInode, "/", filter) // Look for multiply used objects and tell the sub. for obj, useCount := range state.subObjectCacheUsage { if useCount > 1 { if request.MultiplyUsedObjects == nil { request.MultiplyUsedObjects = make(map[hash.Hash]uint64) } request.MultiplyUsedObjects[obj] = useCount } } syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStop) sub.lastComputeUpdateCpuDuration = time.Duration( rusageStop.Utime.Sec)*time.Second + time.Duration(rusageStop.Utime.Usec)*time.Microsecond - time.Duration(rusageStart.Utime.Sec)*time.Second - time.Duration(rusageStart.Utime.Usec)*time.Microsecond computeCpuTimeDistribution.Add(sub.lastComputeUpdateCpuDuration) if len(request.FilesToCopyToCache) > 0 || len(request.InodesToMake) > 0 || len(request.HardlinksToMake) > 0 || len(request.PathsToDelete) > 0 || len(request.DirectoriesToMake) > 0 || len(request.InodesToChange) > 0 { sub.herd.logger.Printf( "buildUpdateRequest(%s) took: %s user CPU time\n", sub.hostname, sub.lastComputeUpdateCpuDuration) return false } return true }
// Returns (idle, missing), idle=true if no update needs to be performed. func (sub *Sub) buildUpdateRequest(request *subproto.UpdateRequest) ( bool, bool) { waitStartTime := time.Now() sub.herd.computeSemaphore <- struct{}{} computeSlotWaitTimeDistribution.Add(time.Since(waitStartTime)) defer func() { <-sub.herd.computeSemaphore }() request.ImageName = sub.requiredImageName request.Triggers = sub.requiredImage.Triggers var rusageStart, rusageStop syscall.Rusage computeStartTime := time.Now() syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStart) subObj := lib.Sub{ Hostname: sub.mdb.Hostname, FileSystem: sub.fileSystem, ComputedInodes: sub.computedInodes, ObjectCache: sub.objectCache} if lib.BuildUpdateRequest(subObj, sub.requiredImage, request, false, sub.herd.logger) { return false, true } syscall.Getrusage(syscall.RUSAGE_SELF, &rusageStop) computeTimeDistribution.Add(time.Since(computeStartTime)) sub.lastComputeUpdateCpuDuration = time.Duration( rusageStop.Utime.Sec)*time.Second + time.Duration(rusageStop.Utime.Usec)*time.Microsecond - time.Duration(rusageStart.Utime.Sec)*time.Second - time.Duration(rusageStart.Utime.Usec)*time.Microsecond computeCpuTimeDistribution.Add(sub.lastComputeUpdateCpuDuration) if len(request.FilesToCopyToCache) > 0 || len(request.InodesToMake) > 0 || len(request.HardlinksToMake) > 0 || len(request.PathsToDelete) > 0 || len(request.DirectoriesToMake) > 0 || len(request.InodesToChange) > 0 || sub.lastSuccessfulImageName != sub.requiredImageName { sub.herd.logger.Printf( "buildUpdateRequest(%s) took: %s user CPU time\n", sub, sub.lastComputeUpdateCpuDuration) return false, false } return true, false }
// updateAndUnlock applies the file-system changes in request beneath
// rootDirectoryName. Triggers saved by the previous update are run as "stop"
// triggers before the changes; this request's matched triggers are run as
// "start" triggers afterwards. The scanner is disabled for the duration and
// the update-in-progress flag is cleared on return. Returns the last recorded
// update error, if any.
func (t *rpcType) updateAndUnlock(request sub.UpdateRequest,
	rootDirectoryName string) error {
	defer t.clearUpdateInProgress()
	t.disableScannerFunc(true)
	defer t.disableScannerFunc(false)
	startTime := time.Now()
	if request.Triggers == nil {
		request.Triggers = triggers.New()
	}
	// Load the triggers saved by the previous update. A missing or corrupt
	// file is not fatal: the old-trigger pass is simply skipped.
	var oldTriggers triggers.Triggers
	file, err := os.Open(t.oldTriggersFilename)
	if err == nil {
		decoder := json.NewDecoder(file)
		var trig triggers.Triggers
		err = decoder.Decode(&trig.Triggers)
		file.Close()
		if err == nil {
			oldTriggers = trig
		} else {
			t.logger.Printf("Error decoding old triggers: %s", err.Error())
		}
	}
	t.copyFilesToCache(request.FilesToCopyToCache, rootDirectoryName)
	t.makeObjectCopies(request.MultiplyUsedObjects)
	t.lastUpdateHadTriggerFailures = false
	if len(oldTriggers.Triggers) > 0 {
		// First pass with final argument false — presumably match-only (no
		// file-system changes applied; TODO confirm against the helpers) —
		// so the old triggers matching the pending changes can be run as
		// "stop" triggers before anything is modified.
		t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
			&oldTriggers, false)
		t.makeInodes(request.InodesToMake, rootDirectoryName,
			request.MultiplyUsedObjects, &oldTriggers, false)
		t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
			&oldTriggers, "", false)
		t.doDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,
			false)
		t.changeInodes(request.InodesToChange, rootDirectoryName, &oldTriggers,
			false)
		matchedOldTriggers := oldTriggers.GetMatchedTriggers()
		if runTriggers(matchedOldTriggers, "stop", t.logger) {
			t.lastUpdateHadTriggerFailures = true
		}
	}
	// Second pass with final argument true: apply the changes, matching them
	// against this request's triggers. The change window is timed.
	fsChangeStartTime := time.Now()
	t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
		request.Triggers, true)
	t.makeInodes(request.InodesToMake, rootDirectoryName,
		request.MultiplyUsedObjects, request.Triggers, true)
	t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
		request.Triggers, t.objectsDir, true)
	t.doDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,
		true)
	t.changeInodes(request.InodesToChange, rootDirectoryName, request.Triggers,
		true)
	fsChangeDuration := time.Since(fsChangeStartTime)
	matchedNewTriggers := request.Triggers.GetMatchedTriggers()
	// Save this request's triggers for the next update's "stop" pass.
	// NOTE(review): a Create failure is silently skipped — presumably
	// best-effort persistence; confirm this is intended.
	file, err = os.Create(t.oldTriggersFilename)
	if err == nil {
		writer := bufio.NewWriter(file)
		if err := jsonlib.WriteWithIndent(writer, "    ",
			request.Triggers.Triggers); err != nil {
			t.logger.Printf("Error marshaling triggers: %s", err)
		}
		writer.Flush()
		file.Close()
	}
	if runTriggers(matchedNewTriggers, "start", t.logger) {
		t.lastUpdateHadTriggerFailures = true
	}
	timeTaken := time.Since(startTime)
	if t.lastUpdateError != nil {
		t.logger.Printf("Update(): last error: %s\n", t.lastUpdateError)
	} else {
		// Record the successfully applied image under the lock shared with
		// readers of lastSuccessfulImageName.
		t.rwLock.Lock()
		t.lastSuccessfulImageName = request.ImageName
		t.rwLock.Unlock()
	}
	t.logger.Printf("Update() completed in %s (change window: %s)\n",
		timeTaken, fsChangeDuration)
	return t.lastUpdateError
}