func makeHardlinks(hardlinksToMake []sub.Hardlink, rootDirectoryName string,
    triggers *triggers.Triggers, tmpDir string, takeAction bool,
    logger *log.Logger) {
    tmpName := path.Join(tmpDir, "temporaryHardlink")
    for _, hardlink := range hardlinksToMake {
        triggers.Match(hardlink.NewLink)
        if takeAction {
            targetPathname := path.Join(rootDirectoryName, hardlink.Target)
            linkPathname := path.Join(rootDirectoryName, hardlink.NewLink)
            // A Link directly to linkPathname will fail if it exists, so do a
            // Link+Rename using a temporary filename.
            if err := fsutil.ForceLink(targetPathname, tmpName); err != nil {
                logger.Println(err)
                continue
            }
            if err := fsutil.ForceRename(tmpName, linkPathname); err != nil {
                logger.Println(err)
                if err := fsutil.ForceRemove(tmpName); err != nil {
                    logger.Println(err)
                }
            } else {
                logger.Printf("Linked: %s => %s\n", linkPathname,
                    targetPathname)
            }
        }
    }
}
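// The Link+Rename dance above can be sketched with only the standard library.
// This is a hedged approximation of what the fsutil.ForceLink/ForceRename
// helpers accomplish; replaceWithHardlink is a hypothetical name, not the
// real library code, and it ignores the permission handling the real helpers
// may perform.
func replaceWithHardlink(target, linkPath, tmpDir string) error {
    tmpName := filepath.Join(tmpDir, "temporaryHardlink")
    os.Remove(tmpName) // the temporary name may be left over from a prior run
    if err := os.Link(target, tmpName); err != nil {
        return err
    }
    // rename(2) atomically replaces linkPath if it already exists, which a
    // direct link(2) would refuse to do.
    if err := os.Rename(tmpName, linkPath); err != nil {
        os.Remove(tmpName)
        return err
    }
    return nil
}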
func doUpdate(request sub.UpdateRequest, rootDirectoryName string) {
    defer clearUpdateInProgress()
    // Load the triggers saved by the previous update, if any.
    var oldTriggers triggers.Triggers
    file, err := os.Open(oldTriggersFilename)
    if err == nil {
        decoder := json.NewDecoder(file)
        var trig triggers.Triggers
        err = decoder.Decode(&trig.Triggers)
        file.Close()
        if err == nil {
            oldTriggers = trig
        } else {
            logger.Printf("Error decoding old triggers: %s", err.Error())
        }
    }
    if len(oldTriggers.Triggers) > 0 {
        // Dry-run pass (takeAction=false) to match the old triggers, then
        // stop the affected services before changing anything.
        processDeletes(request, rootDirectoryName, &oldTriggers, false)
        processMakeDirectories(request, rootDirectoryName, &oldTriggers, false)
        matchedOldTriggers := oldTriggers.GetMatchedTriggers()
        runTriggers(matchedOldTriggers, "stop")
    }
    // Real pass, matching against the new triggers.
    processDeletes(request, rootDirectoryName, request.Triggers, true)
    processMakeDirectories(request, rootDirectoryName, request.Triggers, true)
    matchedNewTriggers := request.Triggers.GetMatchedTriggers()
    // Save the new triggers for the next update.
    file, err = os.Create(oldTriggersFilename)
    if err == nil {
        b, err := json.Marshal(request.Triggers.Triggers)
        if err == nil {
            var out bytes.Buffer
            json.Indent(&out, b, "", " ")
            out.WriteTo(file)
        } else {
            logger.Printf("Error marshaling triggers: %s", err.Error())
        }
        file.Close()
    }
    runTriggers(matchedNewTriggers, "start")
    // TODO(rgooch): Remove debugging hack and implement.
    time.Sleep(time.Second * 15)
    logger.Printf("Update() complete\n")
}
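// The old-triggers bookkeeping in doUpdate is a plain JSON round-trip: decode
// the previously saved trigger list on the way in, re-encode the new list
// (indented) on the way out. A minimal, hedged sketch of that pattern; the
// helper names are hypothetical and the payload is an interface{} so no
// trigger field names are assumed.
func saveIndentedJSON(filename string, data interface{}) error {
    b, err := json.Marshal(data)
    if err != nil {
        return err
    }
    var out bytes.Buffer
    if err := json.Indent(&out, b, "", "    "); err != nil {
        return err
    }
    return os.WriteFile(filename, out.Bytes(), 0644)
}

func loadJSON(filename string, data interface{}) error {
    file, err := os.Open(filename)
    if err != nil {
        return err
    }
    defer file.Close()
    return json.NewDecoder(file).Decode(data)
}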
func processDeletes(request sub.UpdateRequest, rootDirectoryName string,
    triggers *triggers.Triggers, takeAction bool) {
    for _, pathname := range request.PathsToDelete {
        fullPathname := path.Join(rootDirectoryName, pathname)
        triggers.Match(pathname)
        if takeAction {
            // TODO(rgooch): Remove debugging.
            fmt.Printf("Delete: %s\n", fullPathname)
            // TODO(rgooch): Implement.
        }
    }
}
func doDeletes(pathsToDelete []string, rootDirectoryName string,
    triggers *triggers.Triggers, takeAction bool, logger *log.Logger) {
    for _, pathname := range pathsToDelete {
        fullPathname := path.Join(rootDirectoryName, pathname)
        triggers.Match(pathname)
        if takeAction {
            if err := fsutil.ForceRemoveAll(fullPathname); err != nil {
                logger.Println(err)
            } else {
                logger.Printf("Deleted: %s\n", fullPathname)
            }
        }
    }
}
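// fsutil.ForceRemoveAll is not shown in this listing. A plausible, hedged
// approximation is to retry a failed os.RemoveAll after making the offending
// directories writable; forceRemoveAllSketch is a hypothetical stand-in, not
// the real library implementation.
func forceRemoveAllSketch(pathname string) error {
    if err := os.RemoveAll(pathname); err == nil {
        return nil
    }
    // Make directories under pathname writable, then retry once.
    filepath.Walk(pathname, func(p string, info os.FileInfo, err error) error {
        if err == nil && info.IsDir() {
            os.Chmod(p, info.Mode()|0700)
        }
        return nil
    })
    return os.RemoveAll(pathname)
}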
func changeInodes(inodesToChange []sub.Inode, rootDirectoryName string,
    triggers *triggers.Triggers, takeAction bool) {
    for _, inode := range inodesToChange {
        fullPathname := path.Join(rootDirectoryName, inode.Name)
        triggers.Match(inode.Name)
        if takeAction {
            if err := inode.WriteMetadata(fullPathname); err != nil {
                logger.Println(err)
                continue
            }
            logger.Printf("Changed inode: %s\n", fullPathname)
        }
    }
}
func (t *rpcType) doDeletes(pathsToDelete []string, rootDirectoryName string,
    triggers *triggers.Triggers, takeAction bool) {
    for _, pathname := range pathsToDelete {
        fullPathname := path.Join(rootDirectoryName, pathname)
        triggers.Match(pathname)
        if takeAction {
            if err := fsutil.ForceRemoveAll(fullPathname); err != nil {
                t.lastUpdateError = err
                t.logger.Println(err)
            } else {
                t.logger.Printf("Deleted: %s\n", fullPathname)
            }
        }
    }
}
func (t *rpcType) changeInodes(inodesToChange []sub.Inode,
    rootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {
    for _, inode := range inodesToChange {
        fullPathname := path.Join(rootDirectoryName, inode.Name)
        triggers.Match(inode.Name)
        if takeAction {
            if err := filesystem.ForceWriteMetadata(inode,
                fullPathname); err != nil {
                t.lastUpdateError = err
                t.logger.Println(err)
                continue
            }
            t.logger.Printf("Changed inode: %s\n", fullPathname)
        }
    }
}
func makeHardlinks(hardlinksToMake []sub.Hardlink, rootDirectoryName string,
    triggers *triggers.Triggers, takeAction bool) {
    for _, hardlink := range hardlinksToMake {
        triggers.Match(hardlink.NewLink)
        if takeAction {
            targetPathname := path.Join(rootDirectoryName, hardlink.Target)
            linkPathname := path.Join(rootDirectoryName, hardlink.NewLink)
            if err := os.Link(targetPathname, linkPathname); err != nil {
                logger.Println(err)
            } else {
                logger.Printf("Linked: %s => %s\n", linkPathname,
                    targetPathname)
            }
        }
    }
}
func makeInodes(inodesToMake []sub.Inode, rootDirectoryName string,
    multiplyUsedObjects map[hash.Hash]uint64, triggers *triggers.Triggers,
    takeAction bool) {
    for _, inode := range inodesToMake {
        fullPathname := path.Join(rootDirectoryName, inode.Name)
        triggers.Match(inode.Name)
        if takeAction {
            switch inode := inode.GenericInode.(type) {
            case *filesystem.RegularInode:
                makeRegularInode(fullPathname, inode, multiplyUsedObjects)
            case *filesystem.SymlinkInode:
                makeSymlinkInode(fullPathname, inode)
            case *filesystem.SpecialInode:
                makeSpecialInode(fullPathname, inode)
            }
        }
    }
}
func processMakeDirectories(request sub.UpdateRequest,
    rootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {
    for _, newdir := range request.DirectoriesToMake {
        if scannerConfiguration.ScanFilter.Match(newdir.Name) {
            continue
        }
        if newdir.Name == "/.subd" {
            continue
        }
        if strings.HasPrefix(newdir.Name, "/.subd/") {
            continue
        }
        fullPathname := path.Join(rootDirectoryName, newdir.Name)
        triggers.Match(newdir.Name)
        if takeAction {
            // TODO(rgooch): Remove debugging.
            fmt.Printf("Mkdir: %s\n", fullPathname)
            // TODO(rgooch): Implement.
        }
    }
}
func (t *rpcType) makeDirectories(directoriesToMake []sub.Inode,
    rootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {
    for _, newdir := range directoriesToMake {
        if t.skipPath(newdir.Name) {
            continue
        }
        fullPathname := path.Join(rootDirectoryName, newdir.Name)
        triggers.Match(newdir.Name)
        if takeAction {
            inode, ok := newdir.GenericInode.(*filesystem.DirectoryInode)
            if !ok {
                // Printf, not Println: the %s verb must be expanded.
                t.logger.Printf("%s is not a directory!\n", newdir.Name)
                continue
            }
            if err := inode.Write(fullPathname); err != nil {
                t.logger.Println(err)
            } else {
                t.logger.Printf("Made directory: %s\n", fullPathname)
            }
        }
    }
}
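// t.skipPath is not shown in this listing. A hedged sketch, consistent with
// the checks in the older processMakeDirectories above, would fold the
// scan-filter and /.subd exclusions into one helper; the method name and the
// scannerConfiguration field are assumptions for illustration only.
func (t *rpcType) skipPathSketch(pathname string) bool {
    if t.scannerConfiguration.ScanFilter.Match(pathname) {
        return true
    }
    if pathname == "/.subd" {
        return true
    }
    return strings.HasPrefix(pathname, "/.subd/")
}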
func (t *rpcType) makeInodes(inodesToMake []sub.Inode,
    rootDirectoryName string, multiplyUsedObjects map[hash.Hash]uint64,
    triggers *triggers.Triggers, takeAction bool) {
    for _, inode := range inodesToMake {
        fullPathname := path.Join(rootDirectoryName, inode.Name)
        triggers.Match(inode.Name)
        if takeAction {
            var err error
            switch inode := inode.GenericInode.(type) {
            case *filesystem.RegularInode:
                err = makeRegularInode(fullPathname, inode,
                    multiplyUsedObjects, t.objectsDir, t.logger)
            case *filesystem.SymlinkInode:
                err = makeSymlinkInode(fullPathname, inode, t.logger)
            case *filesystem.SpecialInode:
                err = makeSpecialInode(fullPathname, inode, t.logger)
            }
            if err != nil {
                t.lastUpdateError = err
            }
        }
    }
}
func (t *rpcType) doUpdate(request sub.UpdateRequest,
    rootDirectoryName string) {
    defer t.clearUpdateInProgress()
    t.disableScannerFunc(true)
    defer t.disableScannerFunc(false)
    startTime := time.Now()
    // Load the triggers saved by the previous update, if any.
    var oldTriggers triggers.Triggers
    file, err := os.Open(t.oldTriggersFilename)
    if err == nil {
        decoder := json.NewDecoder(file)
        var trig triggers.Triggers
        err = decoder.Decode(&trig.Triggers)
        file.Close()
        if err == nil {
            oldTriggers = trig
        } else {
            t.logger.Printf("Error decoding old triggers: %s", err.Error())
        }
    }
    t.copyFilesToCache(request.FilesToCopyToCache, rootDirectoryName)
    t.makeObjectCopies(request.MultiplyUsedObjects)
    t.lastUpdateHadTriggerFailures = false
    if len(oldTriggers.Triggers) > 0 {
        // Dry-run pass (takeAction=false) to match the old triggers, then
        // stop the affected services before the file-system is changed.
        t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
            &oldTriggers, false)
        t.makeInodes(request.InodesToMake, rootDirectoryName,
            request.MultiplyUsedObjects, &oldTriggers, false)
        makeHardlinks(request.HardlinksToMake, rootDirectoryName,
            &oldTriggers, "", false, t.logger)
        doDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,
            false, t.logger)
        changeInodes(request.InodesToChange, rootDirectoryName, &oldTriggers,
            false, t.logger)
        matchedOldTriggers := oldTriggers.GetMatchedTriggers()
        if runTriggers(matchedOldTriggers, "stop", t.logger) {
            t.lastUpdateHadTriggerFailures = true
        }
    }
    // Real pass: apply the changes, matching against the new triggers.
    fsChangeStartTime := time.Now()
    t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
        request.Triggers, true)
    t.makeInodes(request.InodesToMake, rootDirectoryName,
        request.MultiplyUsedObjects, request.Triggers, true)
    makeHardlinks(request.HardlinksToMake, rootDirectoryName,
        request.Triggers, t.objectsDir, true, t.logger)
    doDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,
        true, t.logger)
    changeInodes(request.InodesToChange, rootDirectoryName, request.Triggers,
        true, t.logger)
    fsChangeDuration := time.Since(fsChangeStartTime)
    matchedNewTriggers := request.Triggers.GetMatchedTriggers()
    // Save the new triggers for the next update, then start matched services.
    file, err = os.Create(t.oldTriggersFilename)
    if err == nil {
        b, err := json.Marshal(request.Triggers.Triggers)
        if err == nil {
            var out bytes.Buffer
            json.Indent(&out, b, "", " ")
            out.WriteTo(file)
        } else {
            t.logger.Printf("Error marshaling triggers: %s", err.Error())
        }
        file.Close()
    }
    if runTriggers(matchedNewTriggers, "start", t.logger) {
        t.lastUpdateHadTriggerFailures = true
    }
    timeTaken := time.Since(startTime)
    t.logger.Printf("Update() completed in %s (change window: %s)\n",
        timeTaken, fsChangeDuration)
}
func (t *rpcType) updateAndUnlock(request sub.UpdateRequest,
    rootDirectoryName string) error {
    defer t.clearUpdateInProgress()
    t.disableScannerFunc(true)
    defer t.disableScannerFunc(false)
    startTime := time.Now()
    if request.Triggers == nil {
        request.Triggers = triggers.New()
    }
    // Load the triggers saved by the previous update, if any.
    var oldTriggers triggers.Triggers
    file, err := os.Open(t.oldTriggersFilename)
    if err == nil {
        decoder := json.NewDecoder(file)
        var trig triggers.Triggers
        err = decoder.Decode(&trig.Triggers)
        file.Close()
        if err == nil {
            oldTriggers = trig
        } else {
            t.logger.Printf("Error decoding old triggers: %s", err.Error())
        }
    }
    t.copyFilesToCache(request.FilesToCopyToCache, rootDirectoryName)
    t.makeObjectCopies(request.MultiplyUsedObjects)
    t.lastUpdateHadTriggerFailures = false
    if len(oldTriggers.Triggers) > 0 {
        // Dry-run pass (takeAction=false) to match the old triggers, then
        // stop the affected services before the file-system is changed.
        t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
            &oldTriggers, false)
        t.makeInodes(request.InodesToMake, rootDirectoryName,
            request.MultiplyUsedObjects, &oldTriggers, false)
        t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
            &oldTriggers, "", false)
        t.doDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,
            false)
        t.changeInodes(request.InodesToChange, rootDirectoryName,
            &oldTriggers, false)
        matchedOldTriggers := oldTriggers.GetMatchedTriggers()
        if runTriggers(matchedOldTriggers, "stop", t.logger) {
            t.lastUpdateHadTriggerFailures = true
        }
    }
    // Real pass: apply the changes, matching against the new triggers.
    fsChangeStartTime := time.Now()
    t.makeDirectories(request.DirectoriesToMake, rootDirectoryName,
        request.Triggers, true)
    t.makeInodes(request.InodesToMake, rootDirectoryName,
        request.MultiplyUsedObjects, request.Triggers, true)
    t.makeHardlinks(request.HardlinksToMake, rootDirectoryName,
        request.Triggers, t.objectsDir, true)
    t.doDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,
        true)
    t.changeInodes(request.InodesToChange, rootDirectoryName,
        request.Triggers, true)
    fsChangeDuration := time.Since(fsChangeStartTime)
    matchedNewTriggers := request.Triggers.GetMatchedTriggers()
    // Save the new triggers for the next update, then start matched services.
    file, err = os.Create(t.oldTriggersFilename)
    if err == nil {
        writer := bufio.NewWriter(file)
        if err := jsonlib.WriteWithIndent(writer, " ",
            request.Triggers.Triggers); err != nil {
            t.logger.Printf("Error marshaling triggers: %s", err)
        }
        writer.Flush()
        file.Close()
    }
    if runTriggers(matchedNewTriggers, "start", t.logger) {
        t.lastUpdateHadTriggerFailures = true
    }
    timeTaken := time.Since(startTime)
    if t.lastUpdateError != nil {
        t.logger.Printf("Update(): last error: %s\n", t.lastUpdateError)
    } else {
        // Only record the image name if every step succeeded.
        t.rwLock.Lock()
        t.lastSuccessfulImageName = request.ImageName
        t.rwLock.Unlock()
    }
    t.logger.Printf("Update() completed in %s (change window: %s)\n",
        timeTaken, fsChangeDuration)
    return t.lastUpdateError
}
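// runTriggers is not shown in this listing. The contract implied by the
// callers above: run the given action ("stop" or "start") for every matched
// trigger and report whether any of them failed, so the caller can set
// lastUpdateHadTriggerFailures. A hedged sketch only; the *triggers.Trigger
// element type, its Service field and the use of the system "service" command
// are assumptions for illustration, not the real implementation.
func runTriggersSketch(triggerList []*triggers.Trigger, action string,
    logger *log.Logger) bool {
    hadFailures := false
    for _, trigger := range triggerList {
        logger.Printf("Action: service %s %s\n", trigger.Service, action)
        cmd := exec.Command("service", trigger.Service, action)
        if output, err := cmd.CombinedOutput(); err != nil {
            hadFailures = true
            logger.Printf("Error running '%s %s': %s\n%s",
                trigger.Service, action, err, output)
        }
    }
    return hadFailures
}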