func (ck *CertKit) GetPending() (map[string]interface{}, error) { var err error var resp map[string]interface{} var names []string var fh *os.File var buf []byte resp = map[string]interface{}{} fh, err = os.Open(fmt.Sprintf("%s%cpending", ck.Path, os.PathSeparator)) if err != nil { Goose.Auth.Logf(1, "Error opening pending directory (%s%cpending): %s", ck.Path, os.PathSeparator, err) return nil, err } names, err = fh.Readdirnames(-1) if err != nil { Goose.Auth.Logf(1, "Error reading pending directory (%s%cpending): %s", ck.Path, os.PathSeparator, err) return nil, err } for _, k := range names { if (len(k) > 4) && (k[len(k)-4:] == ".crt") { buf, err = ioutil.ReadFile(fmt.Sprintf("%s%cpending%c%s", ck.Path, os.PathSeparator, os.PathSeparator, k)) if err != nil { Goose.Auth.Logf(1, "Error reading pending certificate (%s%cpending%c%s): %s", ck.Path, os.PathSeparator, os.PathSeparator, k, err) return nil, err } resp[k[:len(k)-4]] = string(buf) } } return resp, nil }
func (w *World) Open(dir string) (err error) { var fd *os.File w.dir = dir + "/o/" fd, err = os.Open(w.dir) if err == nil { defer fd.Close() var names []string names, err = fd.Readdirnames(0) if err == nil { w.obj = make(map[oid]*Obj, len(names)) for _, name := range names { var id oid if id, err = strid(name); err == nil { _, err = w.Load(id) } if err != nil { break } } } } return }
func listAllUsers(dir *os.File, list UserListFull) error { for { last := false names, err := dir.Readdirnames(3) if err != nil { if err == io.EOF { last = true } else { return err } } for _, name := range names { var user UserFull var err error var username string if user.IsValid, username, user.IsAdmin, err = checkUserFile(name); err != nil { return err } user.IsSupported, user.FormatID, user.LastChanged, user.FormatParams, _ = isFormatSupportedFull(filepath.Join(dir.Name(), name)) list[username] = user } if last { break } } return nil }
func filterTheme(dir string, conditions []string) bool { var err error var f *os.File if f, err = os.Open(dir); err != nil { logger.Debugf("Open '%s' failed: %v", dir, err) return false } defer f.Close() names := []string{} if names, err = f.Readdirnames(0); err != nil { logger.Debugf("Readdirnames '%s' failed: %v", dir, err) return false } cnt := 0 for _, name := range names { for _, c := range conditions { if name == c { cnt++ break } } } if cnt == len(conditions) { return true } return false }
func addtozip(z *zip.Writer, name string, f *os.File) { if fi, _ := f.Stat(); fi.IsDir() { log.Println("Adding folder", name) names, _ := f.Readdirnames(-1) for _, subname := range names { file, err := os.Open(filepath.Join(f.Name(), subname)) if err != nil { log.Println(err) continue } addtozip(z, filepath.Join(name, subname), file) file.Close() } } else { log.Println("Adding file", name) fw, err := z.Create(name) if err != nil { panic(err) } _, err = io.Copy(fw, f) if err != nil { panic(err) } } }
func (self articleStore) GetAllAttachments() (names []string, err error) { var f *os.File f, err = os.Open(self.attachments) if err == nil { names, err = f.Readdirnames(0) } return }
// Scan walks the directory tree at dir, looking for source units that match profiles in the
// configuration. Scan returns a list of all source units found.
func (c Config) Scan(dir string) (found []Unit, err error) {
	// Use the configured profiles when present, otherwise every known profile.
	var profiles []Profile
	if c.Profiles != nil {
		profiles = c.Profiles
	} else {
		profiles = AllProfiles
	}
	// Normalize the base path; an Abs failure deliberately leaves c.Base unchanged.
	c.Base, _ = filepath.Abs(c.Base)
	// Once a Ruby gem/app directory has been matched, file-level matching is
	// disabled for ALL subsequent walk steps.
	// NOTE(review): the flag is shared across the per-profile walks, so a match
	// by an earlier profile also suppresses file matches for later profiles —
	// confirm this is intended.
	skipFiles := false
	// One full filesystem walk per profile.
	for _, profile := range profiles {
		err = filepath.Walk(dir, func(path string, info os.FileInfo, inerr error) (err error) {
			if inerr != nil {
				return inerr
			}
			if info.IsDir() {
				// Skip configured directories entirely (but never the walk root).
				if dir != path && c.skipDir(info.Name()) {
					return filepath.SkipDir
				}
				// List the directory's entries so the profile can match on them.
				// The defer closes the handle when this walk callback returns.
				var dirh *os.File
				dirh, err = os.Open(path)
				if err != nil {
					return
				}
				defer dirh.Close()
				var filenames []string
				filenames, err = dirh.Readdirnames(0)
				if err != nil {
					return
				}
				if profile.Dir != nil && profile.Dir.DirMatches(path, filenames) {
					relpath, abspath := c.relAbsPath(path)
					found = append(found, profile.Unit(abspath, relpath, c, info))
					// TopLevelOnly profiles do not descend into a matched directory.
					if profile.TopLevelOnly {
						return filepath.SkipDir
					}
					// skip trying to match the files if gems or apps are found
					if profile.Name == "Ruby Gem" || profile.Name == "Ruby app" {
						skipFiles = true
					}
				}
			} else {
				// Regular file: match only while file matching is still enabled.
				if !skipFiles && profile.File != nil && profile.File.FileMatches(path) {
					relpath, abspath := c.relAbsPath(path)
					found = append(found, profile.Unit(abspath, relpath, c, info))
				}
			}
			return
		})
	}
	return
}
func getImagePaths(pth string) (chan string, error) { const pathBufSize = 10 pathsChan := make(chan string, pathBufSize) var pathInfo os.FileInfo if pi, err := os.Stat(pth); err == nil { pathInfo = pi } else { return nil, err } if pathInfo.IsDir() { go func() { var dir *os.File if d, err := os.Open(pth); err == nil { dir = d } else { fmt.Errorf(err.Error()) pathsChan <- "" return } for { var entryNames []string if entNames, err := dir.Readdirnames(pathBufSize); err == nil { entryNames = entNames } else { fmt.Errorf(err.Error()) pathsChan <- "" return } for _, entName := range entryNames { p := pth + "/" + entName if pStat, err := os.Stat(p); err == nil { if !pStat.IsDir() && isSupportedImageFormat(p) { pathsChan <- p } } else { fmt.Errorf(err.Error()) } } } }() } else if !pathInfo.IsDir() && isSupportedImageFormat(pth) { pathsChan <- pth } else { return nil, errors.New("The specified file is not an image file.") } return pathsChan, nil }
// Check current directory for existing repo. func checkForRepo(file *os.File) bool { names, err := file.Readdirnames(0) if err != nil { panic(err) } for _, name := range names { if name == ObjectDir { return true } } return false }
func sortedStored(f *os.File) ([]string, error) { names, err := f.Readdirnames(-1) if err != nil { return nil, err } onlyFull := []string{} for _, v := range names { if !strings.HasSuffix(v, ".part") { onlyFull = append(onlyFull, v) } } sort.Strings(onlyFull) return onlyFull, nil }
func getFileNames(dirPath string) ([]string, error) { var ( dir *os.File err error ) if dir, err = os.Open(dirPath); err != nil { return nil, err } defer dir.Close() fns, _ := dir.Readdirnames(-1) sort.Strings(fns) return fns, nil }
func processBatchFolder(f *os.File, outdir string) (count int, e error) { names, e := f.Readdirnames(-1) if e != nil { logger.Println("error reading source folder.") return 0, e } for _, name := range names { name = filepath.Join(f.Name(), name) go runTask(name, outdir) count++ } return count, nil }
func readdir_stat(dir string, f *os.File) ([]os.FileInfo, error) { names, err := f.Readdirnames(-1) if err != nil { return nil, err } fis := make([]os.FileInfo, len(names)) for i, name := range names { fis[i], err = os.Stat(filepath.Join(dir, name)) if err != nil { return nil, err } } return fis, nil }
func readdir_stat(dir string, f *os.File) ([]os.FileInfo, error) { names, err := f.Readdirnames(-1) if err != nil { return nil, err } fis := make([]os.FileInfo, 0, len(names)) for _, name := range names { fi, err := os.Stat(filepath.Join(dir, name)) if err != nil { continue } fis = append(fis, fi) } return fis, nil }
func DirFileToTree(f *os.File, path string) (DirTree, error) { fi, err := f.Stat() if err != nil { return nil, fmt.Errorf("reading stats for %s: %s", path, err) } if fi.IsDir() { ret := &DirFolder{ name: fi.Name(), Entries: []DirTree{}, } names, err := f.Readdirnames(0) if err != nil { return nil, fmt.Errorf("reading files at %s: %s", path, err) } for _, name := range names { entryPath := path + "/" + name f, err := os.Open(entryPath) if err != nil { // Ignore errors here; best effort. continue } defer f.Close() entry, err := DirFileToTree(f, entryPath) if err != nil { return nil, err } ret.Entries = append(ret.Entries, entry) } sort.Sort(byName(ret.Entries)) return ret, nil } else { var df *DirFile df = &DirFile{ name: fi.Name(), contents: func() (string, error) { bs, err := ioutil.ReadFile(path) return string(bs), err }, } return df, nil } }
// Wrapper for Readdirnames that converts it into a generator-style method. func (a *Archiver) readdirnames(dir *os.File) chan string { retval := make(chan string, 256) go func(dir *os.File) { for { names, err := dir.Readdirnames(256) if err == io.EOF { break } else if err != nil { a.Logger.Warning("error reading directory:", err.Error()) } for _, name := range names { retval <- name } } close(retval) }(dir) return retval }
func showDirFile(file *os.File, indent string, isLastDir bool) { dirnames, err := file.Readdirnames(0) if err != nil { return } dirnames = sortDir(dirnames) var isLastFile bool for i, dirname := range dirnames { if i+1 == len(dirnames) { isLastFile = true isLastDir = true } else { isLastFile = false isLastDir = false } file, err := os.Open(path.Join(file.Name(), dirname)) if err != nil { continue } defer file.Close() fi, err := file.Stat() if err != nil { continue } if fi.IsDir() { directoriesCnt++ _, lastPart := path.Split(file.Name()) if isLastDir { fmt.Print(indent+"└── ", lastPart, "\n") } else { fmt.Print(indent+"├── ", lastPart, "\n") } if isLastDir { showDirFile(file, indent+" ", isLastDir) } else { showDirFile(file, indent+"│ ", isLastDir) } } else { filesCnt++ showRegularFile(file, indent, isLastFile) } } }
// DirIsEmpty checks if the given directory is empty. func DirIsEmpty(dir string) bool { var err error var f *os.File if f, err = os.Open(dir); err == nil { var names []string if names, err = f.Readdirnames(0); err != nil { panic(err) } if len(names) > 0 { return false } return true } panic(err) }
func (l *GameLayer) LoadLevel(path string) (err error) { var ( dir *os.File candidates []string names = []string{} ) if dir, err = os.Open(path); err != nil { return } if candidates, err = dir.Readdirnames(0); err != nil { return } sort.Sort(sort.StringSlice(candidates)) for _, name := range candidates { if strings.HasPrefix(name, "layer") { names = append(names, name) } } l.Level, err = LoadLevel(path, names, l.App.GameEventHandler) return }
// Helper function to scan a directory that might potentially contain a // matching file. Closes dir on return. func dirScan(dirPath string, dir *os.File, lowerFileName string, matchedNames []string) ([]string, error) { defer dir.Close() var dirEntries []string var err error maxEntries := 100 for err = nil; err == nil; dirEntries, err = dir.Readdirnames(maxEntries) { for _, entry := range dirEntries { if lowerFileName == strings.ToLower(entry) { // Found a match. matchedNames = append(matchedNames, filepath.Join(dirPath, entry)) } } } if err != io.EOF { // Broke on reading directory entries. return nil, err } return matchedNames, nil }
func hasSupportedAdminHashes(dir *os.File) (bool, error) { success := false for { last := false names, err := dir.Readdirnames(3) if err != nil { if err == io.EOF { last = true } else { return false, err } } for _, name := range names { valid, user, isAdmin, err := checkUserFile(name) if err != nil { return false, err } if !valid { wl.Printf("ignoring file for invalid username: '******'", user) } if !isAdmin { continue } if exists, _ := fileExists(filepath.Join(dir.Name(), user) + userExt); exists { return false, fmt.Errorf("both '%s' and '%s' exist", name, user+userExt) } if ok, _ := IsFormatSupported(filepath.Join(dir.Name(), name)); ok { success = true } } if last { break } } return success, nil }
// Send an HTTP response. The path is supposed to be sanitized already. func sendResponse(conn io.Writer, req string, path string) (err error) { var file *os.File var info os.FileInfo if strings.ToUpper(req) != "GET" { conn.Write([]byte("HTTP/1.0 501 Not Implemented\r\nContent-Type: text/html\r\n\r\n<html><h1>501 Not Implemented</h1></html>")) err = errors.New("only GET is implemented") return err } info, err = os.Lstat(path) if err != nil { conn.Write([]byte("HTTP/1.0 404 Not Found\r\nContent-Type: text/html\r\n\r\n<html><h1>404 Not Found</h1></html>")) return err } if !info.Mode().IsRegular() && !info.Mode().IsDir() { conn.Write([]byte("HTTP/1.0 400 Bad Request\r\nContent-Type: text/html\r\n\r\n<html><h1>400 Bad Request</h1></html>")) err = errors.New("not a regular file or directory") return err } if info.Mode().IsDir() { file, _ = os.Open(path) // TODO error handling names, _ := file.Readdirnames(-1) // TODO error handling conn.Write([]byte("HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n<html><h1>Directory Listing</h1>\n")) for _, name := range names { conn.Write([]byte(fmt.Sprintf("<a href=\"%s\">%s</a><br/>\n", name, name))) } conn.Write([]byte("</html>")) return nil } file, _ = os.Open(path) // TODO error handling conn.Write([]byte("HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n")) io.Copy(conn, file) file.Close() return nil }
// This accepts a directory from the main function // The directory has all of the data stored in the series of files in it. func compile_the_data(data_directory *os.File) string { // initializes a buffer to store all the data var buf_arr []byte buf := bytes.NewBuffer(buf_arr) // this is to get the pwd from within the new directory base_dir := data_directory.Name() // this reads all of the file names in the directory and stores them in a slice if partial_file_names, err := data_directory.Readdirnames(-1); err != nil { panic(err) } else { list_of_file_names := get_complete_file_names(base_dir, partial_file_names) // for each file in the directory... for file := range list_of_file_names { // open the file my_file, err := os.Open(list_of_file_names[file]) if err != nil { panic(err) } // end if defer my_file.Close() // then grab all the data buf.Write(pull_data(my_file)) } // end for loop } // end else fmt.Println("The data has been successfully compiled!") return buf.String() } // end function
func listSupportedUsers(dir *os.File, list UserList) error { for { last := false names, err := dir.Readdirnames(3) if err != nil { if err == io.EOF { last = true } else { return err } } for _, name := range names { valid, user, isAdmin, err := checkUserFile(name) if err != nil { return err } if !valid { wl.Printf("ignoring file for invalid username: '******'", user) continue } ok, _, lastchanged, _, _ := isFormatSupportedFull(filepath.Join(dir.Name(), name)) if !ok { wl.Printf("ignoring file with unsupported hash format for username: '******'", user) continue } list[user] = User{isAdmin, lastchanged} } if last { break } } return nil }
func ZipFolder(folder, outName string) (err error) { var ( dirToZip *os.File filesToZip []string zFile io.Writer zipFile *os.File zipWriter *zip.Writer content []byte ) if dirToZip, err = os.Open(folder); err != nil { return } defer dirToZip.Close() if filesToZip, err = dirToZip.Readdirnames(-1); err != nil { return } if zipFile, err = os.Create(outName); err != nil { return } defer zipFile.Close() zipWriter = zip.NewWriter(zipFile) for _, fileName := range filesToZip { if content, err = ioutil.ReadFile(filepath.Join(folder, fileName)); err != nil { return } if zFile, err = zipWriter.Create(fileName); err != nil { return } if _, err = zFile.Write(content); err != nil { return } } return zipWriter.Close() }
// open_dir walks the already-open directory dir (whose path is dir_name),
// opening every non-ignored regular file via session_open_and_read_file and,
// when recursively is true, descending into subdirectories.
// NOTE(review): this uses pre-Go1 APIs (FileInfo.IsDirectory, three-argument
// os.OpenFile) and ignores all Readdirnames/Lstat errors — presumably a
// deliberate best-effort walk; confirm before modernizing.
func open_dir(dir *os.File, dir_name string, recursively bool) {
	// Read errors are ignored; an empty name list simply ends the walk here.
	names, _ := dir.Readdirnames(-1)
	for _, name := range names {
		abs_name := dir_name + "/" + name
		// Skip names the session's ignore rules exclude.
		if name_is_ignored(abs_name) {
			continue
		}
		// Lstat (not Stat) so symlinks are examined without being followed.
		fi, _ := os.Lstat(abs_name)
		if nil == fi {
			continue
		}
		if fi.IsDirectory() {
			if recursively {
				child_dir, _ := os.OpenFile(abs_name, os.O_RDONLY, 0)
				if nil != child_dir {
					open_dir(child_dir, abs_name, true)
				}
				// Close sits outside the nil check; (*os.File).Close on a
				// nil receiver returns ErrInvalid rather than panicking.
				child_dir.Close()
			}
		} else {
			session_open_and_read_file(abs_name)
		}
	}
}
var dir *os.File var fi os.FileInfo var dir_list []string var envrc []byte if config, err = LoadConfig(env); err != nil { return err } allowed := config.AllowDir() if dir, err = os.Open(allowed); err != nil { return err } defer dir.Close() if dir_list, err = dir.Readdirnames(0); err != nil { return err } for _, hash := range dir_list { filename := path.Join(allowed, hash) if fi, err = os.Stat(filename); err != nil { return err } if !fi.IsDir() { if envrc, err = ioutil.ReadFile(filename); err != nil { return err } envrc_str := strings.TrimSpace(string(envrc))
// Compare two files (or directories) for equality. func compare(aName, bName string) (err error) { var aFileInfo, bFileInfo os.FileInfo aFileInfo, err = os.Stat(aName) if err != nil { return } bFileInfo, err = os.Stat(bName) if err != nil { return } aIsDir, bIsDir := aFileInfo.IsDir(), bFileInfo.IsDir() if aIsDir != bIsDir { return fmt.Errorf("%s.IsDir() == %v != %s.IsDir() == %v", aName, aIsDir, bName, bIsDir) } var aFile, bFile *os.File aFile, err = os.Open(aName) if err != nil { return } defer aFile.Close() bFile, err = os.Open(bName) if err != nil { return } defer bFile.Close() if !aIsDir { aSize, bSize := aFileInfo.Size(), bFileInfo.Size() if aSize != bSize { return fmt.Errorf("%s.Size() == %v != %s.Size() == %v", aName, aSize, bName, bSize) } var aBuf, bBuf bytes.Buffer bufferSize := int64(128 * 1024) for i := int64(0); i < aSize; i += bufferSize { toRead := bufferSize remainder := aSize - i if toRead > remainder { toRead = remainder } _, err = io.CopyN(&aBuf, aFile, toRead) if err != nil { return } _, err = io.CopyN(&bBuf, bFile, toRead) if err != nil { return } aBytes, bBytes := aBuf.Bytes(), bBuf.Bytes() for j := int64(0); j < toRead; j++ { a, b := aBytes[j], bBytes[j] if a != b { err = fmt.Errorf("%s[%d] %d != %d", aName, i+j, a, b) return } } aBuf.Reset() bBuf.Reset() } } else { var aNames, bNames []string aNames, err = aFile.Readdirnames(0) if err != nil { return } bNames, err = bFile.Readdirnames(0) if err != nil { return } if len(aNames) != len(bName) { err = fmt.Errorf("Directories %v and %v don't contain same number of files %d != %d", aName, bName, len(aNames), len(bNames)) } for _, name := range aNames { err = compare(path.Join(aName, name), path.Join(bName, name)) if err != nil { return } } } return }
// Load information about a particular shard. func (shd *ShardLoader) load() { shd.info = nil fi, err := os.Stat(shd.path) if err != nil { if os.IsNotExist(err) { shd.infoErr = nil return } shd.infoErr = errors.New(fmt.Sprintf( "stat() error on leveldb directory "+ "%s: %s", shd.path, err.Error())) return } if !fi.Mode().IsDir() { shd.infoErr = errors.New(fmt.Sprintf( "stat() error on leveldb directory "+ "%s: inode is not directory.", shd.path)) return } var dbDir *os.File dbDir, err = os.Open(shd.path) if err != nil { shd.infoErr = errors.New(fmt.Sprintf( "open() error on leveldb directory "+ "%s: %s.", shd.path, err.Error())) return } defer func() { if dbDir != nil { dbDir.Close() } }() _, err = dbDir.Readdirnames(1) if err != nil { if err == io.EOF { // The db directory is empty. shd.infoErr = nil return } shd.infoErr = errors.New(fmt.Sprintf( "Readdirnames() error on leveldb directory "+ "%s: %s.", shd.path, err.Error())) return } dbDir.Close() dbDir = nil shd.ldb, err = levigo.Open(shd.path, shd.dld.openOpts) if err != nil { shd.ldb = nil shd.infoErr = errors.New(fmt.Sprintf( "levigo.Open() error on leveldb directory "+ "%s: %s.", shd.path, err.Error())) return } shd.info, err = shd.readShardInfo() if err != nil { shd.infoErr = err return } shd.infoErr = nil }
/* Loads the code from the specified package into the runtime. Recurses to load packages which the argument package depends on. TODO artifact version preferences are NOT being handled correctly yet. What should happen is that all artifacts whose versions "ARE" recursively preferred should have their version preferences consulted and respected first (closer to top of load-tree has preference priority) and then and only then should a single no-version-preferred artifact be loaded by asking what its latest version is, then re-visit if that constrains other formerly no-version-preferred ones, then load newly preferred versions, then load the next no-version-preferred artifact. Currently, an artifact version may be loaded in a no-version-preferred way, because the current load-tree descent path does not prefer a version, but it could be that a subsequently loaded artifact somewhere else in the load-tree COULD express a preference for a different version of the artifact, but its preference is consulted too late, after the artifact is already loaded. Handles searching first in a local (private) artifacts repository (directory tree) then a shared artifacts repository, but only if a directive is not in effect to load from shared only, and only if another package from the same artifact has not already been loaded, because in that case, the local (private) or shared decision has already been made and must apply to subsequent packages loaded from the same artifact. If not found locally, tries to load from the Internet (at several standard locations). TODO signed-code integrity checks */ func (ldr *Loader) LoadPackage(originAndArtifactPath string, version string, packagePath string, mustBeFromShared bool) (gen *generator.Generator, err error) { if Logging(PARSE_) { parserDebugMode |= parser.Trace } // First, see if the package is already loaded. 
If so, return packageIdentifier := originAndArtifactPath + "/pkg/" + packagePath beingLoaded := ldr.PackagesBeingLoaded[packageIdentifier] if beingLoaded { err = fmt.Errorf("Package dependency loop. Package '%s' is also present in the tree of packages it imports.", packageIdentifier) return } loadedVersion, found := ldr.LoadedPackages[packageIdentifier] if found { if loadedVersion != version && version != "" { err = fmt.Errorf("Can't load version %s of '%s' since version %s is already loaded into runtime.", version, packageIdentifier, loadedVersion) } return } localArtifactMetadataFilePath := ldr.RelishRuntimeLocation + "/artifacts/" + originAndArtifactPath + "/metadata.txt" sharedArtifactMetadataFilePath := ldr.RelishRuntimeLocation + "/shared/relish/artifacts/" + originAndArtifactPath + "/metadata.txt" sharedReplicaMetadataFilePath := ldr.RelishRuntimeLocation + "/shared/relish/replicas/" + originAndArtifactPath + "/metadata.txt" // Current version of artifact according to shared artifact metadata found in this relish directory tree. sharedCurrentVersion := "" // Date of the artifact metadata that is being relied on for the package load. metadataDate := "" ldr.PackagesBeingLoaded[packageIdentifier] = true if !ldr.quiet { Log(ALWAYS_, "Loading package %s\n", packageIdentifier) } Log(LOAD2_, "LoadPackage: ldr.RelishRuntimeLocation=%s\n", ldr.RelishRuntimeLocation) Log(LOAD2_, "LoadPackage: originAndArtifactPath=%s\n", originAndArtifactPath) Log(LOAD2_, "LoadPackage: version=%s\n", version) Log(LOAD2_, "LoadPackage: packagePath=%s\n", packagePath) mustBeFromShared = mustBeFromShared || ldr.SharedCodeOnly // Set whether will consider local code for this package. var mustBeFromLocal bool // We may end up constrained to load from local artifact. // Package is not loaded. But see if any other packages from the same artifact are loaded. // If so, make sure they don't have an incompatible version. 
var artifactAlreadyLoaded bool // if true, at least one package from the currently-being-loaded artifact has already been loade. // This means the needed version of the artifact and the artifacts it depends on have already // been loaded from built.txt into LoadedArtifacts map. var artifactKnownToBeLocal bool // if artifact is loaded or being loaded, is it loaded from local var artifactKnownToBePublished bool // if artifact is loaded or being loaded, is it loaded from shared var artifactKnownToBeReplica bool // if artifact is loaded or being loaded, is it downloaded and loaded from shared/replicas loadedVersion, artifactAlreadyLoaded = ldr.LoadedArtifacts[originAndArtifactPath] if artifactAlreadyLoaded { if loadedVersion != version && version != "" { err = fmt.Errorf("Can't load package '%s' from version %s of '%s'. Another package from version %s of the artifact is already loaded into runtime.", packagePath, version, originAndArtifactPath, loadedVersion) return } artifactKnownToBeLocal = ldr.LoadedArtifactKnownToBeLocal[originAndArtifactPath] artifactKnownToBePublished = ldr.LoadedArtifactKnownToBePublished[originAndArtifactPath] artifactKnownToBeReplica = ldr.LoadedArtifactKnownToBeReplica[originAndArtifactPath] mustBeFromShared = mustBeFromShared || artifactKnownToBePublished || artifactKnownToBeReplica // Set whether will consider local code for this package. Log(LOAD2_, "%s %s mustBeFromShared=%v\n", originAndArtifactPath, packagePath, mustBeFromShared) if artifactKnownToBeLocal { if mustBeFromShared { // This should never happen I think. Check anyway. err = fmt.Errorf("Can't load package '%s' from shared artifact '%s'. Another package from the local copy of the artifact is already loaded into runtime.", packagePath, originAndArtifactPath) return } else { mustBeFromLocal = true } } } // Now try to load the package from local file system. 
// If allowed, need to try twice, trying to read from // local artifacts dir tree, then if not found from shared artifacts dir tree. //// If no version has been specified, but some version of the artifact exists in local disk, //// the first thing to do is to read the metadata.txt of the local artifact, and set the version number desired //// to the current version as specified by the local artifact copy. Note this could be out of date, but we need a //// different command to go check if there is a later version of the artifact out there and download it. //// If we can find a metadata.txt file for the artifact locally, set the version with it. if version == "" { if !mustBeFromShared { version, metadataDate, err = ldr.readMetadataFile(localArtifactMetadataFilePath) if err != nil { return } } if version == "" { if mustBeFromLocal { // We already loaded a package from the local artifact, but somehow the local artifact is no longer there on filesystem. // This should never happen if everything is being loaded at once at beginning of run. Check anyway. err = fmt.Errorf("Can't load package '%s' from local artifact '%s'. Local artifact not found.", packagePath, originAndArtifactPath) return } version, metadataDate, err = ldr.readMetadataFile(sharedArtifactMetadataFilePath) if err != nil { return } if version == "" { version, metadataDate, err = ldr.readMetadataFile(sharedReplicaMetadataFilePath) if err != nil { return } if version != "" { artifactKnownToBeReplica = true } } else { artifactKnownToBePublished = true } sharedCurrentVersion = version } } // stat the artifact version dir to see if the version of the artifact exists in the filesystem. 
// // try local then shared artifacts and replicas dir trees as allowed by constraints so far artifactVersionDirFound := false var artifactVersionDir string if version != "" { versionStr := "/v" + version if !mustBeFromShared { // try local artifacts dir tree artifactVersionDir = ldr.RelishRuntimeLocation + "/artifacts/" + originAndArtifactPath + versionStr _, statErr := gos.Stat(artifactVersionDir) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish artifact version directory '%s': %v\n", artifactVersionDir, statErr) return } } else { artifactVersionDirFound = true mustBeFromLocal = true // locked to local (private) artifact now artifactKnownToBeLocal = true } } if !artifactVersionDirFound { // this version not found in local artifacts dir tree // try published shared artifacts dir tree artifactVersionDir = ldr.RelishRuntimeLocation + "/shared/relish/artifacts/" + originAndArtifactPath + versionStr _, statErr := gos.Stat(artifactVersionDir) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish artifact version directory '%s': %v\n", artifactVersionDir, statErr) return } } else { artifactVersionDirFound = true mustBeFromShared = true // locked to shared artifact now artifactKnownToBePublished = true } } if !artifactVersionDirFound { // this version not found in local artifacts dir tree or published shared artifacts dir tree // try downloaded shared replicas dir tree artifactVersionDir = ldr.RelishRuntimeLocation + "/shared/relish/replicas/" + originAndArtifactPath + versionStr _, statErr := gos.Stat(artifactVersionDir) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish artifact version directory '%s': %v\n", artifactVersionDir, statErr) return } } else { artifactVersionDirFound = true mustBeFromShared = true // locked to shared artifact now artifactKnownToBeReplica = true } } } if !artifactVersionDirFound { // Have not found the artifact version locally. 
Fetch it from the Internet. Log(LOAD2_, "artifact version |%s| not found locally\n", version) Log(LOAD2_, "localArtifactMetadataFilePath=%s\n", localArtifactMetadataFilePath) // TODO Need this path in order to install or update the artifact metadata file from remote, if there is none locally // or if the remote one is more recent. // // artifactMetadataFilePath := ldr.RelishRuntimeLocation + "/shared/relish/artifacts/" + originAndArtifactPath + "/metadata.txt" // Note: We will always be fetching into the shared artifacts directory tree. // If programmer wants to copy an artifact version into the local artifacts directory tree to develop/modify it, // they must currently do that copy separately manually. var zipFileContents []byte if sharedCurrentVersion == "" { sharedCurrentVersion, metadataDate, err = ldr.readMetadataFile(sharedReplicaMetadataFilePath) } // replaced by stuff below // defaultHostURL := ldr.DefaultCodeHost(originAndArtifactPath) // hostURL := defaultHostURL // // REVIEW THIS COMMENT // Note: TODO The correct order to do things is to load the metadata.txt file from the default host // (if possible) then to search for secondary hosts to get the version zip file, selecting // one AT RANDOM, // then if all of those (some number of) mirrors fail, get it from the default host // Also, use port 80 then 8421. // TODO: RE: SERVER SEARCH ORDER // We really ought to consider using a different order of servers tried // for fetching artifact source-code zip files than the order we use for trying to find // the smaller artifact metadata.txt files. Specifically, it is better to find // metadata.txt files at servers owned or controlled by the origin, because the metadata.txt // file will be up to date. shared.relish.pl is next best for that consideration. // However, from a performance (load sharing when scaled) perspective, it is better to // download the actual source code zip files from randomly found replica servers or // secondary general repositories. 
hostURLs := ldr.NonSearchedCodeHostURLs(originAndArtifactPath, "") // NEW // If we did not have the metadata file on filesystem before, or if remote metadata file is newer, // we should download and cache the metadata.txt file from the remote repository. // // Then, if we do not have a specified version yet, we should set version # from that, var currentVersion string var usingCentralRepo bool = false // whether getting code from http://shared.relish.pl var hostURL string for _, hostURL = range hostURLs { // Read remote metadata file. Store it locally if its date is >= the local shared artifact metadata date. currentVersion, err = fetchArtifactMetadata(hostURL, originAndArtifactPath, metadataDate, sharedCurrentVersion, sharedReplicaMetadataFilePath) if err == nil { break } else if currentVersion != "" { return // Inability to create or write local metadata file. Bad error. } } if currentVersion == "" { // TODO Now try google search // Should really do a Google search for metadata found anywhere (except shared.relish.pl) now, // so as to limit load and single point of failure on shared.relish.pl. /* hostURLs,err = ldr.FindSecondaryCodeHosts(originAndArtifactPath, hostURLs) if err != nil { return } for _,hostURL = range hostURLs { // Read remote metadata file. Store it locally if its date is >= the local shared artifact metadata date. currentVersion, err = fetchArtifactMetadata(hostURL, originAndArtifactPath, metadataDate, sharedCurrentVersion, sharedReplicaMetadataFilePath) if err == nil { break } else if currentVersion != "" { return // Inability to create or write local metadata file. Bad error. } } */ // // Only if trying other replica sites fails should shared.relish.pl be tried. // // However, for now, we're going straight to trying shared.relish.pl, because Google searching and // signed-metadata verification and signed-code verification aren't implemented yet. 
} if currentVersion == "" { // Now try shared.relish.pl usingCentralRepo = true hostURL = "http://shared.relish.pl" // Read remote metadata file. Store it locally if its date is >= the local shared artifact metadata date. currentVersion, err = fetchArtifactMetadata(hostURL, originAndArtifactPath, metadataDate, sharedCurrentVersion, sharedReplicaMetadataFilePath) if err != nil { return // We really couldn't find and download metadata for this artifact anywhere we looked. Too bad. } } // metadataHostURL := hostURL // If we need to keep track of where we got the metadata from. if usingCentralRepo { hostURLs = []string{hostURL} } else { hostURLs = ldr.NonSearchedCodeHostURLs(originAndArtifactPath, hostURL) // NEW } if version == "" { version = currentVersion } // Version must now be a proper version number string, not "" var zipFileName string for _, hostURL = range hostURLs { zipFileContents, zipFileName, err = fetchArtifactZipFile(hostURL, originAndArtifactPath, version) if err == nil { break } // TODO consider logging the missed fetch and or developing a bad reputation for the host. } if zipFileContents == nil { err = fmt.Errorf("Search of Internet did not find relish software artifact '%s'", originAndArtifactPath) return } artifactKnownToBeReplica = true // Unzip the artifact into the proper local directory tree // TODO TODO Really don't know the artifact version here in some case, (in case there was nothing // not even a metadata.txt file locally, and no version was specified on command line) so // we don't have the correct path for artifactVersionDir known yet in that case !!! // WE DO KNOW IT HAS TO BE A SHARED REPLICASE DIR PATH however. 
versionStr := "/v" + version artifactVersionDir = ldr.RelishRuntimeLocation + "/shared/relish/replicas/" + originAndArtifactPath + versionStr //gos.MkdirAll(name string, perm FileMode) error var perm os.FileMode = 0777 err = gos.MkdirAll(artifactVersionDir, perm) if err != nil { return } zipFilePath := ldr.RelishRuntimeLocation + "/shared/relish/replicas/" + originAndArtifactPath + "/" + zipFileName err = gos.WriteFile(zipFilePath, zipFileContents, perm) if err != nil { return } // Open an artifact version zip archive for reading. var srcZipFileContents []byte srcZipFileContents, err = zip_util.ExtractFileFromZipFileContents(zipFileContents, "artifactVersionContents.zip") if err != nil { return } ///////////////////////////////////////////////////////////////////////////////////// // Verify the signed contents using digital signature verification. var sharedRelishPublicKeyCertificateBytes []byte var sharedRelishPublicKeyCertificate string var installationSharedRelishPublicKeyCert string var originPublicKeyCertificateBytes []byte var originPublicKeyCertificate string var signatureBytes []byte var signature string sharedRelishPublicKeyCertificateBytes, err = zip_util.ExtractFileFromZipFileContents(zipFileContents, "sharedRelishPublicKeyCertificate.pem") if err != nil { return } sharedRelishPublicKeyCertificate = strings.TrimSpace(string(sharedRelishPublicKeyCertificateBytes)) originPublicKeyCertificateBytes, err = zip_util.ExtractFileFromZipFileContents(zipFileContents, "originPublicKeyCertificate.pem") if err != nil { return } originPublicKeyCertificate = strings.TrimSpace(string(originPublicKeyCertificateBytes)) signatureBytes, err = zip_util.ExtractFileFromZipFileContents(zipFileContents, "signatureOfArtifactVersionContents.pem") if err != nil { return } signature = strings.TrimSpace(string(signatureBytes)) installationSharedRelishPublicKeyCert, err = crypto_util.GetPublicKeyCert("origin", "shared.relish.pl2012") if err != nil { // Could not find shared relish pl 
public key cert in keys/public directory of this relish project directory. // Try downloading the public key from http://shared.relish.pl and installing it in the project directory. var cert string cert, err = FetchSharedRelishPublicKeyCert() if err != nil { return } err = crypto_util.StorePublicKeyCert("origin", "shared.relish.pl2012", cert) if err != nil { return } installationSharedRelishPublicKeyCert, err = crypto_util.GetPublicKeyCert("origin", "shared.relish.pl2012") if err != nil { return } } installationSharedRelishPublicKeyCert = strings.TrimSpace(installationSharedRelishPublicKeyCert) if err != nil { return } // Is the shared relish public key cert in the artifact zip file identical to the cert that came with my // relish distribution? If not, panic. if sharedRelishPublicKeyCertificate != installationSharedRelishPublicKeyCert { err = fmt.Errorf("Did not install downloaded artifact because shared.relish.pl2012 public key certificate\n"+ "in artifact %s (v%s) downloaded from %s\n"+ "is different than shared.relish.pl2012 public key certificate in this relish installation.\n", originAndArtifactPath, version, hostURL) return } // Validate that shared.relish.pl2012 public key is signed properly, obtaining the shared.relish.pl2012 publicKeyPEM. 
sharedRelishPublicKey := crypto_util.VerifiedPublicKey("", sharedRelishPublicKeyCertificate, "origin", "shared.relish.pl2012") if sharedRelishPublicKey == "" { err = errors.New("Invalid shared.relish.pl2012 public key certificate.") return } // Validate the artifact-publishing origin's public key cert slashPos := strings.Index(originAndArtifactPath, "/") originId := originAndArtifactPath[:slashPos] originPublicKey := crypto_util.VerifiedPublicKey(sharedRelishPublicKey, originPublicKeyCertificate, "origin", originId) if originPublicKey == "" { err = fmt.Errorf("Did not install downloaded artifact because %s public key certificate\n"+ "in artifact %s (v%s) downloaded from %s\n"+ "is invalid.\n", originId, originAndArtifactPath, version, hostURL) return } signedContent := zipFileName + "_|_" + string(srcZipFileContents) if !crypto_util.Verify(originPublicKey, signature, signedContent) { err = fmt.Errorf("Did not install downloaded artifact because artifact version content\n"+ "in artifact %s (v%s) downloaded from %s\n"+ "does not match (was not verified by) its digital signature.\n", originAndArtifactPath, version, hostURL) return } // Woohoo! Contents are verified. // ///////////////////////////////////////////////////////////////////////////////////// // // Write them to relish installation shared code directory tree. 
// Note: Assuming the artifactVersionContents.zip file starts with src/ pkg/ doc/ etc not with v0002/ err = zip_util.ExtractZipFileContents(srcZipFileContents, artifactVersionDir) if err != nil { return } Log(ALWAYS_, "Downloaded %s (v%s) from %s\n", originAndArtifactPath, version, hostURL) } if !artifactAlreadyLoaded { // Read built.txt from the artifact version directory builtFilePath := artifactVersionDir + "/built.txt" var builtFileContents []byte _, statErr := gos.Stat(builtFilePath) if statErr == nil { builtFileContents, err = gos.ReadFile(builtFilePath) if err != nil { return } artifactsVersionsStrs := strings.Fields(string(builtFileContents)) n := len(artifactsVersionsStrs) for i := 0; i < n; i += 2 { artifactPath := artifactsVersionsStrs[i] artifactVersion := artifactsVersionsStrs[i+1] alreadyDesiredVersion, versionFound := ldr.LoadedArtifacts[artifactPath] if versionFound { if artifactVersion != alreadyDesiredVersion { Log(ALWAYS_, "Using v%s of %s. %s (v%s) may prefer v%s of %s.\n", alreadyDesiredVersion, artifactPath, originAndArtifactPath, version, artifactVersion, artifactPath) } } else { // Tell the loader to prefer the version of the other artifact // that the being-loaded artifact built.txt file specifies. ldr.LoadedArtifacts[artifactPath] = artifactVersion } } } else if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat '%s': %v\n", builtFilePath, statErr) return } // else there is no built.txt file. Accept that for now // and subsequently load the current versions of artifacts where no other version is preferred. } //////////////////////////////////////////////////////////////////////// // TODO // Load all the code files in the package of the artifact. // compile files if necessary? // // record the version of the artifact and package in the loader's registry of loaded packages and artifacts. 
packageSourcePath := artifactVersionDir + "/src/" + packagePath packageCompiledPath := artifactVersionDir + "/pkg/" + packagePath // Read the filenames of source files etc. in the /src/... package directory var sourceDirFile *os.File sourceDirFile, err = gos.Open(packageSourcePath) if err != nil { return } defer sourceDirFile.Close() var filenames []string filenames, err = sourceDirFile.Readdirnames(-1) if err != nil { return } // Create /pkg/ dir tree if does not exist already _, statErr := gos.Stat(packageCompiledPath) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish intermediate-code directory '%s': %v\n", packageCompiledPath, statErr) return } var perm os.FileMode = 0777 err = gos.MkdirAll(packageCompiledPath, perm) if err != nil { return } } ldr.LoadedArtifacts[originAndArtifactPath] = version ldr.LoadedArtifactKnownToBePublished[originAndArtifactPath] = strings.Contains(artifactVersionDir, "/shared/relish/artifacts/") ldr.LoadedArtifactKnownToBeReplica[originAndArtifactPath] = artifactKnownToBeReplica Log(LOAD2_, "ldr.LoadedArtifactKnownToBePublished[%s]=%v\n", originAndArtifactPath, ldr.LoadedArtifactKnownToBePublished[originAndArtifactPath]) Log(LOAD2_, "ldr.LoadedArtifactKnownToBeReplica[%s]=%v\n", originAndArtifactPath, ldr.LoadedArtifactKnownToBeReplica[originAndArtifactPath]) Logln(LOAD_, "artifactVersionDir="+artifactVersionDir) ldr.LoadedArtifactKnownToBeLocal[originAndArtifactPath] = !(ldr.LoadedArtifactKnownToBePublished[originAndArtifactPath] || artifactKnownToBeReplica) if relish.DatabaseURI() == "" { dbDirPath := ldr.databaseDirPath(originAndArtifactPath) var perm os.FileMode = 0777 err = gos.MkdirAll(dbDirPath, perm) if err != nil { return } dbFilePath := dbDirPath + "/" + ldr.DatabaseName relish.SetDatabaseURI(dbFilePath) // TODO NOT TRUE AT ALL YET // This can be overridden with a statement in the program, as long as a persistence op has not been used first. 
} // Collect a map of file nodes to the root of the filename. We will be passing this to the generator to generate runtime // code for the whole package all at once. // astFileNodes := make(map[*ast.File]string) for _, filename := range filenames { var sourceFound bool var pickledFound bool if strings.HasSuffix(filename, ".rel") { // consider only the relish source files in the dir. // This is actually quite controversial, since it means that source code MUST be present // or we won't bother looking for the compiled file to load. // This is a somewhat political opinionated decision. Will have to be seriously mulled if not pondered. // TODO add in here the on-demand compilation as found in relish.go // Doing it NOW!! sourceFilePath := packageSourcePath + "/" + filename fileNameRoot := filename[:len(filename)-4] pickleFilePath := packageCompiledPath + "/" + fileNameRoot + ".rlc" sourceFileInfo, statErr := gos.Stat(sourceFilePath) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish source file '%s': %v\n", sourceFilePath, statErr) return } } else { sourceFound = true } pickleFileInfo, statErr := gos.Stat(pickleFilePath) if statErr != nil { if !os.IsNotExist(statErr) { err = fmt.Errorf("Can't stat relish intermediate-code file '%s': %v\n", pickleFilePath, statErr) return } } else { pickledFound = true } var parseNeeded bool if sourceFound { if pickledFound { if sourceFileInfo.ModTime().After(pickleFileInfo.ModTime()) { parseNeeded = true } } else { parseNeeded = true } } else if !pickledFound { err = fmt.Errorf("Error: Found neither relish source file '%s' nor intermediate-code file '%s'.\n", sourceFilePath, pickleFilePath) return } var fileNode *ast.File if parseNeeded { var fset = token.NewFileSet() fileNode, err = parser.ParseFile(fset, sourceFilePath, nil, parserDebugMode) if err != nil { err = fmt.Errorf("Error parsing file '%s': %v\n", sourceFilePath, err) return } err = ast.Pickle(fileNode, pickleFilePath) if err != nil { err = 
fmt.Errorf("Error pickling file '%s': %v\n", sourceFilePath, err) return } } else { // read the pickled (intermediate-code) file fileNode, err = ast.Unpickle(pickleFilePath) if err != nil { err = fmt.Errorf("Error unpickling file '%s': %v\n", pickleFilePath, err) return } } err = ldr.ensureImportsAreLoaded(fileNode) if err != nil { return } astFileNodes[fileNode] = fileNameRoot // gen = generator.NewGenerator(fileNode, fileNameRoot) // TODO NOW add a isLocal =ldr.LoadedArtifactKnownToBeLocal[originAndArtifactPath] // argument so that we can flag the RPackage object as local or shared. // gen.GenerateCode() if packageIdentifier != fileNode.Name.Name { err = fmt.Errorf("\nThe origin, artifact, or package metadata at top of source code file\n'%s'\ndoes not match the package directory path where the file resides.\n", sourceFilePath) return } if parseNeeded { if !ldr.quiet { Log(ALWAYS_, "Compiled %s\n", sourceFilePath) } } } // end of if it is a code file. } // end of loop over each file in the package. if len(astFileNodes) > 0 { gen = generator.NewGenerator(astFileNodes) gen.GenerateCode() } native_methods.WrapNativeMethods(packageIdentifier) // Check if package has native methods, if so, make RMethod wrappers. ldr.LoadedPackages[packageIdentifier] = version delete(ldr.PackagesBeingLoaded, packageIdentifier) if !ldr.quiet { Log(ALWAYS_, "Loaded %s\n", packageCompiledPath) } return }