func NewFlatHaveCache(gen string) *FlatHaveCache {
	filename := filepath.Join(osutil.CacheDir(), "camput.havecache."+escapeGen(gen))
	c := &FlatHaveCache{
		filename: filename,
		m:        make(map[string]int64),
	}
	f, err := os.Open(filename)
	if os.IsNotExist(err) {
		return c
	}
	if err != nil {
		log.Fatalf("opening camput have-cache %v: %v", filename, err)
	}
	defer f.Close()
	br := bufio.NewReader(f)
	for {
		ln, err := br.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Printf("Warning: (ignoring) reading have-cache: %v", err)
			break
		}
		// Each line is "<blobref> <size>"; malformed lines are skipped.
		fields := strings.Fields(strings.TrimSpace(ln))
		if len(fields) == 2 {
			blobrefStr, sizea := fields[0], fields[1]
			if size, err := strconv.ParseInt(sizea, 10, 64); err == nil && size >= 0 {
				c.m[blobrefStr] = size
			}
		}
	}
	return c
}
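// appendHaveEntry is a hypothetical helper, not part of the original code,
// shown only to illustrate the on-disk line format that NewFlatHaveCache
// parses back: one "<blobref> <size>" pair per line. It assumes w is the
// opened cache file; a real writer would also handle locking and flushing.
func appendHaveEntry(w io.Writer, blobrefStr string, size int64) error {
	_, err := fmt.Fprintf(w, "%s %d\n", blobrefStr, size)
	return err
}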
func NewFlatHaveCache() *FlatHaveCache {
	filename := filepath.Join(osutil.CacheDir(), "camput.havecache")
	c := &FlatHaveCache{
		filename: filename,
		m:        make(map[string]bool),
	}
	f, err := os.Open(filename)
	if os.IsNotExist(err) {
		return c
	}
	if err != nil {
		log.Fatalf("opening camput have-cache %v: %v", filename, err)
	}
	defer f.Close()
	br := bufio.NewReader(f)
	for {
		ln, err := br.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Printf("Warning: (ignoring) reading have-cache: %v", err)
			break
		}
		// Each line is a bare blobref.
		ln = strings.TrimSpace(ln)
		c.m[ln] = true
	}
	return c
}
func NewKvStatCache(gen string) *KvStatCache {
	fullPath := filepath.Join(osutil.CacheDir(), "camput.statcache."+escapeGen(gen)+".leveldb")
	db, err := leveldb.OpenFile(fullPath, nil)
	if err != nil {
		log.Fatalf("Could not create/open new stat cache at %v, %v", fullPath, err)
	}
	return &KvStatCache{
		filename: fullPath,
		db:       db,
	}
}
func NewKvHaveCache(gen string) *KvHaveCache {
	fullPath := filepath.Join(osutil.CacheDir(), "camput.havecache."+escapeGen(gen)+".kv")
	db, err := kvutil.Open(fullPath, nil)
	if err != nil {
		log.Fatalf("Could not create/open new have cache at %v, %v", fullPath, err)
	}
	return &KvHaveCache{
		filename: fullPath,
		db:       db,
	}
}
func NewFlatStatCache(gen string) *FlatStatCache {
	filename := filepath.Join(osutil.CacheDir(), "camput.statcache."+escapeGen(gen))
	fc := &FlatStatCache{
		filename: filename,
		m:        make(map[string]fileInfoPutRes),
	}
	f, err := os.Open(filename)
	if os.IsNotExist(err) {
		return fc
	}
	if err != nil {
		log.Fatalf("opening camput stat cache %v: %v", filename, err)
	}
	defer f.Close()
	br := bufio.NewReader(f)
	for {
		ln, err := br.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Printf("Warning: (ignoring) reading stat cache: %v", err)
			break
		}
		ln = strings.TrimSpace(ln)
		// Each line is "<filename>\t<fingerprint>\t<blobref>/<size>";
		// malformed lines are skipped.
		fields := strings.Split(ln, "\t")
		if len(fields) < 3 {
			continue
		}
		fileName, fp, putres := fields[0], statFingerprint(fields[1]), fields[2]
		fields = strings.Split(putres, "/")
		if len(fields) != 2 {
			continue
		}
		blobrefStr := fields[0]
		blobSize, err := strconv.ParseInt(fields[1], 10, 64)
		if err != nil {
			continue
		}
		fc.m[fileName] = fileInfoPutRes{
			Fingerprint: fp,
			Result: client.PutResult{
				BlobRef: blobref.Parse(blobrefStr),
				Size:    blobSize,
				Skipped: true, // is this used?
			},
		}
	}
	vlog.Printf("Flatcache read %d entries from %s", len(fc.m), filename)
	return fc
}
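// appendStatEntry is a hypothetical helper, not part of the original code,
// shown only to illustrate the tab-separated line format NewFlatStatCache
// parses back: "<filename>\t<fingerprint>\t<blobref>/<size>". It assumes
// statFingerprint is a string-backed type (as the conversion above suggests)
// and that pr.BlobRef satisfies fmt.Stringer. Filenames containing tabs or
// newlines would corrupt this format.
func appendStatEntry(w io.Writer, fileName string, fp statFingerprint, pr client.PutResult) error {
	_, err := fmt.Fprintf(w, "%s\t%s\t%s/%d\n", fileName, fp, pr.BlobRef, pr.Size)
	return err
}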
// Delete stranded lock files and all but the newest 5
// havecache/statcache files, unless they're newer than 30 days.
func cleanCacheDir() {
	dir := osutil.CacheDir()
	f, err := os.Open(dir)
	if err != nil {
		return
	}
	defer f.Close()
	fis, err := f.Readdir(-1)
	if err != nil {
		return
	}
	var haveCache, statCache []os.FileInfo
	seen := make(map[string]bool)
	for _, fi := range fis {
		seen[fi.Name()] = true
	}
	// Remove lock files whose corresponding cache file is gone.
	for name := range seen {
		if strings.HasSuffix(name, ".lock") && !seen[strings.TrimSuffix(name, ".lock")] {
			os.Remove(filepath.Join(dir, name))
		}
	}
	for _, fi := range fis {
		if strings.HasSuffix(fi.Name(), ".lock") {
			continue
		}
		if strings.HasPrefix(fi.Name(), "camput.havecache.") {
			haveCache = append(haveCache, fi)
			continue
		}
		if strings.HasPrefix(fi.Name(), "camput.statcache.") {
			statCache = append(statCache, fi)
			continue
		}
	}
	for _, list := range [][]os.FileInfo{haveCache, statCache} {
		if len(list) <= 5 {
			continue
		}
		sort.Sort(byModtime(list))
		// Drop the 5 most recently modified files from the deletion
		// candidates; the rest are removed if older than 30 days.
		list = list[:len(list)-5]
		for _, fi := range list {
			if fi.ModTime().Before(time.Now().Add(-30 * 24 * time.Hour)) {
				os.Remove(filepath.Join(dir, fi.Name()))
			}
		}
	}
}
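// byModtime is not shown in this section. The cleanup above only keeps the 5
// most recently modified cache files out of the deletion candidates if the
// sort is oldest-first; a minimal sketch under that assumption:
type byModtime []os.FileInfo

func (s byModtime) Len() int           { return len(s) }
func (s byModtime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byModtime) Less(i, j int) bool { return s[i].ModTime().Before(s[j].ModTime()) }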
// NewDiskCache returns a new DiskCache from a Fetcher, which
// is usually the pkg/client HTTP client (which typically has much
// higher latency and lower bandwidth than local disk).
func NewDiskCache(fetcher blob.Fetcher) (*DiskCache, error) {
	cacheDir := filepath.Join(osutil.CacheDir(), "blobs")
	if !osutil.DirExists(cacheDir) {
		if err := os.Mkdir(cacheDir, 0700); err != nil {
			log.Printf("Warning: failed to make %s: %v; using tempdir instead", cacheDir, err)
			cacheDir, err = ioutil.TempDir("", "camlicache")
			if err != nil {
				return nil, err
			}
		}
	}
	// TODO: max disk size, keep LRU of access, smarter cleaning, etc
	// TODO: use diskpacked instead? harder to clean, though.
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		return nil, err
	}
	dc := &DiskCache{
		CachingFetcher: NewCachingFetcher(diskcache, fetcher),
		Root:           cacheDir,
	}
	return dc, nil
}
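// newCachedClientFetcher is a hypothetical usage sketch, not part of the
// original code: src is any blob.Fetcher, with the pkg/client HTTP client
// being the usual choice per the doc comment above.
func newCachedClientFetcher(src blob.Fetcher) (*DiskCache, error) {
	dc, err := NewDiskCache(src)
	if err != nil {
		return nil, err
	}
	// Fetches served through dc's embedded CachingFetcher are backed by the
	// localdisk store rooted at dc.Root.
	return dc, nil
}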
func NewSQLiteHaveCache(gen string) *SQLiteHaveCache {
	checkCmdInstalled()
	filename := filepath.Join(osutil.CacheDir(), "camput.havecache."+escapeGen(gen)+".db")
	out, err := exec.Command(cmdName, filename, testTable).Output()
	if err != nil {
		log.Fatalf("Failed to test for %v table existence: %v", haveTableName, err)
	}
	if len(out) == 0 {
		// file or table does not exist
		err = exec.Command(cmdName, filename, createHaveTable).Run()
		if err != nil {
			log.Fatalf("Failed to create %v table for have cache: %v", haveTableName, err)
		}
	} else if string(out) != haveTableName+"\n" {
		log.Fatalf("Wrong table name for have cache; was expecting %v, got %q", haveTableName, out)
	}
	return &SQLiteHaveCache{
		filename: filename,
	}
}
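// The identifiers used by NewSQLiteHaveCache (checkCmdInstalled, cmdName,
// testTable, createHaveTable, haveTableName) are defined elsewhere and not
// shown in this section. The variable below is a purely illustrative guess,
// not the real value: the exec-based check only relies on the probe printing
// the table name plus a trailing newline when the table exists, and nothing
// when it does not, which is what the len(out) == 0 and haveTableName+"\n"
// comparisons above expect.
var hypotheticalTestTable = "SELECT name FROM sqlite_master WHERE type='table' AND name='" +
	haveTableName + "'"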