Example #1
func TestImportCopy(t *testing.T) {
	b := []byte(`¡åéîòü!`)

	f, err := ioutil.TempFile("", "temp-test")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := f.Write(b); err != nil {
		t.Fatal(err)
	}
	f.Close()

	d := diskv.New(diskv.Options{
		BasePath: "test-import-copy",
	})
	defer d.EraseAll()

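	// Import with move=false copies the source file into the store under "key",
	// so the original temp file should remain on disk.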
	if err := d.Import(f.Name(), "key", false); err != nil {
		t.Fatal(err)
	}

	if _, err := os.Stat(f.Name()); err != nil {
		t.Errorf("expected temp file to remain, but got err = %v", err)
	}
}
Example #2
func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "data",
		Transform:    blockTransform,
		CacheSizeMax: 1024 * 1024, // 1MB
	})

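	// Store each line under the MD5 hex digest of its content, giving a simple
	// content-addressable scheme (md5sum is assumed to return the hex digest).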
	for _, valueStr := range []string{
		"I am the very model of a modern Major-General",
		"I've information vegetable, animal, and mineral",
		"I know the kings of England, and I quote the fights historical",
		"From Marathon to Waterloo, in order categorical",
		"I'm very well acquainted, too, with matters mathematical",
		"I understand equations, both the simple and quadratical",
		"About binomial theorem I'm teeming with a lot o' news",
		"With many cheerful facts about the square of the hypotenuse",
	} {
		d.Write(md5sum(valueStr), []byte(valueStr))
	}

	var keyCount int
	for key := range d.Keys() {
		val, err := d.Read(key)
		if err != nil {
			panic(fmt.Sprintf("key %s had no value", key))
		}
		fmt.Printf("%s: %s\n", key, val)
		keyCount++
	}
	fmt.Printf("%d total keys\n", keyCount)

	// d.EraseAll() // leave it commented out to see how data is kept on disk
}
Example #3
func init() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		flag.PrintDefaults()
		fmt.Fprintln(os.Stderr)
		fmt.Fprintln(os.Stderr, "Updates can be triggered by sending SIGUSR1.")
		fmt.Fprintln(os.Stderr, "Automatic updates can be toggled by sending SIGUSR2.")
		fmt.Fprintln(os.Stderr, "A cache purge can be triggered by sending SIGHUP.")
		fmt.Fprintln(os.Stderr)
	}
	flag.Parse()
	subreddits = strings.Split(*sr, ",")
	log.Printf("watching subreddits: %v\n", subreddits)
	userBlacklist = strings.Split(*ul, ",")
	log.Printf("ignoring users: %v\n", userBlacklist)
	cacheDir := fmt.Sprintf("%s/dereddit.cache", os.TempDir())
	os.Mkdir(*rssDir, 0777)
	if *apiKey == "" {
		log.Fatalln("api key not specified")
	}
	o := diskv.Options{
		BasePath:    cacheDir,
		Compression: diskv.NewGzipCompression(),
		PathPerm:    0755,
		FilePerm:    0666,
	}
	cache = diskv.New(o)
	log.Printf("cache %s opened\n", cacheDir)
	log.Printf("outputting rss feeds to %s\n", *rssDir)
}
Example #4
func TestKeysCancel(t *testing.T) {
	d := diskv.New(diskv.Options{
		BasePath: "test-data",
	})
	defer d.EraseAll()

	for k, v := range keysTestData {
		d.Write(k, []byte(v))
	}

	var (
		cancel      = make(chan struct{})
		received    = 0
		cancelAfter = len(keysTestData) / 2
	)

	for key := range d.Keys(cancel) {
		received++

		if received >= cancelAfter {
			close(cancel)
			runtime.Gosched() // allow walker to detect cancel
		}

		t.Logf("received %d: %q", received, key)
	}

	if want, have := cancelAfter, received; want != have {
		t.Errorf("want %d, have %d")
	}
}
Example #5
func main() {
	// Transform function: use the dot-separated segments of the key (minus the
	// last one) as subdirectories, so "alpha" is stored directly in the base dir
	// and "sub.alfa" is stored as "my-data-dir/sub/sub.alfa".
	flatTransform := func(s string) []string {
		ss := strings.Split(s, ".")
		return ss[0 : len(ss)-1]
	}

	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
		PathPerm:     0750,
		FilePerm:     0640,
	})

	// Write three bytes to the key "alpha", plus a few more entries; dotted
	// keys land in subdirectories per the transform above.
	key := "alpha"
	d.Write(key, []byte{'1', '2', '3'})
	d.Write("beta", []byte{'4', '5', '6'})
	d.Write("sub.alfa", []byte("Hello alpa date"))
	d.Write("sub.omega", []byte("Good buy omega"))

	// Read the value back out of the store.
	value, _ := d.Read(key)
	fmt.Printf("%v\n", value)

	// Erase the key+value from the store (and the disk).
	d.Erase(key)
}
Example #6
func main() {
	flag.Parse()

	if *version {
		fmt.Printf("%v\nBuild: %v\n", VERSION, BUILD_DATE)
		return
	}

	var c httpcache.Cache
	if *cacheDir != "" {
		d := diskv.New(diskv.Options{
			BasePath:     *cacheDir,
			CacheSizeMax: *cacheSize * 1024 * 1024,
		})
		c = diskcache.NewWithDiskv(d)
	} else if *cacheSize != 0 {
		c = httpcache.NewMemoryCache()
	}

	p := imageproxy.NewProxy(nil, c)
	if *whitelist != "" {
		p.Whitelist = strings.Split(*whitelist, ",")
	}
	if *referrers != "" {
		p.Referrers = strings.Split(*referrers, ",")
	}
	if *signatureKey != "" {
		key := []byte(*signatureKey)
		if strings.HasPrefix(*signatureKey, "@") {
			file := strings.TrimPrefix(*signatureKey, "@")
			var err error
			key, err = ioutil.ReadFile(file)
			if err != nil {
				log.Fatalf("error reading signature file: %v", err)
			}
		}
		p.SignatureKey = key
	}
	if *baseURL != "" {
		var err error
		p.DefaultBaseURL, err = url.Parse(*baseURL)
		if err != nil {
			log.Fatalf("error parsing baseURL: %v", err)
		}
	}

	p.ScaleUp = *scaleUp

	server := &http.Server{
		Addr:    *addr,
		Handler: p,
	}

	fmt.Printf("imageproxy (version %v) listening on %s\n", VERSION, server.Addr)
	err := server.ListenAndServe()
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
Example #7
// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}
Example #8
func diskCache(path string) *diskcache.Cache {
	d := diskv.New(diskv.Options{
		BasePath: path,

		// For file "c0ffee", store file as "c0/ff/c0ffee"
		Transform: func(s string) []string { return []string{s[0:2], s[2:4]} },
	})
	return diskcache.NewWithDiskv(d)
}
Example #9
func NewPersistentStorageEngine() StorageEngine {
	storageEngine := new(PersistentStorageEngine)

	// TODO set up a folder structure for < 1k entries per folder
	// actual TODO set up a proper datastore
	flatTransform := func(s string) []string { return []string{} }
	storageEngine.tweetStore = diskv.New(diskv.Options{
		BasePath:     "storage/tweets",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})
	storageEngine.gameStateStore = diskv.New(diskv.Options{
		BasePath:     "storage/game-states",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})

	return storageEngine
}
Example #10
func newInformer(graph, entity string) Informer {
	return &informer{
		d: diskv.New(diskv.Options{
			BasePath:     basePathFor(graph, entity),
			Transform:    decimalSplit(4),
			CacheSizeMax: 0,
			Compression:  diskv.NewZlibCompression(),
		}),
	}
}
Example #11
func NewConfigMgr() *ConfigMgr {
	path := "/tmp/db-diskv"

	diskv := diskv.New(diskv.Options{
		BasePath:     path,
		Transform:    blockTransform,
		CacheSizeMax: 1024 * 1024, // 1MB
	})
	db := &Db{diskv: diskv}
	return &ConfigMgr{db: db, users: make(map[string]*UserInfo), dns: make(map[string]*UserInfo)}
}
Example #12
func NewDiskvStorageBackend(path string) StorageBackend {
	diskv := &DiskvStorageBackend{
		store: diskv.New(diskv.Options{
			BasePath:     path,
			Transform:    transformFunc,
			CacheSizeMax: 0, // no cache
		}),
	}

	return diskv
}
Example #13
func New(cacheDir string, cacheSize uint64, bucketURL string) *Cache {
	if cacheDir == "" {
		cacheDir, _ = ioutil.TempDir("", "disks3cache")
	}
	dv := diskv.New(diskv.Options{
		BasePath:     cacheDir,
		CacheSizeMax: cacheSize * 1024 * 1024,
	})
	return &Cache{
		disk: diskcache.NewWithDiskv(dv),
		s3:   s3cache.New(bucketURL),
	}
}
Example #14
func TestKeysNested(t *testing.T) {
	d := diskv.New(diskv.Options{
		BasePath:  "test-data",
		Transform: blockTransform(2),
	})
	defer d.EraseAll()

	for k, v := range keysTestData {
		d.Write(k, []byte(v))
	}

	checkKeys(t, d.Keys(nil), keysTestData)
}
Example #15
func TestKeysPrefixFlat(t *testing.T) {
	d := diskv.New(diskv.Options{
		BasePath: "test-data",
	})
	defer d.EraseAll()

	for k, v := range keysTestData {
		d.Write(k, []byte(v))
	}

	for _, prefix := range prefixes {
		checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix))
	}
}
Example #16
func (m *ByteStore) Start(config string) error {
	var c Config
	err := json.Unmarshal([]byte(config), &c)
	if err != nil {
		return err
	}

	transformFunc := func(s string) []string {
		return strings.Split(s, SeparationCharacter)
	}
	m.DB = diskv.New(diskv.Options{
		BasePath:     c.RootDir,
		Transform:    transformFunc,
		CacheSizeMax: 1024 * 1024 * c.CacheSize, // in megabytes
	})
	return nil
}
Example #17
func main() {

	// Set server address
	addr := os.Getenv("ADDRESS")
	if addr == "" {
		log.Fatal("No address provided for the imageproxy")
	}

	// Set cache
	var cache httpcache.Cache
	d := diskv.New(diskv.Options{
		BasePath:     "/tmp/imageproxy",
		CacheSizeMax: 500 * 1024 * 1024,
	})
	cache = diskcache.NewWithDiskv(d)

	// Create proxy
	p := imageproxy.NewProxy(nil, cache)

	// Create whitelist
	if os.Getenv("WHITELIST") != "" {
		p.Whitelist = strings.Split(os.Getenv("WHITELIST"), ",")
	}

	// Create baseurl
	if os.Getenv("BASEURL") != "" {
		var err error
		p.DefaultBaseURL, err = url.Parse(os.Getenv("BASEURL"))
		if err != nil {
			log.Fatalf("error parsing baseURL: %v", err)
		}
	}

	p.ScaleUp = true

	server := &http.Server{
		Addr:    addr,
		Handler: p,
	}

	fmt.Printf("imageproxy listening on " + addr)
	err := server.ListenAndServe()
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
Example #18
func TestKeysFlat(t *testing.T) {
	transform := func(s string) []string {
		if s == "" {
			t.Fatalf(`transform should not be called with ""`)
		}
		return []string{}
	}
	d := diskv.New(diskv.Options{
		BasePath:  "test-data",
		Transform: transform,
	})
	defer d.EraseAll()

	for k, v := range keysTestData {
		d.Write(k, []byte(v))
	}

	checkKeys(t, d.Keys(nil), keysTestData)
}
Example #19
func main() {
	c := diskv.New(diskv.Options{
		BasePath:     "./my-diskv-data-directory",
		Transform:    func(s string) []string { return []string{} },
		CacheSizeMax: 1024 * 1024, // 1MB
	})
	b, _ := json.Marshal(Test{Name: "figo", Tp: "android", Count: 1024})
	c.Write("test", b)
	for k := range c.Keys(nil) {
		log.Println("@k:", k)
		d, _ := c.Read(k)
		v := &Test{}
		json.Unmarshal(d, v)
		log.Println("@name:", v.Name, "@count:", v.Count, "@tp:", v.Tp)
	}
	d, _ := c.Read("test")
	v := &Test{}
	json.Unmarshal(d, v)
	log.Println("@name:", v.Name, "@count:", v.Count, "@tp:", v.Tp)

	st := utee.Tick()
	//	1000*
	for i := 0; i < 100*10000; i++ {
		b, _ := json.Marshal(Test{Name: "figo", Tp: "android", Count: 1024})
		c.Write(fmt.Sprint("test", i), b)
		//		c.WriteStream(fmt.Sprint("test",i),strings.NewReader( string(b)),false)
	}
	log.Println("100,0000 write cost ", (utee.Tick() - st), "m second")
	st = utee.Tick()
	for k := range c.Keys(nil) {
		b, e := c.Read(k)
		if e != nil {
			log.Println("@error:", e)
		}
		v := &Test{}
		json.Unmarshal(b, v)
		log.Println("@name:", v.Name, "@count:", v.Count, "@tp:", v.Tp)

	}
	log.Println("100,0000  read cost ", (utee.Tick() - st), "m second")
}
Example #20
// setupDb configures the key-value store to which POSTed data will be written,
// rooted in dbDir.
func setupDb(dbDir string) {
	// Based on https://github.com/peterbourgon/diskv/blob/master/examples/content-addressable-store/cas.go#L14
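	// e.g. with transformBlockSize == 2, the key "abcdef" ends up at "ab/cd/ef/abcdef".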
	blockTransform := func(s string) []string {
		sliceSize := len(s) / transformBlockSize
		pathSlice := make([]string, sliceSize)
		for i := 0; i < sliceSize; i++ {
			from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize
			pathSlice[i] = s[from:to]
		}
		return pathSlice
	}

	// Initialize a new diskv store
	db = diskv.New(diskv.Options{
		BasePath: dbDir,
		// Transform:    func(s string) []string { return []string{} },
		Transform:    blockTransform,
		CacheSizeMax: uint64(maxDataSize),
	})
	return
}
Example #21
func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "my-diskv-data-directory",
		Transform:    func(s string) []string { return []string{} },
		CacheSizeMax: 1024 * 1024, // 1MB
	})

	key := "alpha"
	if err := d.Write(key, []byte{'1', '2', '3'}); err != nil {
		panic(err)
	}

	value, err := d.Read(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", value)

	if err := d.Erase(key); err != nil {
		panic(err)
	}
}
Example #22
func TestImportMove(t *testing.T) {
	b := []byte(`0123456789`)
	f, err := ioutil.TempFile("", "temp-test")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := f.Write(b); err != nil {
		t.Fatal(err)
	}
	f.Close()

	d := diskv.New(diskv.Options{
		BasePath: "test-import-move",
	})
	defer d.EraseAll()

	key := "key"

	if err := d.Write(key, []byte(`TBD`)); err != nil {
		t.Fatal(err)
	}

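	// Import with move=true moves the temp file into the store, replacing the
	// placeholder value written above.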
	if err := d.Import(f.Name(), key, true); err != nil {
		t.Fatal(err)
	}

	if _, err := os.Stat(f.Name()); err == nil || !os.IsNotExist(err) {
		t.Errorf("expected temp file to be gone, but err = %v", err)
	}

	if !d.Has(key) {
		t.Errorf("%q not present", key)
	}

	if buf, err := d.Read(key); err != nil || !bytes.Equal(b, buf) {
		t.Errorf("want %q, have %q (err = %v)", string(b), string(buf), err)
	}
}
Example #23
func (s *DiskvSpooler) Start(sz Serializer) error {
	// Create the data dir if necessary.
	if err := os.Mkdir(s.dataDir, 0775); err != nil {
		if !os.IsExist(err) {
			return err
		}
	}

	// T{} -> []byte
	s.sz = sz

	// diskv reads all files in BasePath on startup.
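	// The LLRB index keeps keys ordered by IndexLess, allowing sorted traversal.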
	s.cache = diskv.New(diskv.Options{
		BasePath:     s.dataDir,
		Transform:    func(s string) []string { return []string{} },
		CacheSizeMax: CACHE_SIZE,
		Index:        &diskv.LLRBIndex{},
		IndexLess:    func(a, b string) bool { return a < b },
	})

	go s.run()
	s.logger.Info("Started")
	return nil
}
Example #24
// create a new server from the configuration directory
func NewServer(config *Configuration) (srv *Server, err error) {
	err = config.Validate()
	if err != nil {
		log.Printf("configuration problem: %v\n", err)
		return
	}

	sources, err := CreateSources(config)
	if err != nil {
		log.Printf("Could setup sources: %v\n", err)
		return
	}

	templ, err := template.New("t").ParseGlob(path.Join(config.TemplateDirectory(), "*.html"))
	if err != nil {
		log.Printf("Could not setup templates: %v\n", err)
		return
	}

	err = EnsureDirectoryExists(config.Cache.Directory)
	if err != nil {
		log.Printf("Could not create cache directory: %v\n", err)
		return
	}
	cache := NewForgettingCache(
		diskv.New(diskv.Options{
			BasePath:     config.Cache.Directory,
			CacheSizeMax: 0,
			Transform:    cacheTransformKeyToPath,
		}), 10)

	imgProxy, err := NewImgProxy(config, cache)
	if err != nil {
		log.Printf("Could not setup caching proxy: %v\n", err)
		return
	}

	srv = &Server{
		config:         config,
		sources:        sources,
		blockStore:     NewBlockStore(),
		templ:          templ,
		router:         httprouter.New(),
		imgProxy:       imgProxy,
		doUpdatingChan: make(chan bool),
		cache:          cache,
	}

	// goroutine to update the blocks from the sources
	go func() {
		doUpdating := false
		updateTimeout := 10
		for {
			if doUpdating {
				log.Printf("Pulling sources.")

				err := srv.PullSources()
				if err != nil {
					log.Printf("Could not pull sources: %v", err)
				}
				if updateTimeout > 0 && srv.blockStore.Size() > 0 {
					updateTimeout = srv.config.UpdateInterval
				}
			}

			if updateTimeout > 0 {
				select {
				case doUpdating = <-srv.doUpdatingChan:
					continue
				case <-time.After(time.Second * time.Duration(updateTimeout)):
					continue
				}
			} else {
				doUpdating = <-srv.doUpdatingChan
			}
		}
	}()

	srv.router.GET("/", srv.handleIndexPage)
	srv.router.GET("/image/:id", srv.handleImageRequest)

	fileServer := http.FileServer(http.Dir(config.StaticFilesDirectory()))
	srv.router.Handler("GET", "/static/*filepath", http.StripPrefix("/static/", fileServer))
	// fallback to static files un-resolved requests in root directory - for files
	// like favicon.ico and robots.txt
	srv.router.NotFound = fileServer
	return
}
Example #25
		classif = "CODE"
	} else {
		classif = "NOTCODE"
	}

	return classif + " " + summary, nil
}

var (
	// httpCacheDir is the directory used for caching HTTP responses. It can be
	// reused across executions (it is not necessary to create a new random temp
	// dir upon startup).
	httpCacheDir = "/tmp/thesrc-http-cache"

	localCache = diskcache.NewWithDiskv(diskv.New(diskv.Options{
		BasePath:     httpCacheDir,
		CacheSizeMax: 50 * 1024 * 1024 * 1024, // 50 GB
	}))

	httpClient = &http.Client{
		//Transport: &httpcache.Transport{Cache: localCache},
		Timeout: time.Second * 3,
		// TODO(sqs): add timeout
	}
)

func init() {
	if err := os.Mkdir(httpCacheDir, 0700); err != nil && !os.IsExist(err) {
		log.Fatalf("Mkdir(%s) failed: %s", httpCacheDir, err)
	}
}
Example #26
func NewStore(base string) (*Store, error) {
	casDir := filepath.Join(base, "cas")

	s := &Store{
		base:   base,
		stores: make([]*diskv.Diskv, len(diskvStores)),
	}

	s.imageLockDir = filepath.Join(casDir, "imagelocks")
	err := os.MkdirAll(s.imageLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	s.treeStoreLockDir = filepath.Join(casDir, "treestorelocks")
	err = os.MkdirAll(s.treeStoreLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	// Take a shared cas lock
	s.storeLock, err = lock.NewLock(casDir, lock.Dir)
	if err != nil {
		return nil, err
	}

	for i, p := range diskvStores {
		s.stores[i] = diskv.New(diskv.Options{
			BasePath:  filepath.Join(casDir, p),
			Transform: blockTransform,
		})
	}
	db, err := NewDB(filepath.Join(casDir, "db"))
	if err != nil {
		return nil, err
	}
	s.db = db

	s.treestore = &TreeStore{path: filepath.Join(base, "cas", "tree")}

	needsMigrate := false
	fn := func(tx *sql.Tx) error {
		var err error
		ok, err := dbIsPopulated(tx)
		if err != nil {
			return err
		}
		// populate the db
		if !ok {
			for _, stmt := range dbCreateStmts {
				_, err = tx.Exec(stmt)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// if db is populated check its version
		version, err := getDBVersion(tx)
		if err != nil {
			return err
		}
		if version < dbVersion {
			needsMigrate = true
		}
		if version > dbVersion {
			return fmt.Errorf("Current store db version: %d greater than the current rkt expected version: %d", version, dbVersion)
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return nil, err
	}

	// migration is done in another transaction as it must take an exclusive
	// store lock. If, in the meantime, another process has already done the
	// migration, between the previous db version check and the below
	// migration code, the migration will do nothing as it'll start
	// migration from the current version.
	if needsMigrate {
		// Take an exclusive store lock
		err := s.storeLock.ExclusiveLock()
		if err != nil {
			return nil, err
		}
		// TODO(sgotti) take a db backup (for debugging and last resort rollback?)
		fn := func(tx *sql.Tx) error {
			return migrate(tx, dbVersion)
		}
		if err = db.Do(fn); err != nil {
			return nil, err
		}
	}

	return s, nil
}
Example #27
func main() {
	ll := flag.String("log", "ERROR", "Set the log level")
	version := flag.Bool("version", false, "Display the version number")

	configDir, err := getConfigPath()
	if err != nil {
		panic(err)
	}

	envFile := configDir + "/ssh-manage.env"
	_, err = os.Stat(envFile)
	if err == nil {
		loadConfig(envFile)
	}

	d := diskv.New(diskv.Options{
		BasePath:     configDir + "/hosts", // where the data is stored
		Transform:    BlockTransform,
		CacheSizeMax: 1024 * 1024, // 1MB
	})

	flag.Usage = usage
	flag.Parse()

	if *version {
		fmt.Printf("%s version: %v\n", os.Args[0], VERSION)
		os.Exit(0)
	}

	logLevel := getLogLevel(*ll)
	l = llog.New(os.Stdout, logLevel)

	logHandler("DEBUG", fmt.Sprintln("configuration directory:", configDir))

	if flag.NArg() == 0 {
		logHandler("ERROR", "please supply a command")
		// TODO list supported commands (Redirect to help message or usage text?)
		os.Exit(1)
	}

	// TODO add the ability to set whether a record or records should get
	// printed. This needs to be host-dependent.
	switch flag.Arg(0) {
	case "add":
		var hostInfo string
		if flag.Arg(2) != "" {
			hostInfo = flag.Arg(2)
		}

		err = addRecord(d, strings.TrimSpace(flag.Arg(1)), hostInfo)
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("failed creating a new record: %s\n", err.Error()))
			os.Exit(1)
		}
	case "get":
		err := getRecord(d, strings.TrimSpace(flag.Arg(1)))
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("failed fetching record details: %s\n", err.Error()))
			os.Exit(1)
		}
	case "list":
		err := listRecords(d)
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("failed fetching all records: %s\n", err.Error()))
			os.Exit(1)
		}
	case "rm":
		err := removeRecord(d, strings.TrimSpace(flag.Arg(1)))
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("failed removing record: %s\n", err.Error()))
			os.Exit(1)
		}
	case "write":
		err := writeFile(d)
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("failed when writing out SSH configuration file: %s\n",
					err.Error()))
			os.Exit(1)
		}
	case "update":
		if flag.Arg(1) == "" {
			logHandler("ERROR", "update requires an argument")
			os.Exit(1)
		}

		err := updateRecord(d, strings.TrimSpace(flag.Arg(1)))
		if err != nil {
			logHandler("ERROR",
				fmt.Sprintf("faild updating record: %s\n", err.Error()))
			os.Exit(1)
		}
	default:
		usage()
		os.Exit(1)
	}

	os.Exit(0)
}
Example #28
File: store.go Project: nhlfr/rkt
func NewStore(dir string) (*Store, error) {
	// We need to allow the store's setgid bits (if any) to propagate, so
	// disable umask
	um := syscall.Umask(0)
	defer syscall.Umask(um)

	s := &Store{
		dir:    dir,
		stores: make([]*diskv.Diskv, len(diskvStores)),
	}

	s.imageLockDir = filepath.Join(dir, "imagelocks")
	err := os.MkdirAll(s.imageLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}

	// Take a shared cas lock
	s.storeLock, err = lock.NewLock(dir, lock.Dir)
	if err != nil {
		return nil, err
	}
	if err := s.storeLock.SharedLock(); err != nil {
		return nil, err
	}

	for i, p := range diskvStores {
		s.stores[i] = diskv.New(diskv.Options{
			PathPerm:  defaultPathPerm,
			FilePerm:  defaultFilePerm,
			BasePath:  filepath.Join(dir, p),
			Transform: blockTransform,
		})
	}
	db, err := db.NewDB(s.dbDir())
	if err != nil {
		return nil, err
	}
	s.db = db

	needsMigrate := false
	needsSizePopulation := false
	fn := func(tx *sql.Tx) error {
		var err error
		ok, err := dbIsPopulated(tx)
		if err != nil {
			return err
		}
		// populate the db
		if !ok {
			for _, stmt := range dbCreateStmts {
				_, err = tx.Exec(stmt)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// if db is populated check its version
		version, err := getDBVersion(tx)
		if err != nil {
			return err
		}
		if version < dbVersion {
			needsMigrate = true
		}
		if version > dbVersion {
			return fmt.Errorf("current store db version: %d (greater than the current rkt expected version: %d)", version, dbVersion)
		}
		if version < 5 {
			needsSizePopulation = true
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return nil, err
	}

	// migration is done in another transaction as it must take an exclusive
	// store lock. If, in the meantime, another process has already done the
	// migration, between the previous db version check and the below
	// migration code, the migration will do nothing as it'll start
	// migration from the current version.
	if needsMigrate {
		// Take an exclusive store lock
		err := s.storeLock.ExclusiveLock()
		if err != nil {
			return nil, err
		}
		if err := s.backupDB(); err != nil {
			return nil, err
		}
		fn := func(tx *sql.Tx) error {
			return migrate(tx, dbVersion)
		}
		if err = db.Do(fn); err != nil {
			return nil, err
		}

		if needsSizePopulation {
			if err := s.populateSize(); err != nil {
				return nil, err
			}
		}
	}

	return s, nil
}
Example #29
	"github.com/peterbourgon/diskv"
	"log"
	"os"
	"strings"
	"time"
)

var server = flag.String("server", "talk.google.com:443", "server")
var username = flag.String("username", "", "username")
var password = flag.String("password", "", "password")
var adduser = flag.String("adduser", "", "adduser")
var rmuser = flag.String("rmuser", "", "rmuser")

var db = diskv.New(diskv.Options{
	BasePath:     "chat_users",
	Transform:    func(s string) []string { return []string{} },
	CacheSizeMax: 1024 * 1024, // 1MB
})

var users map[string]string

func handle(client *xmpp.Client, chat xmpp.Chat) {

	user := strings.Split(chat.Remote, "/")[0]

	_, ok := users[user]
	if !ok {
		log.Printf("User %s not found.", user)
		return
	}