Example #1
// newIndex returns a new sorted.KeyValue, using either the given config or the default.
func newIndex(root string, indexConf jsonconfig.Obj) (sorted.KeyValue, error) {
	if len(indexConf) > 0 {
		return sorted.NewKeyValue(indexConf)
	}
	return sorted.NewKeyValue(jsonconfig.Obj{
		"type": defaultIndexType,
		"file": filepath.Join(root, defaultIndexFile),
	})
}
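For context, a minimal sketch of a call site, written as if it lived in the same package as newIndex (passing nil works because jsonconfig.Obj is a map type, so a nil config has length zero):

// openDefaultIndex is a hypothetical caller: a nil config makes newIndex
// fall back to defaultIndexType and defaultIndexFile under root.
func openDefaultIndex(root string) (sorted.KeyValue, error) {
	return newIndex(root, nil)
}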
Example #2
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs",
			largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	recoveryMu.Lock()
	defer recoveryMu.Unlock()
	if recovery {
		log.Print("Starting recovery of blobpacked index")
		if err := meta.Close(); err != nil {
			return nil, err
		}
		if err := sto.reindex(context.TODO(), func() (sorted.KeyValue, error) {
			return sorted.NewKeyValue(metaConf)
		}); err != nil {
			return nil, err
		}
		return sto, nil
	}
	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Fatal("Error: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode with -recovery.")
	}

	return sto, nil
}
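The recovery branch above hands sto.reindex a constructor closure rather than an open sorted.KeyValue: the previous meta index was just closed, and reindexing needs to recreate it from the same metaConf. A hedged sketch of the shape such a callback consumer takes (rebuild and its body are assumptions, not the real reindex):

// rebuild sketches the callback contract: the constructor lets recovery
// recreate the meta index from its original config after the old one
// has been closed.
func rebuild(ctx context.Context, newMeta func() (sorted.KeyValue, error)) (sorted.KeyValue, error) {
	meta, err := newMeta()
	if err != nil {
		return nil, fmt.Errorf("recreating meta index: %v", err)
	}
	// ... stream the packed zips and repopulate meta here ...
	return meta, nil
}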
Example #3
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	kvConfig := config.RequiredObject("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}
	kv, err := sorted.NewKeyValue(kvConfig)
	if err != nil {
		return nil, err
	}

	ix, err := New(kv)
	if err != nil {
		return nil, err
	}

	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		ix.Close()
		return nil, err
	}
	ix.BlobSource = sto

	// Good enough, for now:
	ix.KeyFetcher = ix.BlobSource

	return ix, nil
}
Example #4
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
	blobPrefix := config.RequiredString("blobSource")
	kvConfig := config.RequiredObject("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}
	kv, err := sorted.NewKeyValue(kvConfig)
	if err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}

	ix, err := New(kv)
	// TODO(mpl): next time we need to do another fix, make a new error
	// type that lets us apply the needed fix depending on its value or
	// something. For now just one value/fix.
	if err == errMissingWholeRef {
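		// Note: New evidently returns a usable index handle alongside
		// errMissingWholeRef; the fix below relies on that to repair ix
		// in place before retrying.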
		// TODO: maybe we don't want to do that automatically. Brad says
		// we have to think about the case on GCE/CoreOS in particular.
		if err := ix.fixMissingWholeRef(sto); err != nil {
			ix.Close()
			return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err)
		}
		ix, err = New(kv)
	}
	if err != nil {
		return nil, err
	}
	ix.InitBlobSource(sto)

	return ix, nil
}
Example #5
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	var (
		origin        = config.RequiredString("origin")
		cache         = config.RequiredString("cache")
		kvConf        = config.RequiredObject("meta")
		maxCacheBytes = config.OptionalInt64("maxCacheBytes", 512<<20)
	)
	if err := config.Validate(); err != nil {
		return nil, err
	}
	cacheSto, err := ld.GetStorage(cache)
	if err != nil {
		return nil, err
	}
	originSto, err := ld.GetStorage(origin)
	if err != nil {
		return nil, err
	}
	kv, err := sorted.NewKeyValue(kvConf)
	if err != nil {
		return nil, err
	}

	// TODO: enumerate through kv and calculate current size.
	// Maybe also even enumerate through cache to see if they match.
	// Or even: keep it only in memory and not in kv?

	s := &sto{
		origin:        originSto,
		cache:         cacheSto,
		maxCacheBytes: maxCacheBytes,
		kv:            kv,
	}
	return s, nil
}
Example #6
func newSorted(t *testing.T) (kv sorted.KeyValue, clean func()) {
	f, err := ioutil.TempFile("", "sqlite-test")
	if err != nil {
		t.Fatal(err)
	}
	db, err := sql.Open("sqlite3", f.Name())
	if err != nil {
		t.Fatalf("opening test database: %v", err)
	}
	for _, tableSql := range sqlite.SQLCreateTables() {
		do(db, tableSql)
	}
	do(db, fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, sqlite.SchemaVersion()))

	kv, err = sorted.NewKeyValue(jsonconfig.Obj{
		"type": "sqlite",
		"file": f.Name(),
	})
	if err != nil {
		t.Fatal(err)
	}
	return kv, func() {
		kv.Close()
		os.Remove(f.Name())
	}
}
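A hypothetical test built on this helper, using the kvtest package that appears throughout these examples:

func TestSQLiteSorted(t *testing.T) {
	kv, clean := newSorted(t)
	defer clean()
	kvtest.TestSorted(t, kv)
}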
Example #7
func newFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (blobserver.Storage, error) {
	var (
		smallPrefix = conf.RequiredString("smallBlobs")
		largePrefix = conf.RequiredString("largeBlobs")
		metaConf    = conf.RequiredObject("metaIndex")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	small, err := ld.GetStorage(smallPrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load smallBlobs at %s: %v", smallPrefix, err)
	}
	large, err := ld.GetStorage(largePrefix)
	if err != nil {
		return nil, fmt.Errorf("failed to load largeBlobs at %s: %v", largePrefix, err)
	}
	largeSubber, ok := large.(subFetcherStorage)
	if !ok {
		return nil, fmt.Errorf("largeBlobs at %q of type %T doesn't support fetching sub-ranges of blobs",
			largePrefix, large)
	}
	meta, err := sorted.NewKeyValue(metaConf)
	if err != nil {
		return nil, fmt.Errorf("failed to setup blobpacked metaIndex: %v", err)
	}
	sto := &storage{
		small: small,
		large: largeSubber,
		meta:  meta,
	}
	sto.init()

	// Check for a weird state: zip files exist, but no metadata about them
	// is recorded. This is probably a corrupt state, and the user likely
	// wants to recover.
	if !sto.anyMeta() && sto.anyZipPacks() {
		log.Printf("Warning: blobpacked storage detects non-zero packed zips, but no metadata. Please re-start in recovery mode.")
		// TODO: add a recovery mode.
		// Old TODO was:
		// fail with a "known corrupt" message and refuse to
		// start unless in recovery mode (perhaps a new environment
		// var? or flag passed down?) using StreamBlobs starting at
		// "l:".  Could even do it automatically if total size is
		// small or fast enough? But that's confusing if it only
		// sometimes finishes recovery. We probably want various
		// server start-up modes anyway: "check", "recover", "garbage
		// collect", "readonly".  So might as well introduce that
		// concept now.

		// TODO: test start-up recovery mode, once it works.
	}
	return sto, nil
}
Example #8
// newStorage returns a new storage in path root with the given maxFileSize,
// or defaultMaxFileSize (512MB) if maxFileSize <= 0.
func newStorage(root string, maxFileSize int64, indexConf jsonconfig.Obj) (s *storage, err error) {
	fi, err := os.Stat(root)
	if os.IsNotExist(err) {
		return nil, fmt.Errorf("storage root %q doesn't exist", root)
	}
	if err != nil {
		return nil, fmt.Errorf("Failed to stat directory %q: %v", root, err)
	}
	if !fi.IsDir() {
		return nil, fmt.Errorf("storage root %q exists but is not a directory.", root)
	}
	var index sorted.KeyValue
	if len(indexConf) > 0 {
		index, err = sorted.NewKeyValue(indexConf)
	} else {
		index, err = kvfile.NewStorage(filepath.Join(root, indexKV))
	}
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			index.Close()
		}
	}()
	if maxFileSize <= 0 {
		maxFileSize = defaultMaxFileSize
	}
	// Be consistent with trailing slashes.  Makes expvar stats for total
	// reads/writes consistent across diskpacked targets, regardless of what
	// people put in their low level config.
	root = strings.TrimRight(root, `\/`)
	s = &storage{
		root:         root,
		index:        index,
		maxFileSize:  maxFileSize,
		Generationer: local.NewGenerationer(root),
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.openAllPacks(); err != nil {
		return nil, err
	}
	if _, _, err := s.StorageGeneration(); err != nil {
		return nil, fmt.Errorf("Error initialization generation for %q: %v", root, err)
	}
	return s, nil
}
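The deferred cleanup above depends on err being a named return value: any later return assigns to it before the deferred closure runs, so the index is closed on every failure path that follows its creation. A standalone sketch of the idiom, with hypothetical handle and initialize placeholders:

type handle struct{}

func (h *handle) Close() error { return nil }

func initialize(h *handle) error { return nil } // placeholder for a fallible step

// openHandle closes h only when a later return sets the named err.
func openHandle() (h *handle, err error) {
	h = &handle{}
	defer func() {
		if err != nil {
			h.Close()
		}
	}()
	if err = initialize(h); err != nil {
		return nil, err
	}
	return h, nil
}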
Example #9
// TestMongoKV tests against a real MongoDB instance, using a Docker container.
func TestMongoKV(t *testing.T) {
	// SetupMongoContainer may skip or fatally fail the test if Docker isn't found
	// or if something goes wrong when setting up the container; thus, no error is returned.
	containerID, ip := dockertest.SetupMongoContainer(t)
	defer containerID.KillRemove(t)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mongo",
		"host":     ip,
		"database": "camlitest",
	})
	if err != nil {
		t.Fatalf("mongo.NewKeyValue = %v", err)
	}
	kvtest.TestSorted(t, kv)
}
Example #10
func TestKvfileKV(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "camlistore-kvfilekv_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	dbname := filepath.Join(tmpDir, "testdb.kvfile")
	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type": "kv",
		"file": dbname,
	})
	if err != nil {
		t.Fatalf("Could not create kvfile sorted kv at %v: %v", dbname, err)
	}
	kvtest.TestSorted(t, kv)
}
Example #11
func TestLeveldbKV(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "camlistore-leveldbkv_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	dbname := filepath.Join(tmpDir, "testdb.leveldb")
	t.Logf("Testing leveldb %q.", dbname)
	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type": "leveldb",
		"file": dbname,
	})
	if err != nil {
		t.Fatalf("Could not create leveldb sorted kv at %v: %v", dbname, err)
	}
	kvtest.TestSorted(t, kv)
}
Example #12
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
	sto := &nsto{}
	invConf := config.RequiredObject("inventory")
	masterName := config.RequiredString("storage")
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto.inventory, err = sorted.NewKeyValue(invConf)
	if err != nil {
		return nil, fmt.Errorf("Invalid 'inventory' configuration: %v", err)
	}
	sto.master, err = ld.GetStorage(masterName)
	if err != nil {
		return nil, fmt.Errorf("Invalid 'storage' configuration: %v", err)
	}
	return sto, nil
}
Example #13
// TestMySQLKV tests against a real MySQL instance, using a Docker container.
func TestMySQLKV(t *testing.T) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupMySQLContainer(t, dbname)
	defer containerID.KillRemove(t)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mysql",
		"host":     ip + ":3306",
		"database": dbname,
		"user":     dockertest.MySQLUsername,
		"password": dockertest.MySQLPassword,
	})
	if err != nil {
		t.Fatalf("mysql.NewKeyValue = %v", err)
	}
	kvtest.TestSorted(t, kv)
}
Example #14
func newMongoSorted(t *testing.T) (kv sorted.KeyValue, cleanup func()) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupMongoContainer(t)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mongo",
		"host":     ip,
		"database": dbname,
	})
	if err != nil {
		containerID.KillRemove(t)
		t.Fatal(err)
	}
	return kv, func() {
		kv.Close()
		containerID.KillRemove(t)
	}
}
Example #15
// TestPostgreSQLKV tests against a real PostgreSQL instance, using a Docker container.
func TestPostgreSQLKV(t *testing.T) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupPostgreSQLContainer(t, dbname)
	defer containerID.KillRemove(t)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "postgres",
		"host":     ip,
		"database": dbname,
		"user":     dockertest.PostgresUsername,
		"password": dockertest.PostgresPassword,
		"sslmode":  "disable",
	})
	if err != nil {
		t.Fatalf("postgres.NewKeyValue = %v", err)
	}
	kvtest.TestSorted(t, kv)
}
Example #16
func newSorted(t *testing.T) (kv sorted.KeyValue, clean func()) {
	f, err := ioutil.TempFile("", "sqlite-test")
	if err != nil {
		t.Fatal(err)
	}

	kv, err = sorted.NewKeyValue(jsonconfig.Obj{
		"type": "sqlite",
		"file": f.Name(),
	})
	if err != nil {
		t.Fatal(err)
	}
	return kv, func() {
		kv.Close()
		os.Remove(f.Name())
	}
}
Example #17
func TestSQLiteKV(t *testing.T) {
	if !CompiledIn() {
		t.Skip(ErrNotCompiled.Error())
	}
	tmpDir, err := ioutil.TempDir("", "camlistore-sqlitekv_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	dbname := filepath.Join(tmpDir, "testdb.sqlite")
	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type": "sqlite",
		"file": dbname,
	})
	if err != nil {
		t.Fatalf("Could not create sqlite sorted kv at %v: %v", dbname, err)
	}
	kvtest.TestSorted(t, kv)
}
Example #18
func newMySQLSorted(t *testing.T) (kv sorted.KeyValue, clean func()) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupMySQLContainer(t, dbname)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mysql",
		"host":     ip + ":3306",
		"database": dbname,
		"user":     dockertest.MySQLUsername,
		"password": dockertest.MySQLPassword,
	})
	if err != nil {
		containerID.KillRemove(t)
		t.Fatal(err)
	}
	return kv, func() {
		kv.Close()
		containerID.KillRemove(t)
	}
}
Example #19
// TestMySQLKV tests against a real MySQL instance, using a Docker container.
func TestMySQLKV(t *testing.T) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupMySQLContainer(t, dbname)
	defer containerID.KillRemove(t)

	// TODO(mpl): add test for serverVersion once we host the docker image ourselves
	// (and hence have the control over the version).

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mysql",
		"host":     ip + ":3306",
		"database": dbname,
		"user":     dockertest.MySQLUsername,
		"password": dockertest.MySQLPassword,
	})
	if err != nil {
		t.Fatalf("mysql.NewKeyValue = %v", err)
	}
	kvtest.TestSorted(t, kv)
}
Example #20
func newPostgresSorted(t *testing.T) (kv sorted.KeyValue, clean func()) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupPostgreSQLContainer(t, dbname)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "postgres",
		"host":     ip,
		"database": dbname,
		"user":     dockertest.PostgresUsername,
		"password": dockertest.PostgresPassword,
		"sslmode":  "disable",
	})
	if err != nil {
		containerID.KillRemove(t)
		t.Fatal(err)
	}
	return kv, func() {
		kv.Close()
		containerID.KillRemove(t)
	}
}
Example #21
func TestRollback(t *testing.T) {
	dbname := "camlitest_" + osutil.Username()
	containerID, ip := dockertest.SetupMySQLContainer(t, dbname)
	defer containerID.KillRemove(t)

	kv, err := sorted.NewKeyValue(jsonconfig.Obj{
		"type":     "mysql",
		"host":     ip + ":3306",
		"database": dbname,
		"user":     dockertest.MySQLUsername,
		"password": dockertest.MySQLPassword,
	})
	if err != nil {
		t.Fatalf("mysql.NewKeyValue = %v", err)
	}

	tooLargeAKey := make([]byte, sorted.MaxKeySize+10)
	for i := range tooLargeAKey {
		tooLargeAKey[i] = 'L'
	}

	nbConnections := 2
	tick := time.AfterFunc(5*time.Second, func() {
		// We have to force-close the connection; otherwise the connection
		// hogging does not even let us exit the func with t.Fatal (how? why?).
		kv.(*keyValue).DB.Close()
		t.Fatal("Test failed because SQL connections were blocked by transactions that were never rolled back")
	})
	kv.(*keyValue).DB.SetMaxOpenConns(nbConnections)
	for i := 0; i < nbConnections+1; i++ {
		b := kv.BeginBatch()
		// Make the transaction fail, to force a rollback; without the
		// rollback in CommitBatch, this whole test fails.
		b.Set(string(tooLargeAKey), "whatever")
		if err := kv.CommitBatch(b); err == nil {
			t.Fatal("wanted failed commit because too large a key")
		}
	}
	tick.Stop()
}
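For contrast, the same batch API on its happy path, as a fragment assuming the kv and t of the test above (BeginBatch, Set, and CommitBatch are the calls TestRollback exercises):

	b := kv.BeginBatch()
	b.Set("key", "value")
	b.Set("other", "value2")
	if err := kv.CommitBatch(b); err != nil {
		t.Fatalf("CommitBatch: %v", err)
	}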
Example #22
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (bs blobserver.Storage, err error) {
	metaConf := config.RequiredObject("metaIndex")
	sto := &storage{}
	agreement := config.OptionalString("I_AGREE", "")
	const wantAgreement = "that encryption support hasn't been peer-reviewed, isn't finished, and its format might change."
	if agreement != wantAgreement {
		return nil, errors.New("Use of the 'encrypt' target without the proper I_AGREE value.")
	}

	key := config.OptionalString("key", "")
	keyFile := config.OptionalString("keyFile", "")
	var keyb []byte
	switch {
	case key != "":
		keyb, err = hex.DecodeString(key)
		if err != nil || len(keyb) != 16 {
			return nil, fmt.Errorf("The 'key' parameter must be 16 bytes of 32 hex digits. (currently fixed at AES-128)")
		}
	case keyFile != "":
		// TODO: check that keyFile's unix permissions aren't too permissive.
		keyb, err = ioutil.ReadFile(keyFile)
		if err != nil {
			return nil, fmt.Errorf("Reading key file %v: %v", keyFile, err)
		}
	}
	blobStorage := config.RequiredString("blobs")
	metaStorage := config.RequiredString("meta")
	if err := config.Validate(); err != nil {
		return nil, err
	}

	sto.index, err = sorted.NewKeyValue(metaConf)
	if err != nil {
		return
	}

	sto.blobs, err = ld.GetStorage(blobStorage)
	if err != nil {
		return
	}
	sto.meta, err = ld.GetStorage(metaStorage)
	if err != nil {
		return
	}

	if keyb == nil {
		// TODO: add a way to prompt from stdin on start? or keychain support?
		return nil, errors.New("no encryption key set with 'key' or 'keyFile'")
	}

	if err := sto.setKey(keyb); err != nil {
		return nil, err
	}

	start := time.Now()
	log.Printf("Reading encryption metadata...")
	if err := sto.readAllMetaBlobs(); err != nil {
		return nil, fmt.Errorf("Error scanning metadata on start-up: %v", err)
	}
	log.Printf("Read all encryption metadata in %.3f seconds", time.Since(start).Seconds())

	return sto, nil
}
Example #23
func newPublishHandler(conf *config) *publishHandler {
	cl, err := app.Client()
	if err != nil {
		logger.Fatalf("could not get a client for the publish handler %v", err)
	}
	if conf.RootName == "" {
		logger.Fatal("camliRoot not found in the app configuration")
	}
	maxResizeBytes := conf.MaxResizeBytes
	if maxResizeBytes == 0 {
		maxResizeBytes = constants.DefaultMaxResizeMem
	}
	var CSSFiles, JSDeps []string
	if conf.SourceRoot != "" {
		appRoot := filepath.Join(conf.SourceRoot, "app", "publisher")
		Files = &fileembed.Files{
			DirFallback: appRoot,
		}
		// TODO(mpl): Can I readdir by listing with "/" on Files, even with DirFallback?
		// Apparently not, but retry later.
		dir, err := os.Open(appRoot)
		if err != nil {
			logger.Fatal(err)
		}
		defer dir.Close()
		names, err := dir.Readdirnames(-1)
		if err != nil {
			logger.Fatal(err)
		}
		for _, v := range names {
			if strings.HasSuffix(v, ".css") {
				CSSFiles = append(CSSFiles, v)
				continue
			}
			// TODO(mpl): document or fix (use a map?) the ordering
			// problem: i.e. jquery.js must be sourced before
			// publisher.js. For now, just cheat by sorting the
			// slice.
			if strings.HasSuffix(v, ".js") {
				JSDeps = append(JSDeps, v)
			}
		}
		sort.Strings(JSDeps)
	} else {
		Files.Listable = true
		dir, err := Files.Open("/")
		if err != nil {
			logger.Fatal(err)
		}
		defer dir.Close()
		fis, err := dir.Readdir(-1)
		if err != nil {
			logger.Fatal(err)
		}
		for _, v := range fis {
			name := v.Name()
			if strings.HasSuffix(name, ".css") {
				CSSFiles = append(CSSFiles, name)
				continue
			}
			if strings.HasSuffix(name, ".js") {
				JSDeps = append(JSDeps, name)
			}
		}
		sort.Strings(JSDeps)
	}
	// TODO(mpl): add all htmls found in Files to the template if none specified?
	if conf.GoTemplate == "" {
		logger.Fatal("a go template is required in the app configuration")
	}
	goTemplate, err := goTemplate(Files, conf.GoTemplate)
	if err != nil {
		logger.Fatal(err)
	}

	var cache blobserver.Storage
	var thumbMeta *server.ThumbMeta
	if conf.CacheRoot != "" {
		cache, err = localdisk.New(conf.CacheRoot)
		if err != nil {
			logger.Fatalf("Could not create localdisk cache: %v", err)
		}
		thumbsCacheDir := filepath.Join(os.TempDir(), "camli-publisher-cache")
		if err := os.MkdirAll(thumbsCacheDir, 0700); err != nil {
			logger.Fatalf("Could not create cache dir %s for %v publisher: %v", thumbsCacheDir, conf.RootName, err)
		}
		kv, err := sorted.NewKeyValue(map[string]interface{}{
			"type": "kv",
			"file": filepath.Join(thumbsCacheDir, conf.RootName+"-thumbnails.kv"),
		})
		if err != nil {
			logger.Fatalf("Could not create kv for %v's thumbs cache: %v", conf.RootName, err)
		}
		thumbMeta = server.NewThumbMeta(kv)
	}

	return &publishHandler{
		rootName:       conf.RootName,
		cl:             cl,
		resizeSem:      syncutil.NewSem(maxResizeBytes),
		staticFiles:    Files,
		goTemplate:     goTemplate,
		CSSFiles:       CSSFiles,
		JSDeps:         JSDeps,
		describedCache: make(map[string]*search.DescribedBlob),
		cache:          cache,
		thumbMeta:      thumbMeta,
	}
}
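Aside: the raw map[string]interface{} literal passed to sorted.NewKeyValue above type-checks because jsonconfig.Obj is a named map type with that same underlying type, so the unnamed literal is assignable to the parameter.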
Example #24
// newKVOrNil wraps sorted.NewKeyValue and adds the ability
// to pass a nil conf to get a (nil, nil) response.
func newKVOrNil(conf jsonconfig.Obj) (sorted.KeyValue, error) {
	if len(conf) == 0 {
		return nil, nil
	}
	return sorted.NewKeyValue(conf)
}
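A hypothetical call site, pairing newKVOrNil with the OptionalObject accessor seen in the sync-handler examples below:

queueConf := conf.OptionalObject("queue") // empty jsonconfig.Obj if absent
queue, err := newKVOrNil(queueConf)
if err != nil {
	return nil, err
}
if queue == nil {
	// No queue configured; the optional behavior is simply disabled.
}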
Example #25
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		from           = conf.RequiredString("from")
		to             = conf.RequiredString("to")
		fullSync       = conf.OptionalBool("fullSyncOnStart", false)
		blockFullSync  = conf.OptionalBool("blockingFullSyncOnStart", false)
		idle           = conf.OptionalBool("idle", false)
		queueConf      = conf.OptionalObject("queue")
		copierPoolSize = conf.OptionalInt("copierPoolSize", 5)
		validate       = conf.OptionalBool("validateOnStart", validateOnStartDefault)
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if idle {
		return newIdleSyncHandler(from, to), nil
	}
	if len(queueConf) == 0 {
		return nil, errors.New(`Missing required "queue" object`)
	}
	q, err := sorted.NewKeyValue(queueConf)
	if err != nil {
		return nil, err
	}

	isToIndex := false
	fromBs, err := ld.GetStorage(from)
	if err != nil {
		return nil, err
	}
	toBs, err := ld.GetStorage(to)
	if err != nil {
		return nil, err
	}
	if _, ok := fromBs.(*index.Index); !ok {
		if _, ok := toBs.(*index.Index); ok {
			isToIndex = true
		}
	}

	sh := newSyncHandler(from, to, fromBs, toBs, q)
	sh.toIndex = isToIndex
	sh.copierPoolSize = copierPoolSize
	if err := sh.readQueueToMemory(); err != nil {
		return nil, fmt.Errorf("Error reading sync queue to memory: %v", err)
	}

	if fullSync || blockFullSync {
		sh.logf("Doing full sync")
		didFullSync := make(chan bool, 1)
		go func() {
			for {
				n := sh.runSync("queue", sh.enumeratePendingBlobs)
				if n > 0 {
					sh.logf("Queue sync copied %d blobs", n)
					continue
				}
				break
			}
			n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
			sh.logf("Full sync copied %d blobs", n)
			didFullSync <- true
			sh.syncLoop()
		}()
		if blockFullSync {
			sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
			<-didFullSync
			sh.logf("Full sync complete.")
		}
	} else {
		go sh.syncLoop()
	}

	if validate {
		go sh.startFullValidation()
	}

	blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
	return sh, nil
}
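Note the buffer of one on didFullSync: when blockFullSync is false nothing ever receives from the channel, and the buffer lets the goroutine's single send complete instead of blocking forever.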
Example #26
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	var (
		from          = conf.RequiredString("from")
		to            = conf.RequiredString("to")
		fullSync      = conf.OptionalBool("fullSyncOnStart", false)
		blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false)
		idle          = conf.OptionalBool("idle", false)
		queueConf     = conf.OptionalObject("queue")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if idle {
		synch, err := createIdleSyncHandler(from, to)
		if err != nil {
			return nil, err
		}
		return synch, nil
	}
	if len(queueConf) == 0 {
		return nil, errors.New(`Missing required "queue" object`)
	}
	q, err := sorted.NewKeyValue(queueConf)
	if err != nil {
		return nil, err
	}

	isToIndex := false
	fromBs, err := ld.GetStorage(from)
	if err != nil {
		return nil, err
	}
	toBs, err := ld.GetStorage(to)
	if err != nil {
		return nil, err
	}
	if _, ok := fromBs.(*index.Index); !ok {
		if _, ok := toBs.(*index.Index); ok {
			isToIndex = true
		}
	}

	sh, err := createSyncHandler(from, to, fromBs, toBs, q, isToIndex)
	if err != nil {
		return nil, err
	}

	if fullSync || blockFullSync {
		didFullSync := make(chan bool, 1)
		go func() {
			n := sh.runSync("queue", sh.enumerateQueuedBlobs)
			sh.logf("Queue sync copied %d blobs", n)
			n = sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
			sh.logf("Full sync copied %d blobs", n)
			didFullSync <- true
			sh.syncQueueLoop()
		}()
		if blockFullSync {
			sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
			<-didFullSync
			sh.logf("Full sync complete.")
		}
	} else {
		go sh.syncQueueLoop()
	}

	blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
	return sh, nil
}