Example #1
func TestMetadataRepoForIndexDefn(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	gometaL.LogEnable()
	gometaL.SetLogLevel(gometaL.LogLevelTrace)
	gometaL.SetPrefix("Indexing/Gometa")

	logging.Infof("Start TestMetadataRepo *********************************************************")

	/*
		var addr = "localhost:9885"
		var leader = "localhost:9884"

		repo, err := manager.NewMetadataRepo(addr, leader, "./config.json", nil)
		if err != nil {
			t.Fatal(err)
		}
		runTest(repo, t)
	*/

	os.MkdirAll("./data/", os.ModePerm)
	repo, _, err := manager.NewLocalMetadataRepo("localhost:5002", nil, nil, "./data/MetadataStore")
	if err != nil {
		t.Fatal(err)
	}
	runTest(repo, t)
}
Example #2
// NewGSIIndexer manages a new set of indexes under namespace->keyspace,
// also known as pool->bucket.
// It will return an error when:
// - the GSI cluster is not available.
// - there are network partitions / errors.
func NewGSIIndexer(
	clusterURL, namespace, keyspace string) (datastore.Indexer, errors.Error) {

	l.SetLogLevel(l.Info)

	gsi := &gsiKeyspace{
		clusterURL:     clusterURL,
		namespace:      namespace,
		keyspace:       keyspace,
		indexes:        make(map[uint64]*secondaryIndex), // defnID -> index
		primaryIndexes: make(map[uint64]*secondaryIndex),
	}
	gsi.logPrefix = fmt.Sprintf("GSIC[%s; %s]", namespace, keyspace)

	// get the singleton-client
	client, err := getSingletonClient(clusterURL)
	if err != nil {
		l.Errorf("%v GSI instantiation failed: %v", gsi.logPrefix, err)
		return nil, errors.NewError(err, "GSI client instantiation failed")
	}
	gsi.gsiClient = client
	// refresh indexes for this service->namespace->keyspace
	if err := gsi.Refresh(); err != nil {
		l.Errorf("%v Refresh() failed: %v", gsi.logPrefix, err)
		return nil, err
	}
	l.Debugf("%v instantiated ...", gsi.logPrefix)
	return gsi, nil
}
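A minimal usage sketch (not part of the source above; it assumes the same package aliases, with datastore.Indexer and errors.Error coming from the query engine): open an indexer for a namespace/keyspace pair and fail fast when the GSI cluster is unreachable.

// Hypothetical caller; the "default"/"default" namespace and keyspace are placeholders.
func openDefaultIndexer(clusterURL string) (datastore.Indexer, errors.Error) {
	indexer, err := NewGSIIndexer(clusterURL, "default", "default")
	if err != nil {
		// err covers an unavailable GSI cluster or network partitions/errors.
		return nil, err
	}
	return indexer, nil
}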
Example #3
func main() {
	logging.SetLogLevel(logging.Warn)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, _, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	} else if cmdOptions.Help || len(cmdOptions.OpType) < 1 {
		usage(fset)
		os.Exit(0)
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	}

	if err = querycmd.HandleCommand(client, cmdOptions, false, os.Stdout); err != nil {
		fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
	}
	client.Close()
}
Example #4
func main() {
	logging.SetLogLevel(logging.Error)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, args, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v", err)
		os.Exit(0)
	} else if cmdOptions.Help {
		usage(fset)
		os.Exit(0)
	} else if len(args) < 1 {
		logging.Fatalf("%v", "specify a command")
	}

	b, err := c.ConnectBucket(cmdOptions.Server, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()
	maxvb, err := c.MaxVbuckets(b)
	if err != nil {
		log.Fatal(err)
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		log.Fatal(err)
	}

	switch args[0] {
	case "sanity":
		err = doSanityTests(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb14786":
		err = doMB14786(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb13339":
		err = doMB13339(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "benchmark":
		doBenchmark(cmdOptions.Server, "localhost:8101")

	case "consistency":
		doConsistency(cmdOptions.Server, maxvb, client)
	}
	client.Close()
}
Example #5
func argParse() string {
	buckets := "default"
	endpoints := "localhost:9020"

	flag.StringVar(&buckets, "buckets", buckets,
		"buckets to project")
	flag.StringVar(&endpoints, "endpoints", endpoints,
		"endpoints for mutations stream")
	flag.StringVar(&options.coordEndpoint, "coorendp", "localhost:9021",
		"coordinator endpoint")
	flag.IntVar(&options.stat, "stat", 1000,
		"periodic timeout to print dataport statistics")
	flag.IntVar(&options.timeout, "timeout", 0,
		"timeout for dataport to exit")
	flag.IntVar(&options.maxVbnos, "maxvb", 1024,
		"max number of vbuckets")
	flag.StringVar(&options.auth, "auth", "Administrator:asdasd",
		"Auth user and password")
	flag.BoolVar(&options.projector, "projector", false,
		"start projector for debug mode")
	flag.BoolVar(&options.debug, "debug", false,
		"run in debug mode")
	flag.BoolVar(&options.trace, "trace", false,
		"run in trace mode")

	flag.Parse()

	options.buckets = strings.Split(buckets, ",")
	options.endpoints = strings.Split(endpoints, ",")
	if options.debug {
		logging.SetLogLevel(logging.Debug)
	} else if options.trace {
		logging.SetLogLevel(logging.Trace)
	} else {
		logging.SetLogLevel(logging.Info)
	}

	args := flag.Args()
	if len(args) < 1 {
		usage()
		os.Exit(1)
	}
	return args[0]
}
Example #6
func TestStreamBegin(t *testing.T) {
	maxBuckets, maxvbuckets, mutChanSize := 2, 8, 1000
	logging.SetLogLevel(logging.Silent)

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(addr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start client
	flags := transport.TransportFlag(0).SetProtobuf()
	prefix = "projector.dataport.client."
	config = c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	client, _ := NewClient(
		"cluster", "backfill", addr, flags, maxvbuckets, config)
	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps
	for i := 0; i < maxBuckets; i++ {
		if err := client.SendVbmap(vbmaps[i]); err != nil {
			t.Fatal(err)
		}
	}

	// test a live StreamBegin
	bucket, vbno, vbuuid := "default0", uint16(maxvbuckets), uint64(1111)
	uuid := c.StreamID(bucket, vbno)
	vals, err := client.Getcontext()
	if err != nil {
		t.Fatal(err)
	}
	vbChans := vals[0].(map[string]chan interface{})
	if _, ok := vbChans[uuid]; ok {
		t.Fatal("duplicate id")
	}
	vb := c.NewVbKeyVersions(bucket, vbno, vbuuid, 1)
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := c.NewKeyVersions(seqno, docid, maxCount)
	kv.AddStreamBegin()
	vb.AddKeyVersions(kv)
	err = client.SendKeyVersions([]*c.VbKeyVersions{vb}, true)
	client.Getcontext() // syncup
	if err != nil {
		t.Fatal(err)
	} else if _, ok := vbChans[uuid]; !ok {
		fmt.Printf("%v %v\n", len(vbChans), uuid)
		t.Fatal("failed StreamBegin")
	}
	client.Close()
	daemon.Close()
}
Example #7
func TestStreamMgr_Timer(t *testing.T) {

	logging.SetLogLevel(logging.Trace)
	util.TT = t

	old_value := manager.NUM_VB
	manager.NUM_VB = 16
	defer func() { manager.NUM_VB = old_value }()

	// Running test
	runTimerTest()
}
Example #8
// ResetConfig accepts a full-set or subset of global configuration
// and updates projector related fields.
func (p *Projector) ResetConfig(config c.Config) {
	p.rw.Lock()
	defer p.rw.Unlock()
	defer logging.Infof("%v\n", c.LogRuntime())

	// reset configuration.
	if cv, ok := config["projector.settings.log_level"]; ok {
		logging.SetLogLevel(logging.Level(cv.String()))
	}
	if cv, ok := config["projector.maxCpuPercent"]; ok {
		c.SetNumCPUs(cv.Int())
	}
	p.config = p.config.Override(config)

	// CPU-profiling
	cpuProfile, ok := config["projector.cpuProfile"]
	if ok && cpuProfile.Bool() && p.cpuProfFd == nil {
		cpuProfFname, ok := config["projector.cpuProfFname"]
		if ok {
			fname := cpuProfFname.String()
			logging.Infof("%v cpu profiling => %q\n", p.logPrefix, fname)
			p.cpuProfFd = p.startCPUProfile(fname)

		} else {
			logging.Errorf("Missing cpu-profile o/p filename\n")
		}

	} else if ok && !cpuProfile.Bool() {
		if p.cpuProfFd != nil {
			pprof.StopCPUProfile()
			logging.Infof("%v cpu profiling stopped\n", p.logPrefix)
		}
		p.cpuProfFd = nil

	} else if ok {
		logging.Warnf("%v cpu profiling already active !!\n", p.logPrefix)
	}

	// MEM-profiling
	memProfile, ok := config["projector.memProfile"]
	if ok && memProfile.Bool() {
		memProfFname, ok := config["projector.memProfFname"]
		if ok {
			fname := memProfFname.String()
			if p.takeMEMProfile(fname) {
				logging.Infof("%v mem profile => %q\n", p.logPrefix, fname)
			}
		} else {
			logging.Errorf("Missing mem-profile o/p filename\n")
		}
	}
}
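A hedged sketch of driving ResetConfig from the outside (assuming the c alias for the common config package and the Clone/SetValue helpers seen in the other examples; the exact string accepted by logging.Level is an assumption): override the log level and hand the configuration back to a running projector.

// Hypothetical helper; p is an existing *Projector instance.
func bumpProjectorLogLevel(p *Projector) {
	cfg := c.SystemConfig.Clone()
	// ResetConfig accepts a full set or a subset; only the keys it inspects
	// (log level, maxCpuPercent, cpu/mem profiling) take effect.
	cfg.SetValue("projector.settings.log_level", "Debug")
	p.ResetConfig(cfg)
}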
Example #9
func argParse() string {
	var buckets string

	flag.StringVar(&buckets, "buckets", "default",
		"buckets to listen")
	flag.IntVar(&options.maxVbno, "maxvb", 1024,
		"maximum number of vbuckets")
	flag.IntVar(&options.stats, "stats", 1000,
		"periodic timeout in mS, to print statistics, `0` will disable stats")
	flag.BoolVar(&options.printflogs, "flogs", false,
		"display failover logs")
	flag.StringVar(&options.auth, "auth", "",
		"Auth user and password")
	flag.BoolVar(&options.info, "info", false,
		"display informational logs")
	flag.BoolVar(&options.debug, "debug", false,
		"display debug logs")
	flag.BoolVar(&options.trace, "trace", false,
		"display trace logs")

	flag.Parse()

	options.buckets = strings.Split(buckets, ",")
	if options.debug {
		logging.SetLogLevel(logging.Debug)
	} else if options.trace {
		logging.SetLogLevel(logging.Trace)
	} else {
		logging.SetLogLevel(logging.Info)
	}

	args := flag.Args()
	if len(args) < 1 {
		usage()
		os.Exit(1)
	}
	return args[0]
}
Example #10
func TestRequestHandler(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	logging.Infof("Start TestRequestHandler *********************************************************")

	var config = "./config.json"

	logging.Infof("********** Setup index manager")
	var msgAddr = "localhost:9884"
	var httpAddr = "localhost:9102"
	addrPrv := util.NewFakeAddressProvider(msgAddr, httpAddr)
	mgr, err := manager.NewIndexManagerInternal(addrPrv, nil, cfg)
	if err != nil {
		t.Fatal(err)
	}
	mgr.StartCoordinator(config)
	defer mgr.Close()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("********** Start HTTP Server")
	go func() {
		if err := http.ListenAndServe(":9102", nil); err != nil {
			t.Fatal("Fail to start HTTP server on :9102")
		}
	}()

	logging.Infof("********** Cleanup Old Test")
	cleanupRequestHandlerTest(mgr, t)
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("********** Start running request handler test")
	createIndexRequest(t)
	dropIndexRequest(t)

	logging.Infof("********** Cleanup Test")
	cleanupRequestHandlerTest(mgr, t)
	mgr.CleanupTopology()
	mgr.CleanupStabilityTimestamp()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Done TestRequestHandler. Tearing down *********************************************************")
	mgr.Close()
	time.Sleep(time.Duration(1000) * time.Millisecond)
}
Example #11
func argParse() []string {
	flag.StringVar(&options.auth, "auth", "",
		"Auth user and password")
	flag.BoolVar(&options.debug, "debug", false,
		"run in debug mode")
	flag.BoolVar(&options.trace, "trace", false,
		"run in trace mode")

	flag.Parse()

	if options.debug {
		logging.SetLogLevel(logging.Debug)
	} else if options.trace {
		logging.SetLogLevel(logging.Trace)
	} else {
		logging.SetLogLevel(logging.Info)
	}

	args := flag.Args()
	if len(args) < 1 {
		os.Exit(1)
	}
	return strings.Split(args[0], ",")
}
Example #12
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	common.SeedProcess()

	logging.Infof("Indexer started with command line: %v\n", os.Args)
	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	forestdb.Log = &logging.SystemLogger

	// setup cbauth
	if *auth != "" {
		up := strings.Split(*auth, ":")
		logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
		if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	go platform.DumpOnSignal()
	go common.ExitOnStdinClose()

	config := common.SystemConfig
	config.SetValue("indexer.clusterAddr", *cluster)
	config.SetValue("indexer.numVbuckets", *numVbuckets)
	config.SetValue("indexer.enableManager", *enableManager)
	config.SetValue("indexer.adminPort", *adminPort)
	config.SetValue("indexer.scanPort", *scanPort)
	config.SetValue("indexer.httpPort", *httpPort)
	config.SetValue("indexer.streamInitPort", *streamInitPort)
	config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
	config.SetValue("indexer.streamMaintPort", *streamMaintPort)
	config.SetValue("indexer.storage_dir", *storageDir)

	storage_dir := config["indexer.storage_dir"].String()
	if err := os.MkdirAll(storage_dir, 0755); err != nil {
		common.CrashOnError(err)
	}

	_, msg := indexer.NewIndexer(config)

	if msg.GetMsgType() != indexer.MSG_SUCCESS {
		logging.Warnf("Indexer Failure to Init %v", msg)
	}

	logging.Infof("Indexer exiting normally\n")
}
Example #13
func TestStreamMgr_Monitor(t *testing.T) {

	logging.SetLogLevel(logging.Trace)
	util.TT = t

	old_value := manager.NUM_VB
	manager.NUM_VB = 16
	defer func() { manager.NUM_VB = old_value }()

	old_interval := manager.MONITOR_INTERVAL
	manager.MONITOR_INTERVAL = time.Duration(1000) * time.Millisecond
	defer func() { manager.MONITOR_INTERVAL = old_interval }()

	// Running test
	runMonitorTest()
}
Example #14
func init() {
	log.Printf("In init()")
	logging.SetLogLevel(logging.Warn)

	var configpath string
	flag.StringVar(&configpath, "cbconfig", "../config/clusterrun_conf.json", "Path of the configuration file with data about Couchbase Cluster")
	flag.Parse()
	clusterconfig = tc.GetClusterConfFromFile(configpath)
	kvaddress = clusterconfig.KVAddress
	indexManagementAddress = clusterconfig.KVAddress
	indexScanAddress = clusterconfig.KVAddress
	seed = 1
	proddir, bagdir = tc.FetchMonsterToolPath()

	// setup cbauth
	if _, err := cbauth.InternalRetryDefaultInit(kvaddress, clusterconfig.Username, clusterconfig.Password); err != nil {
		log.Fatalf("Failed to initialize cbauth: %s", err)
	}
	secondaryindex.CheckCollation = true
	e := secondaryindex.DropAllSecondaryIndexes(indexManagementAddress)
	tc.HandleError(e, "Error in DropAllSecondaryIndexes")

	time.Sleep(5 * time.Second)
	// Working with Users10k and Users_mut dataset.
	u, _ := user.Current()
	dataFilePath = filepath.Join(u.HomeDir, "testdata/Users10k.txt.gz")
	mutationFilePath = filepath.Join(u.HomeDir, "testdata/Users_mut.txt.gz")
	tc.DownloadDataFile(tc.IndexTypesStaticJSONDataS3, dataFilePath, true)
	tc.DownloadDataFile(tc.IndexTypesMutationJSONDataS3, mutationFilePath, true)
	docs = datautility.LoadJSONFromCompressedFile(dataFilePath, "docid")
	mut_docs = datautility.LoadJSONFromCompressedFile(mutationFilePath, "docid")
	log.Printf("Emptying the default bucket")
	kvutility.EnableBucketFlush("default", "", clusterconfig.Username, clusterconfig.Password, kvaddress)
	kvutility.FlushBucket("default", "", clusterconfig.Username, clusterconfig.Password, kvaddress)
	time.Sleep(5 * time.Second)

	log.Printf("Create Index On the empty default Bucket()")
	var indexName = "index_eyeColor"
	var bucketName = "default"

	err := secondaryindex.CreateSecondaryIndex(indexName, bucketName, indexManagementAddress, "", []string{"eyeColor"}, false, nil, true, defaultIndexActiveTimeout, nil)
	tc.HandleError(err, "Error in creating the index")

	// Populate the bucket now
	log.Printf("Populating the default bucket")
	kvutility.SetKeyValues(docs, "default", "", clusterconfig.KVAddress)
}
Example #15
func init() {
	log.Printf("In init()")

	logging.SetLogLevel(logging.Warn)
	var configpath string
	seed = 1
	flag.StringVar(&configpath, "cbconfig", "../config/clusterrun_conf.json", "Path of the configuration file with data about Couchbase Cluster")
	flag.Parse()
	clusterconfig = tc.GetClusterConfFromFile(configpath)
	kvaddress = clusterconfig.KVAddress
	indexManagementAddress = clusterconfig.KVAddress
	indexScanAddress = clusterconfig.KVAddress

	// setup cbauth
	if _, err := cbauth.InternalRetryDefaultInit(kvaddress, clusterconfig.Username, clusterconfig.Password); err != nil {
		log.Fatalf("Failed to initialize cbauth: %s", err)
	}

	proddir, bagdir = tc.FetchMonsterToolPath()
}
Example #16
func (gsi *gsiKeyspace) SetLogLevel(level qlog.Level) {
	switch level {
	case qlog.NONE:
		l.SetLogLevel(l.Silent)
	case qlog.SEVERE:
		l.SetLogLevel(l.Fatal)
	case qlog.ERROR:
		l.SetLogLevel(l.Error)
	case qlog.WARN:
		l.SetLogLevel(l.Warn)
	case qlog.INFO:
		l.SetLogLevel(l.Info)
	case qlog.REQUEST:
		l.SetLogLevel(l.Timing)
	case qlog.TRACE:
		l.SetLogLevel(l.Debug) //reversed
	case qlog.DEBUG:
		l.SetLogLevel(l.Trace)
	default:
		l.Warnf("Unknown query log level '%v'", level)
	}
}
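Note that the query engine's TRACE and DEBUG levels are deliberately swapped when mapped onto the indexing logger. A short hedged illustration, with gsi being a *gsiKeyspace as built by NewGSIIndexer:

gsi.SetLogLevel(qlog.TRACE) // indexing logger now runs at l.Debug
gsi.SetLogLevel(qlog.DEBUG) // indexing logger now runs at l.Trace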
Example #17
func TestClient(t *testing.T) {
	maxBuckets, maxvbuckets, mutChanSize := 2, 8, 1000
	logging.SetLogLevel(logging.Silent)

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(addr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start client and test number of connection.
	flags := transport.TransportFlag(0).SetProtobuf()
	prefix = "projector.dataport.client."
	config = c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	maxconns := config["parConnections"].Int()
	client, err := NewClient(
		"cluster", "backfill", addr, flags, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	} else if len(client.conns) != maxconns {
		t.Fatal("failed dataport client connections")
	} else if len(client.conns) != len(client.connChans) {
		t.Fatal("failed dataport client connection channels")
	} else if len(client.conns) != len(client.conn2Vbs) {
		t.Fatal("failed dataport client connection channels")
	} else {
		vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps
		for i := 0; i < maxBuckets; i++ {
			if err := client.SendVbmap(vbmaps[i]); err != nil {
				t.Fatal(err)
			}
		}
		validateClientInstance(client, maxvbuckets, maxconns, maxBuckets, t)
	}
	client.Close()
	daemon.Close()
}
Example #18
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	c.SeedProcess()

	logging.Infof("Projector started with command line: %v\n", os.Args)

	cluster := argParse() // eg. "localhost:9000"

	config := c.SystemConfig.Clone()
	logging.SetLogLevel(logging.Level(options.loglevel))

	config.SetValue("maxVbuckets", options.numVbuckets)
	if f := getlogFile(); f != nil {
		fmt.Printf("Projector logging to %q\n", f.Name())
		logging.SetLogWriter(f)
		config.SetValue("log.file", f.Name())
	}
	config.SetValue("projector.clusterAddr", cluster)
	config.SetValue("projector.adminport.listenAddr", options.adminport)

	// setup cbauth
	if options.auth != "" {
		up := strings.Split(options.auth, ":")
		if _, err := cbauth.InternalRetryDefaultInit(cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	epfactory := NewEndpointFactory(cluster, options.numVbuckets)
	config.SetValue("projector.routerEndpointFactory", epfactory)

	logging.Infof("%v\n", c.LogOs())
	logging.Infof("%v\n", c.LogRuntime())

	go c.ExitOnStdinClose()
	projector.NewProjector(options.numVbuckets, config)

	<-done
}
Example #19
func BenchmarkClientRequest(b *testing.B) {
	logging.SetLogLevel(logging.Silent)
	urlPrefix := common.SystemConfig["projector.adminport.urlPrefix"].String()
	client := NewHTTPClient(addr, urlPrefix)
	req := &testMessage{
		DefnID:          uint64(0x1234567812345678),
		Bucket:          "default",
		IsPrimary:       false,
		IName:           "example-index",
		Using:           "forrestdb",
		ExprType:        "n1ql",
		PartitionScheme: "simplekeypartition",
		Expression:      "x+1",
	}
	resp := &testMessage{}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := client.Request(req, resp); err != nil {
			b.Error(err)
		}
	}
}
Example #20
func setLogger(config common.Config) {
	logLevel := config["indexer.settings.log_level"].String()
	level := logging.Level(logLevel)
	logging.Infof("Setting log level to %v", level)
	logging.SetLogLevel(level)
}
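A minimal call-site sketch (assuming common.SystemConfig is a common.Config carrying the indexer.settings.log_level key, as the other examples suggest): apply the configured level once at startup.

// Hypothetical call site; re-invoke setLogger whenever the settings change.
setLogger(common.SystemConfig)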
Example #21
func init() {
	logging.SetLogLevel(logging.Silent)
	server = doServer("http://"+addr, q)
}
Example #22
func TestCoordinator(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	logging.Infof("Start TestCoordinator *********************************************************")

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	/*
		var requestAddr = "localhost:9885"
		var leaderAddr = "localhost:9884"
	*/
	var config = "./config.json"
	manager.USE_MASTER_REPO = true
	defer func() { manager.USE_MASTER_REPO = false }()

	mgr, err := manager.NewIndexManagerInternal("localhost:9886", "localhost:"+manager.COORD_MAINT_STREAM_PORT, nil, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer mgr.Close()
	mgr.StartCoordinator(config)
	time.Sleep(time.Duration(1000) * time.Millisecond)

	cleanup(mgr, t)
	time.Sleep(time.Duration(1000) * time.Millisecond)

	// Add a new index definition : 200
	idxDefn := &common.IndexDefn{
		DefnId:          common.IndexDefnId(200),
		Name:            "coordinator_test",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		PartitionScheme: common.HASH,
		PartitionKey:    "Testing"}

	err = mgr.HandleCreateIndexDDL(idxDefn)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Duration(1000) * time.Millisecond)

	idxDefn, err = mgr.GetIndexDefnById(common.IndexDefnId(200))
	if err != nil {
		t.Fatal(err)
	}

	if idxDefn == nil {
		t.Fatal("Cannot find index definition")
	}

	topology, err := mgr.GetTopologyByBucket("Default")
	if err != nil {
		t.Fatal(err)
	}
	content, err := manager.MarshallIndexTopology(topology)
	if err != nil {
		t.Fatal(err)
	}
	logging.Infof("Topology after index creation : %s", string(content))

	inst := topology.GetIndexInstByDefn(common.IndexDefnId(200))
	if inst == nil || common.IndexState(inst.State) != common.INDEX_STATE_READY {
		t.Fatal("Index Inst not found for index defn 200 or inst state is not in READY")
	}

	cleanup(mgr, t)
	mgr.CleanupTopology()
	mgr.CleanupStabilityTimestamp()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Done TestCoordinator. Tearing down *********************************************************")
	mgr.Close()
	time.Sleep(time.Duration(1000) * time.Millisecond)
}
Example #23
func main() {
	help := flag.Bool("help", false, "Help")
	config := flag.String("configfile", "config.json", "Scan load config file")
	outfile := flag.String("resultfile", "results.json", "Result report file")
	cpus := flag.Int("cpus", runtime.NumCPU(), "Number of CPUs")
	cluster := flag.String("cluster", "127.0.0.1:9000", "Cluster server address")
	auth := flag.String("auth", "Administrator:asdasd", "Auth")
	statsfile := flag.String("statsfile", "", "Periodic statistics report file")
	cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
	memprofile := flag.String("memprofile", "", "write mem profile to file")
	logLevel := flag.String("logLevel", "error", "Log Level")

	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	fmt.Println("Log Level =", *logLevel)

	if *help {
		flag.PrintDefaults()
		os.Exit(0)
	}

	if *cpuprofile != "" {
		fd, err := os.Create(*cpuprofile)
		if err != nil {
			fmt.Println("Failed create cpu profile file")
			os.Exit(1)
		}
		pprof.StartCPUProfile(fd)
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		fd, err := os.Create(*memprofile)
		if err != nil {
			fmt.Println("Failed create mem profile file")
			os.Exit(1)
		}
		defer pprof.WriteHeapProfile(fd)
	}

	runtime.GOMAXPROCS(*cpus)
	up := strings.Split(*auth, ":")
	_, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1])
	if err != nil {
		fmt.Println("Failed to initialize cbauth: %s\n", err)
		os.Exit(1)
	}

	cfg, err := parseConfig(*config)
	handleError(err)

	var statsW io.Writer
	if *statsfile != "" {
		if f, err := os.Create(*statsfile); err != nil {
			handleError(err)
		} else {
			statsW = f
			defer f.Close()
		}
	}

	t0 := time.Now()
	res, err := RunCommands(*cluster, cfg, statsW)
	handleError(err)
	dur := time.Now().Sub(t0)

	totalRows := uint64(0)
	for _, result := range res.ScanResults {
		totalRows += result.Rows
	}
	res.Rows = totalRows
	res.Duration = dur.Seconds() - res.WarmupDuration

	rate := int(float64(totalRows) / res.Duration)

	fmt.Printf("Throughput = %d rows/sec\n", rate)

	os.Remove(*outfile)
	err = writeResults(res, *outfile)
	handleError(err)

}
Example #24
func TestEventMgr(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	logging.Infof("Start TestEventMgr *********************************************************")

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	/*
		var requestAddr = "localhost:9885"
		var leaderAddr = "localhost:9884"
		var config = "./config.json"
	*/

	logging.Infof("Start Index Manager")
	factory := new(util.TestDefaultClientFactory)
	env := new(util.TestDefaultClientEnv)
	admin := manager.NewProjectorAdmin(factory, env, nil)
	//mgr, err := manager.NewIndexManagerInternal(requestAddr, leaderAddr, config, admin)
	mgr, err := manager.NewIndexManagerInternal("localhost:9886", "localhost:"+manager.COORD_MAINT_STREAM_PORT, admin, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer mgr.Close()

	cleanupEvtMgrTest(mgr, t)
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Start Listening to event")
	notifications, err := mgr.StartListenIndexCreate("TestEventMgr")
	if err != nil {
		t.Fatal(err)
	}

	// Add a new index definition : 300
	idxDefn := &common.IndexDefn{
		DefnId:          common.IndexDefnId(300),
		Name:            "event_mgr_test",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		PartitionScheme: common.HASH,
		PartitionKey:    "Testing"}

	logging.Infof("Before DDL")
	err = mgr.HandleCreateIndexDDL(idxDefn)
	if err != nil {
		t.Fatal(err)
	}

	data := listen(notifications)
	if data == nil {
		t.Fatal("Does not receive notification from watcher")
	}

	idxDefn, err = common.UnmarshallIndexDefn(([]byte)(data))
	if err != nil {
		t.Fatal(err)
	}

	if idxDefn == nil {
		t.Fatal("Cannot unmarshall index definition")
	}

	if idxDefn.Name != "event_mgr_test" {
		t.Fatal("Index Definition Name mismatch")
	}

	cleanupEvtMgrTest(mgr, t)
	mgr.CleanupTopology()
	mgr.CleanupStabilityTimestamp()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Stop TestEventMgr. Tearing down *********************************************************")

	mgr.Close()
	time.Sleep(time.Duration(1000) * time.Millisecond)
}
Example #25
// For this test, use Index Defn Id from 100 - 110
func TestIndexManager(t *testing.T) {

	logging.SetLogLevel(logging.Trace)
	os.MkdirAll("./data/", os.ModePerm)

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})

	logging.Infof("Start Index Manager *********************************************************")

	var msgAddr = "localhost:9884"
	factory := new(util.TestDefaultClientFactory)
	env := new(util.TestDefaultClientEnv)
	admin := manager.NewProjectorAdmin(factory, env, nil)
	mgr, err := manager.NewIndexManagerInternal(msgAddr, "localhost:"+manager.COORD_MAINT_STREAM_PORT, admin, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer mgr.Close()

	logging.Infof("Cleanup Test *********************************************************")

	cleanupTest(mgr, t)

	logging.Infof("Setup Initial Data *********************************************************")

	//setupInitialData(mgr, t)

	logging.Infof("Start Provider *********************************************************")

	var providerId = "TestMetadataProvider"
	provider, err := client.NewMetadataProvider(providerId)
	if err != nil {
		t.Fatal(err)
	}
	defer provider.Close()
	provider.WatchMetadata(msgAddr)

	logging.Infof("Test Iterator *********************************************************")

	runIterator(mgr, t, 0)

	plan := make(map[string]interface{})
	plan["nodes"] = []string{msgAddr}
	plan["defer_build"] = true
	newDefnId101, err := provider.CreateIndexWithPlan("manager_test_101", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, plan)
	if err != nil {
		t.Fatal("Cannot create Index Defn 101 through MetadataProvider")
	}
	runIterator(mgr, t, 1)

	newDefnId102, err := provider.CreateIndexWithPlan("manager_test_102", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, plan)
	if err != nil {
		t.Fatal("Cannot create Index Defn 102 through MetadataProvider")
	}
	runIterator(mgr, t, 2)

	logging.Infof("Cleanup Test *********************************************************")

	provider.UnwatchMetadata(msgAddr)
	cleanSingleIndex_managerTest(mgr, t, newDefnId101)
	cleanSingleIndex_managerTest(mgr, t, newDefnId102)
	time.Sleep(time.Duration(1000) * time.Millisecond)
}
Example #26
func BenchmarkLoopback(b *testing.B) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 32, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, config, appch)
	if err != nil {
		b.Fatal(err)
	}

	// start endpoint
	config = c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		b.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets)

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				b.Fatal(err)
			}
		}
	}

	go func() {
		nVbs, nMuts, nIndexes, seqno := maxvbuckets, 5, 5, 1
		for {
			dkvs := dataKeyVersions("default0", seqno, nVbs, nMuts, nIndexes)
			dkvs = append(dkvs, dataKeyVersions("default1", seqno, nVbs, nMuts, nIndexes)...)
			for _, dkv := range dkvs {
				endp.Send(dkv)
			}
			seqno += nMuts
		}
	}()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		select {
		case msg := <-appch:
			if _, ok := msg.([]*protobuf.VbKeyVersions); !ok {
				b.Fatalf("unexpected type in loopback %T", msg)
			}
		}
	}

	endp.Close()
	daemon.Close()
}
Example #27
func TestTimeout(t *testing.T) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 4, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	dconfig := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, dconfig, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start endpoint
	config := c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
	}

	go func() { // this routine will keep one connection alive
		for i := 0; ; i++ {
			vbmap := vbmaps[0] // keep sending sync for first vbucket alone
			idx := i % len(vbmap.Vbuckets)
			vbno, vbuuid := vbmap.Vbuckets[idx], vbmap.Vbuuids[idx]
			// send sync messages
			kv := c.NewKeyVersions(10, nil, 1)
			kv.AddSync()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
			<-time.After(
				time.Duration(dconfig["tcpReadDeadline"].Int()) * time.Millisecond)
		}
	}()

	wait := true
	for wait {
		select {
		case msg := <-appch:
			switch ce := msg.(type) {
			case []*protobuf.VbKeyVersions:
			case ConnectionError:
				ref := maxvbuckets
				t.Logf("%T %v \n", ce, ce)
				if len(ce) != 2 {
					t.Fatal("mismatch in ConnectionError")
				}
				refBuckets := map[string]bool{"default0": true, "default1": true}
				for bucket, vbnos := range ce {
					delete(refBuckets, bucket)
					if len(vbnos) != ref {
						t.Fatalf("mismatch in ConnectionError %v %v", vbnos, ref)
					}
				}
				if len(refBuckets) > 0 {
					t.Fatalf("mismatch in ConnectionError %v", refBuckets)
				}
				wait = false

			default:
				t.Fatalf("expected connection error %T", msg)
			}
		}
	}

	<-time.After(100 * time.Millisecond)
	endp.Close()

	<-time.After(100 * time.Millisecond)
	daemon.Close()
}
Example #28
func TestLoopback(t *testing.T) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 32, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start endpoint
	config = c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
	}

	count, seqno := 200, 1
	for i := 1; i <= count; i += 2 {
		nVbs, nMuts, nIndexes := maxvbuckets, 5, 5
		dkvs := dataKeyVersions("default0", seqno, nVbs, nMuts, nIndexes)
		dkvs = append(dkvs, dataKeyVersions("default1", seqno, nVbs, nMuts, nIndexes)...)
		for _, dkv := range dkvs {
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
		seqno += nMuts

		// gather
		pvbs := make([]*protobuf.VbKeyVersions, 0)
	loop:
		for {
			select {
			case msg := <-appch:
				//t.Logf("%T %v\n", msg, msg)
				if pvbsSub, ok := msg.([]*protobuf.VbKeyVersions); !ok {
					t.Fatalf("unexpected type in loopback %T", msg)
				} else {
					pvbs = append(pvbs, pvbsSub...)
				}
			case <-time.After(10 * time.Millisecond):
				break loop
			}
		}
		commands := make(map[byte]int)
		for _, vb := range protobuf2VbKeyVersions(pvbs) {
			for _, kv := range vb.Kvs {
				for _, cmd := range kv.Commands {
					if _, ok := commands[byte(cmd)]; !ok {
						commands[byte(cmd)] = 0
					}
					commands[byte(cmd)]++
				}
			}
		}
		if StreamBegins, ok := commands[c.StreamBegin]; ok && StreamBegins != 64 {
			t.Fatalf("unexpected response %v", StreamBegins)
		}
		if commands[c.Upsert] != 1600 {
			t.Fatalf("unexpected response %v", commands[c.Upsert])
		}
	}

	endp.Close()
	daemon.Close()
}
Example #29
// For this test, use Index Defn Id from 100 - 110
func TestMetadataProvider(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	logging.Infof("Start Index Manager *********************************************************")

	var msgAddr = "localhost:9884"
	var httpAddr = "localhost:9885"
	factory := new(util.TestDefaultClientFactory)
	env := new(util.TestDefaultClientEnv)
	admin := manager.NewProjectorAdmin(factory, env, nil)
	addrPrv := util.NewFakeAddressProvider(msgAddr, httpAddr)
	mgr, err := manager.NewIndexManagerInternal(addrPrv, admin, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer mgr.Close()
	gMgr = mgr

	logging.Infof("Cleanup Test *********************************************************")

	cleanupTest(mgr, t)

	logging.Infof("Setup Initial Data *********************************************************")

	setupInitialData(mgr, t)

	logging.Infof("Start Provider *********************************************************")

	var providerId = "TestMetadataProvider"
	provider, err := client.NewMetadataProvider(providerId)
	if err != nil {
		t.Fatal(err)
	}
	defer provider.Close()
	provider.SetTimeout(int64(time.Second) * 15)
	indexerId, err := provider.WatchMetadata(msgAddr)
	if err != nil {
		t.Fatal(err)
	}

	// the gometa server is running in the same process as MetadataProvider (client).  So sleep to
	// make sure that the server has a chance to finish off initialization, since the client may
	// be ready, but the server is not.
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Verify Initial Data *********************************************************")

	meta := lookup(provider, common.IndexDefnId(100))
	if meta == nil {
		t.Fatal("Cannot find Index Defn 100 from MetadataProvider")
	}
	logging.Infof("found Index Defn 100")
	if len(meta.Instances) == 0 || meta.Instances[0].State != common.INDEX_STATE_READY {
		t.Fatal("Index Defn 100 state is not ready")
	}
	if meta.Instances[0].IndexerId != indexerId {
		t.Fatal("Index Defn 100 state is not ready")
	}

	meta = lookup(provider, common.IndexDefnId(101))
	if meta == nil {
		t.Fatal("Cannot find Index Defn 101 from MetadataProvider")
	}
	logging.Infof("found Index Defn 101")
	if len(meta.Instances) == 0 || meta.Instances[0].State != common.INDEX_STATE_READY {
		t.Fatal("Index Defn 101 state is not ready")
	}
	if meta.Instances[0].IndexerId != indexerId {
		t.Fatal("Index Defn 100 state is not ready")
	}

	logging.Infof("Change Data *********************************************************")

	notifier := &notifier{hasCreated: false, hasDeleted: false}
	mgr.RegisterNotifier(notifier)

	// Create Index with deployment plan (deferred)
	plan := make(map[string]interface{})
	plan["nodes"] = []interface{}{msgAddr}
	plan["defer_build"] = true
	newDefnId, err := provider.CreateIndexWithPlan("metadata_provider_test_102", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, plan)
	if err != nil {
		t.Fatal("Cannot create Index Defn 102 through MetadataProvider" + err.Error())
	}
	input := make([]common.IndexDefnId, 1)
	input[0] = newDefnId
	if err := provider.BuildIndexes(input); err != nil {
		t.Fatal("Cannot build Index Defn : %v", err)
	}
	logging.Infof("done creating index 102")

	// Drop a seeded index (created during setup step)
	if err := provider.DropIndex(common.IndexDefnId(101)); err != nil {
		t.Fatal("Cannot drop Index Defn 101 through MetadataProvider")
	}
	logging.Infof("done dropping index 101")

	// Create Index (immediate).
	newDefnId2, err := provider.CreateIndexWithPlan("metadata_provider_test_103", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, nil)
	if err != nil {
		t.Fatal("Cannot create Index Defn 103 through MetadataProvider")
	}
	logging.Infof("done creating index 103")

	// Update instance (set state to ACTIVE)
	if err := mgr.UpdateIndexInstance("Default", newDefnId2, common.INDEX_STATE_ACTIVE, common.StreamId(100), "", nil); err != nil {
		t.Fatal("Fail to update index instance")
	}
	logging.Infof("done updating index 103")

	// Update instance (set error string)
	if err := mgr.UpdateIndexInstance("Default", newDefnId2, common.INDEX_STATE_NIL, common.NIL_STREAM, "testing", nil); err != nil {
		t.Fatal("Fail to update index instance")
	}
	logging.Infof("done updating index 103")

	// Create Index (immediate).  This index is supposed to fail by OnIndexBuild()
	if _, err := provider.CreateIndexWithPlan("metadata_provider_test_104", "Default", common.ForestDB,
		common.N1QL, "Testing", "Testing", []string{"Testing"}, false, nil); err == nil {
		t.Fatal("Error does not propage for create Index Defn 104 through MetadataProvider")
	}
	logging.Infof("done creating index 104")

	logging.Infof("Verify Changed Data *********************************************************")

	if lookup(provider, common.IndexDefnId(100)) == nil {
		t.Fatal("Cannot find Index Defn 100 from MetadataProvider")
	}
	logging.Infof("found Index Defn 100")

	if lookup(provider, common.IndexDefnId(101)) != nil {
		t.Fatal("Found Deleted Index Defn 101 from MetadataProvider")
	}
	logging.Infof("cannot found deleted Index Defn 101")

	if meta = lookup(provider, newDefnId); meta == nil {
		t.Fatal(fmt.Sprintf("Cannot Found Index Defn %d from MetadataProvider", newDefnId))
	} else {
		logging.Infof("Found Index Defn %d", newDefnId)
		logging.Infof("meta.Instance %v", meta.Instances)
		if meta.Instances[0].IndexerId != indexerId {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect endpoint", newDefnId))
		}
		if meta.Definition.WhereExpr != "TestingWhereExpr" {
			t.Fatal(fmt.Sprintf("WhereExpr is missing in Index Defn %v", newDefnId))
		}
		if meta.Instances[0].State != common.INDEX_STATE_INITIAL {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect state", newDefnId))
		}
	}

	if meta = lookup(provider, newDefnId2); meta == nil {
		t.Fatal(fmt.Sprintf("Cannot Found Index Defn %d from MetadataProvider", newDefnId2))
	} else {
		logging.Infof("Found Index Defn %d", newDefnId2)
		logging.Infof("meta.Instance %v", meta.Instances)
		if meta.Instances[0].IndexerId != indexerId {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect endpoint", newDefnId2))
		}
		if meta.Definition.WhereExpr != "TestingWhereExpr" {
			t.Fatal(fmt.Sprintf("WhereExpr is missing in Index Defn %v", newDefnId2))
		}
		if meta.Instances[0].State != common.INDEX_STATE_ACTIVE {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect state", newDefnId2))
		}
		if meta.Instances[0].Error != "testing" {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect error string", newDefnId2))
		}
		if meta.Instances[0].BuildTime[10] != 33 {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect buildtime", newDefnId2))
		}
	}

	if !notifier.hasCreated {
		t.Fatal(fmt.Sprintf("Does not recieve notification for creating index %s", newDefnId))
	}
	logging.Infof(fmt.Sprintf("Received notification for creating index %v", newDefnId))

	if !notifier.hasDeleted {
		t.Fatal("Does not recieve notification for deleting index 101")
	}
	logging.Infof("Recieve notification for deleting index 101")

	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Verify Cleanup / Timeout *********************************************************")

	// Create Index (immediate).

	newDefnId105, err := provider.CreateIndexWithPlan("metadata_provider_test_105", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, nil)
	if err == nil {
		t.Fatal("Does not receive timeout error for create Index Defn 105 through MetadataProvider")
	}
	logging.Infof("recieve expected timeout error when creating index 105")
	close(metadata_provider_test_done)

	logging.Infof("Cleanup Test *********************************************************")

	provider.UnwatchMetadata(indexerId)
	cleanupTest(mgr, t)
	cleanSingleIndex(mgr, t, newDefnId)
	cleanSingleIndex(mgr, t, newDefnId2)
	cleanSingleIndex(mgr, t, newDefnId105)
	time.Sleep(time.Duration(1000) * time.Millisecond)
}