Example #1
//
// Create a timer that keeps track of the timestamp history across streams and buckets
//
func newTimer(repo *MetadataRepo) *Timer {

	timestamps := make(map[common.StreamId]timestampHistoryBucketMap)
	tickers := make(map[common.StreamId]tickerBucketMap)
	stopchs := make(map[common.StreamId]stopchBucketMap)
	outch := make(chan *timestampSerializable, TIMESTAMP_CHANNEL_SIZE)

	timer := &Timer{timestamps: timestamps,
		tickers: tickers,
		stopchs: stopchs,
		outch:   outch,
		ready:   false}

	savedTimestamps, err := repo.GetStabilityTimestamps()
	if err == nil {
		for _, timestamp := range savedTimestamps.Timestamps {
			ts, err := unmarshallTimestamp(timestamp.Timestamp)
			if err != nil {
				logging.Errorf("Timer.newTimer() : unable to unmarshall timestamp for bucket %v.  Skip initialization.",
					timestamp.Bucket)
				continue
			}
			timer.start(common.StreamId(timestamp.StreamId), timestamp.Bucket)
			for vb, seqno := range ts.Seqnos {
				timer.increment(common.StreamId(timestamp.StreamId), timestamp.Bucket, uint32(vb), ts.Vbuuids[vb], seqno)
			}
			logging.Errorf("Timer.newTimer() : initialized timestamp for bucket %v from repository.", timestamp.Bucket)
		}
	} else {
		// TODO : Distinguish "timestamp does not exist" from a forestdb error
		logging.Errorf("Timer.newTimer() : cannot get stability timestamp from repository. Skip initialization.")
	}

	return timer
}
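The restore loop above replays each persisted timestamp by starting the tracker for the stream/bucket and then incrementing it once per vbucket. A minimal, self-contained sketch of that replay pattern, using hypothetical savedTS and seqnoTracker types rather than the real Timer and metadata types:

package main

import "fmt"

// savedTS is a hypothetical stand-in for a persisted stability timestamp.
type savedTS struct {
	Bucket  string
	Seqnos  []uint64
	Vbuuids []uint64
}

// seqnoTracker is a hypothetical stand-in for the per-bucket timestamp history.
type seqnoTracker struct {
	seqnos map[uint32]uint64
}

func (t *seqnoTracker) increment(vb uint32, vbuuid, seqno uint64) {
	t.seqnos[vb] = seqno
}

func main() {
	saved := savedTS{Bucket: "Default", Seqnos: []uint64{10, 20}, Vbuuids: []uint64{1, 2}}

	// Replay the saved timestamp: one increment per vbucket, as in newTimer().
	tracker := &seqnoTracker{seqnos: make(map[uint32]uint64)}
	for vb, seqno := range saved.Seqnos {
		tracker.increment(uint32(vb), saved.Vbuuids[vb], seqno)
	}
	fmt.Println(tracker.seqnos)
}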
Example #2
func (m *LifecycleMgr) handleDeleteBucket(bucket string, content []byte) error {

	var result error

	if len(content) == 0 {
		return errors.New("invalid argument")
	}

	streamId := common.StreamId(content[0])

	topology, err := m.repo.GetTopologyByBucket(bucket)
	if err == nil {
		/*
			// if there is an error getting the UUID, this means that
			// the node is not able to connect to pool service in order
			// to fetch the bucket UUID.   Return an error and skip.
			uuid, err := m.getBucketUUID(bucket)
			if err != nil {
				logging.Errorf("LifecycleMgr.handleDeleteBucket() : Encounter when connecting to pool service = %v", err)
				return err
			}
		*/

		// At this point, we are able to connect to pool service.  If pool
		// does not contain the bucket, then we delete all index defn in
		// the bucket.  Otherwise, delete index defn that does not have the
		// same bucket UUID.  Note that any other create index request will
		// be blocked while this call is run.
		definitions := make([]IndexDefnDistribution, len(topology.Definitions))
		copy(definitions, topology.Definitions)

		for _, defnRef := range definitions {

			if defn, err := m.repo.GetIndexDefnById(common.IndexDefnId(defnRef.DefnId)); err == nil {

				logging.Debugf("LifecycleMgr.handleDeleteBucket() : index instance: id %v, streamId %v.",
					defn.DefnId, defnRef.Instances[0].StreamId)

				// delete index defn from the bucket if bucket uuid is not specified or
				// index does *not* belong to bucket uuid
				if /* (uuid == common.BUCKET_UUID_NIL || defn.BucketUUID != uuid) && */
				streamId == common.NIL_STREAM || common.StreamId(defnRef.Instances[0].StreamId) == streamId {
					if err := m.DeleteIndex(common.IndexDefnId(defn.DefnId), false); err != nil {
						result = err
					}
				}
			} else {
				logging.Debugf("LifecycleMgr.handleDeleteBucket() : Cannot find index instance %v.  Skip.", defnRef.DefnId)
			}
		}
	} else if err != fdb.RESULT_KEY_NOT_FOUND {
		result = err
	}

	return result
}
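handleDeleteBucket copies topology.Definitions into a new slice before iterating, so index deletions triggered inside the loop cannot disturb the slice being walked. A minimal sketch of that snapshot-then-mutate pattern with hypothetical types (not the real repo API):

package main

import "fmt"

type registry struct {
	defns []int
}

// delete removes the definition with the given id, preserving order.
func (r *registry) delete(id int) {
	for i, d := range r.defns {
		if d == id {
			r.defns = append(r.defns[:i], r.defns[i+1:]...)
			return
		}
	}
}

func main() {
	r := &registry{defns: []int{100, 101, 102}}

	// Snapshot the slice first; deleting from r.defns while ranging over the
	// same slice header would risk skipping or re-visiting entries.
	snapshot := make([]int, len(r.defns))
	copy(snapshot, r.defns)

	for _, id := range snapshot {
		if id != 100 { // keep 100, drop the rest
			r.delete(id)
		}
	}
	fmt.Println(r.defns) // [100]
}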
Example #3
func (m *LifecycleMgr) UpdateIndexInstance(bucket string, defnId common.IndexDefnId, state common.IndexState,
	streamId common.StreamId, errStr string, buildTime []uint64) error {

	topology, err := m.repo.GetTopologyByBucket(bucket)
	if err != nil {
		logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
		return err
	}

	changed := false
	if state != common.INDEX_STATE_NIL {
		changed = topology.UpdateStateForIndexInstByDefn(common.IndexDefnId(defnId), common.IndexState(state)) || changed
	}

	if streamId != common.NIL_STREAM {
		changed = topology.UpdateStreamForIndexInstByDefn(common.IndexDefnId(defnId), common.StreamId(streamId)) || changed
	}

	changed = topology.SetErrorForIndexInstByDefn(common.IndexDefnId(defnId), errStr) || changed

	if changed {
		if err := m.repo.SetTopologyByBucket(bucket, topology); err != nil {
			logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
			return err
		}
	}

	return nil
}
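UpdateIndexInstance accumulates its dirty flag as changed = update(...) || changed, keeping the update call on the left so it always runs; writing changed || update(...) would short-circuit and skip later updates once anything had changed. A standalone sketch of the difference, using a hypothetical setState helper:

package main

import "fmt"

// setState simulates a topology update that always reports a change.
func setState(dirty *bool) bool {
	*dirty = true
	return true
}

func main() {
	changed := false
	stateDirty, streamDirty := false, false

	// Update call on the left: both updates run, regardless of earlier results.
	changed = setState(&stateDirty) || changed
	changed = setState(&streamDirty) || changed
	fmt.Println(changed, stateDirty, streamDirty) // true true true

	// If written as "changed || setState(...)", the call on the right is
	// short-circuited away once changed is already true.
	changed2 := true
	skipped := false
	_ = changed2 || setState(&skipped)
	fmt.Println(skipped) // false: setState never ran
}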
func (n *notifier) OnIndexBuild(id []common.IndexDefnId) error {
	buildTime := make([]uint64, 1024)
	buildTime[10] = 33
	err := gMgr.UpdateIndexInstance("Default", id[0], common.INDEX_STATE_INITIAL, common.StreamId(100), "", buildTime)

	// change the value to test copy works
	buildTime[10] = 34
	return err
}
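OnIndexBuild mutates buildTime immediately after the call to verify that UpdateIndexInstance stored its own copy rather than aliasing the caller's slice. A minimal sketch of the defensive-copy idiom that test expects, with a hypothetical recordBuildTime function standing in for the real manager API:

package main

import "fmt"

var stored []uint64

// recordBuildTime keeps a private copy so later caller mutations are not visible.
func recordBuildTime(buildTime []uint64) {
	stored = make([]uint64, len(buildTime))
	copy(stored, buildTime)
}

func main() {
	buildTime := make([]uint64, 1024)
	buildTime[10] = 33
	recordBuildTime(buildTime)

	buildTime[10] = 34       // mutate after the call, as OnIndexBuild does
	fmt.Println(stored[10])  // still 33 if the copy was taken
}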
Example #5
func (m *LifecycleMgr) handleTopologyChange(content []byte) error {

	change := new(topologyChange)
	if err := json.Unmarshal(content, change); err != nil {
		return err
	}

	return m.UpdateIndexInstance(change.Bucket, common.IndexDefnId(change.DefnId), common.IndexState(change.State),
		common.StreamId(change.StreamId), change.Error, change.BuildTime)
}
func (c *clustMgrAgent) handleGetGlobalTopology(cmd Message) {

	logging.Debugf("ClustMgr:handleGetGlobalTopology %v", cmd)

	//get the latest topology from manager
	metaIter, err := c.mgr.NewIndexDefnIterator()
	if err != nil {
		common.CrashOnError(err)
	}
	defer metaIter.Close()

	indexInstMap := make(common.IndexInstMap)

	for _, defn, err := metaIter.Next(); err == nil; _, defn, err = metaIter.Next() {

		idxDefn := *defn

		t, e := c.mgr.GetTopologyByBucket(idxDefn.Bucket)
		if e != nil {
			common.CrashOnError(e)
		}

		inst := t.GetIndexInstByDefn(idxDefn.DefnId)

		if inst == nil {
			logging.Warnf("ClustMgr:handleGetGlobalTopology Index Instance Not "+
				"Found For Index Definition %v. Ignored.", idxDefn)
			continue
		}

		//For the indexer, the Ready state doesn't matter: until the index
		//is built, it stays in the Created state.
		var state common.IndexState
		instState := common.IndexState(inst.State)
		if instState == common.INDEX_STATE_READY {
			state = common.INDEX_STATE_CREATED
		} else {
			state = instState
		}

		idxInst := common.IndexInst{InstId: common.IndexInstId(inst.InstId),
			Defn:   idxDefn,
			State:  state,
			Stream: common.StreamId(inst.StreamId),
		}

		indexInstMap[idxInst.InstId] = idxInst

	}

	c.supvCmdch <- &MsgClustMgrTopology{indexInstMap: indexInstMap}
}
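handleGetGlobalTopology drives the definition iterator with a three-clause for loop that calls Next() in both the init and post statements and stops at the first error. A standalone sketch of that loop shape against a hypothetical in-memory iterator:

package main

import (
	"errors"
	"fmt"
)

var errDone = errors.New("done")

type defnIterator struct {
	keys, values []string
	pos          int
}

// Next returns (key, value, err); err is non-nil once the iterator is exhausted.
func (it *defnIterator) Next() (string, string, error) {
	if it.pos >= len(it.keys) {
		return "", "", errDone
	}
	k, v := it.keys[it.pos], it.values[it.pos]
	it.pos++
	return k, v, nil
}

func main() {
	it := &defnIterator{keys: []string{"100", "101"}, values: []string{"idx_a", "idx_b"}}

	// Same loop shape as handleGetGlobalTopology: advance in init and post, stop on error.
	for _, defn, err := it.Next(); err == nil; _, defn, err = it.Next() {
		fmt.Println("definition:", defn)
	}
}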
Example #7
func (l *timestampListSerializable) removeTimestamp(streamId common.StreamId, bucket string) {

	for i, t := range l.Timestamps {
		if t.Bucket == bucket && common.StreamId(t.StreamId) == streamId {
			if i < len(l.Timestamps)-1 {
				l.Timestamps = append(l.Timestamps[0:i], l.Timestamps[i+1:]...)
			} else {
				l.Timestamps = l.Timestamps[0:i]
			}
			return
		}
	}
}
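removeTimestamp special-cases the last element, but append(s[:i], s[i+1:]...) already covers it, since the second slice is simply empty when i is the last index. A standalone sketch of the order-preserving removal idiom:

package main

import "fmt"

// removeAt deletes the element at index i, preserving order.
func removeAt(s []string, i int) []string {
	return append(s[:i], s[i+1:]...)
}

func main() {
	ts := []string{"MAINT_STREAM/Default", "INIT_STREAM/Default", "MAINT_STREAM/beer-sample"}
	ts = removeAt(ts, 2) // removing the last element also works: ts[3:] is empty
	ts = removeAt(ts, 0)
	fmt.Println(ts) // [INIT_STREAM/Default]
}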
Example #8
func (m *IndexManager) notifyNewTimestamp(wrapper *timestampSerializable) {

	logging.Debugf("IndexManager.notifyNewTimestamp(): receive new timestamp, notifying to listener")
	streamId := common.StreamId(wrapper.StreamId)
	timestamp, err := unmarshallTimestamp(wrapper.Timestamp)
	if err != nil {
		logging.Debugf("IndexManager.notifyNewTimestamp(): error when unmarshalling timestamp. Ignore timestamp.  Error=%s", err.Error())
	} else {
		ch, ok := m.timestampCh[streamId]
		if ok {
			if len(ch) < TIMESTAMP_NOTIFY_CH_SIZE {
				ch <- timestamp
			}
		}
	}
}
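The len(ch) guard above keeps notifyNewTimestamp from blocking on a full channel, but the length can change between the check and the send. A select with a default case is the usual race-free non-blocking send; a minimal sketch:

package main

import "fmt"

// notify sends ts without blocking: if the buffer is full, the value is dropped.
func notify(ch chan int, ts int) {
	select {
	case ch <- ts:
		fmt.Println("delivered", ts)
	default:
		fmt.Println("channel full, dropping", ts)
	}
}

func main() {
	ch := make(chan int, 1)
	notify(ch, 1) // delivered
	notify(ch, 2) // dropped: buffer already holds one timestamp
}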
Example #9
func (l *timestampListSerializable) findTimestamp(streamId common.StreamId, bucket string, vb uint16) (uint64, uint64, bool, error) {

	for _, t := range l.Timestamps {
		if t.Bucket == bucket && common.StreamId(t.StreamId) == streamId {

			logging.Debugf("timestampListSerializable.findTimestamp() : found timestamp for streamId %v bucket %v.",
				streamId, bucket)

			ts, err := unmarshallTimestamp(t.Timestamp)
			if err != nil {
				logging.Errorf("timestampListSerializable.findTimestamp() : unable to unmarshall timestamp for bucket %v.",
					t.Bucket)
				return 0, 0, false, err
			}

			logging.Debugf("timestampListSerializable.findTimestamp() : seqNo for vb %d is %d.", vb, ts.Seqnos[vb])

			return ts.Seqnos[vb], ts.Vbuuids[vb], true, nil
		}
	}

	return 0, 0, false, nil
}
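findTimestamp distinguishes "not present" (found == false with a nil error) from "present but unreadable" (a non-nil error). A minimal sketch of that calling convention, using a hypothetical lookupSeqno function over a plain map rather than the real serialized timestamp list:

package main

import (
	"errors"
	"fmt"
)

// lookupSeqno is a hypothetical analogue of findTimestamp: it returns the value,
// a found flag, and an error only when the stored entry cannot be decoded.
func lookupSeqno(store map[string]string, key string) (uint64, bool, error) {
	raw, ok := store[key]
	if !ok {
		return 0, false, nil // not present: no error
	}
	var seqno uint64
	if _, err := fmt.Sscanf(raw, "%d", &seqno); err != nil {
		return 0, false, errors.New("entry present but undecodable")
	}
	return seqno, true, nil
}

func main() {
	store := map[string]string{"MAINT_STREAM/Default": "42", "INIT_STREAM/Default": "oops"}
	for _, key := range []string{"MAINT_STREAM/Default", "INIT_STREAM/Default", "missing"} {
		seqno, found, err := lookupSeqno(store, key)
		fmt.Println(key, seqno, found, err)
	}
}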
Example #10
func setupInitialData_managerTest(mgr *manager.IndexManager, t *testing.T) {

	// Add a new index definition : 100
	idxDefn := &common.IndexDefn{
		DefnId:          common.IndexDefnId(100),
		Name:            "index_manager_test_100",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		PartitionScheme: common.HASH,
		PartitionKey:    "Testing"}

	err := mgr.HandleCreateIndexDDL(idxDefn)
	if err != nil {
		t.Fatal(err)
	}

	// Add a new index definition : 101
	idxDefn = &common.IndexDefn{
		DefnId:          common.IndexDefnId(101),
		Name:            "index_manager_test_101",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		PartitionScheme: common.HASH,
		PartitionKey:    "Testing"}

	err = mgr.HandleCreateIndexDDL(idxDefn)
	if err != nil {
		t.Fatal(err)
	}

	err = mgr.UpdateIndexInstance("Default", common.IndexDefnId(101), common.INDEX_STATE_ACTIVE, common.StreamId(0), "")
	if err != nil {
		util.TT.Fatal(err)
	}

	err = mgr.UpdateIndexInstance("Default", common.IndexDefnId(102), common.INDEX_STATE_ACTIVE, common.StreamId(0), "")
	if err != nil {
		util.TT.Fatal(err)
	}
}
Example #11
// For this test, use Index Defn Id from 100 - 110
func TestMetadataProvider(t *testing.T) {

	logging.SetLogLevel(logging.Trace)

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	logging.Infof("Start Index Manager *********************************************************")

	var msgAddr = "localhost:9884"
	var httpAddr = "localhost:9885"
	factory := new(util.TestDefaultClientFactory)
	env := new(util.TestDefaultClientEnv)
	admin := manager.NewProjectorAdmin(factory, env, nil)
	addrPrv := util.NewFakeAddressProvider(msgAddr, httpAddr)
	mgr, err := manager.NewIndexManagerInternal(addrPrv, admin, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer mgr.Close()
	gMgr = mgr

	logging.Infof("Cleanup Test *********************************************************")

	cleanupTest(mgr, t)

	logging.Infof("Setup Initial Data *********************************************************")

	setupInitialData(mgr, t)

	logging.Infof("Start Provider *********************************************************")

	var providerId = "TestMetadataProvider"
	provider, err := client.NewMetadataProvider(providerId)
	if err != nil {
		t.Fatal(err)
	}
	defer provider.Close()
	provider.SetTimeout(int64(time.Second) * 15)
	indexerId, err := provider.WatchMetadata(msgAddr)
	if err != nil {
		t.Fatal(err)
	}

	// The gometa server runs in the same process as the MetadataProvider (client), so sleep
	// to give the server a chance to finish initialization; the client may be ready before
	// the server is.
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Verify Initial Data *********************************************************")

	meta := lookup(provider, common.IndexDefnId(100))
	if meta == nil {
		t.Fatal("Cannot find Index Defn 100 from MetadataProvider")
	}
	logging.Infof("found Index Defn 100")
	if len(meta.Instances) == 0 || meta.Instances[0].State != common.INDEX_STATE_READY {
		t.Fatal("Index Defn 100 state is not ready")
	}
	if meta.Instances[0].IndexerId != indexerId {
		t.Fatal("Index Defn 100 state is not ready")
	}

	meta = lookup(provider, common.IndexDefnId(101))
	if meta == nil {
		t.Fatal("Cannot find Index Defn 101 from MetadataProvider")
	}
	logging.Infof("found Index Defn 101")
	if len(meta.Instances) == 0 || meta.Instances[0].State != common.INDEX_STATE_READY {
		t.Fatal("Index Defn 101 state is not ready")
	}
	if meta.Instances[0].IndexerId != indexerId {
		t.Fatal("Index Defn 100 state is not ready")
	}

	logging.Infof("Change Data *********************************************************")

	notifier := &notifier{hasCreated: false, hasDeleted: false}
	mgr.RegisterNotifier(notifier)

	// Create Index with deployment plan (deferred)
	plan := make(map[string]interface{})
	plan["nodes"] = []interface{}{msgAddr}
	plan["defer_build"] = true
	newDefnId, err := provider.CreateIndexWithPlan("metadata_provider_test_102", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, plan)
	if err != nil {
		t.Fatal("Cannot create Index Defn 102 through MetadataProvider" + err.Error())
	}
	input := make([]common.IndexDefnId, 1)
	input[0] = newDefnId
	if err := provider.BuildIndexes(input); err != nil {
		t.Fatal("Cannot build Index Defn : %v", err)
	}
	logging.Infof("done creating index 102")

	// Drop a seeded index (created during setup step)
	if err := provider.DropIndex(common.IndexDefnId(101)); err != nil {
		t.Fatal("Cannot drop Index Defn 101 through MetadataProvider")
	}
	logging.Infof("done dropping index 101")

	// Create Index (immediate).
	newDefnId2, err := provider.CreateIndexWithPlan("metadata_provider_test_103", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, nil)
	if err != nil {
		t.Fatal("Cannot create Index Defn 103 through MetadataProvider")
	}
	logging.Infof("done creating index 103")

	// Update instance (set state to ACTIVE)
	if err := mgr.UpdateIndexInstance("Default", newDefnId2, common.INDEX_STATE_ACTIVE, common.StreamId(100), "", nil); err != nil {
		t.Fatal("Fail to update index instance")
	}
	logging.Infof("done updating index 103")

	// Update instance (set error string)
	if err := mgr.UpdateIndexInstance("Default", newDefnId2, common.INDEX_STATE_NIL, common.NIL_STREAM, "testing", nil); err != nil {
		t.Fatal("Fail to update index instance")
	}
	logging.Infof("done updating index 103")

	// Create Index (immediate).  This index is supposed to fail by OnIndexBuild()
	if _, err := provider.CreateIndexWithPlan("metadata_provider_test_104", "Default", common.ForestDB,
		common.N1QL, "Testing", "Testing", []string{"Testing"}, false, nil); err == nil {
		t.Fatal("Error does not propage for create Index Defn 104 through MetadataProvider")
	}
	logging.Infof("done creating index 104")

	logging.Infof("Verify Changed Data *********************************************************")

	if lookup(provider, common.IndexDefnId(100)) == nil {
		t.Fatal("Cannot find Index Defn 100 from MetadataProvider")
	}
	logging.Infof("found Index Defn 100")

	if lookup(provider, common.IndexDefnId(101)) != nil {
		t.Fatal("Found Deleted Index Defn 101 from MetadataProvider")
	}
	logging.Infof("cannot found deleted Index Defn 101")

	if meta = lookup(provider, newDefnId); meta == nil {
		t.Fatal(fmt.Sprintf("Cannot Found Index Defn %d from MetadataProvider", newDefnId))
	} else {
		logging.Infof("Found Index Defn %d", newDefnId)
		logging.Infof("meta.Instance %v", meta.Instances)
		if meta.Instances[0].IndexerId != indexerId {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect endpoint", newDefnId))
		}
		if meta.Definition.WhereExpr != "TestingWhereExpr" {
			t.Fatal(fmt.Sprintf("WhereExpr is missing in Index Defn %v", newDefnId))
		}
		if meta.Instances[0].State != common.INDEX_STATE_INITIAL {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect state", newDefnId))
		}
	}

	if meta = lookup(provider, newDefnId2); meta == nil {
		t.Fatal(fmt.Sprintf("Cannot Found Index Defn %d from MetadataProvider", newDefnId2))
	} else {
		logging.Infof("Found Index Defn %d", newDefnId2)
		logging.Infof("meta.Instance %v", meta.Instances)
		if meta.Instances[0].IndexerId != indexerId {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect endpoint", newDefnId2))
		}
		if meta.Definition.WhereExpr != "TestingWhereExpr" {
			t.Fatal(fmt.Sprintf("WhereExpr is missing in Index Defn %v", newDefnId2))
		}
		if meta.Instances[0].State != common.INDEX_STATE_ACTIVE {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect state", newDefnId2))
		}
		if meta.Instances[0].Error != "testing" {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect error string", newDefnId2))
		}
		if meta.Instances[0].BuildTime[10] != 33 {
			t.Fatal(fmt.Sprintf("Index Defn %v has incorrect buildtime", newDefnId2))
		}
	}

	if !notifier.hasCreated {
		t.Fatal(fmt.Sprintf("Does not recieve notification for creating index %s", newDefnId))
	}
	logging.Infof(fmt.Sprintf("Recieve notification for creating index %v", newDefnId))

	if !notifier.hasDeleted {
		t.Fatal("Does not recieve notification for deleting index 101")
	}
	logging.Infof("Recieve notification for deleting index 101")

	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("Verify Cleanup / Timeout *********************************************************")

	// Create Index (immediate).

	newDefnId105, err := provider.CreateIndexWithPlan("metadata_provider_test_105", "Default", common.ForestDB,
		common.N1QL, "Testing", "TestingWhereExpr", []string{"Testing"}, false, nil)
	if err == nil {
		t.Fatal("Does not receive timeout error for create Index Defn 105 through MetadataProvider")
	}
	logging.Infof("recieve expected timeout error when creating index 105")
	close(metadata_provider_test_done)

	logging.Infof("Cleanup Test *********************************************************")

	provider.UnwatchMetadata(indexerId)
	cleanupTest(mgr, t)
	cleanSingleIndex(mgr, t, newDefnId)
	cleanSingleIndex(mgr, t, newDefnId2)
	cleanSingleIndex(mgr, t, newDefnId105)
	time.Sleep(time.Duration(1000) * time.Millisecond)
}