Example #1
//send the actual MutationTopicRequest on adminport
func (k *kvSender) sendMutationTopicRequest(ap *projClient.Client, topic string,
	reqTimestamps *protobuf.TsVbuuid,
	instances []*protobuf.Instance) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tInstances %v",
		ap, topic, reqTimestamps.GetBucket(), instances)

	logging.LazyVerbosef("KVSender::sendMutationTopicRequest RequestTS %v", reqTimestamps.Repr)

	endpointType := "dataport"

	if res, err := ap.MutationTopicRequest(topic, endpointType,
		[]*protobuf.TsVbuuid{reqTimestamps}, instances); err != nil {
		logging.Fatalf("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tUnexpected Error %v", ap,
			topic, reqTimestamps.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendMutationTopicRequest Success Projector %v Topic %v %v InstanceIds %v",
			ap, topic, reqTimestamps.GetBucket(), res.GetInstanceIds())
		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendMutationTopicRequest ActiveTs %v \n\tRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), reqTimestamps.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), reqTimestamps.GetBucket()))
		}
		return res, nil
	}
}
Example #2
// - return ErrorInvalidKVaddrs for malformed kv-addresses.
// - return ErrorInconsistentFeed for malformed feed request.
// - return ErrorInvalidVbucketBranch for malformed vbuuid.
// - return dcp-client failures.
// - return ErrorResponseTimeout if request is not completed within timeout.
func (p *Projector) doMutationTopic(
	request *protobuf.MutationTopicRequest,
	opaque uint16) ap.MessageMarshaller {

	topic := request.GetTopic()

	// log this request.
	prefix := p.logPrefix
	logging.Infof("%v ##%x doMutationTopic() %q\n", prefix, opaque, topic)
	defer logging.Infof("%v ##%x doMutationTopic() returns ...\n", prefix, opaque)

	var err error
	feed, _ := p.acquireFeed(topic)
	defer p.releaseFeed(topic)
	if feed == nil {
		config := p.GetFeedConfig()
		feed, err = NewFeed(p.pooln, topic, config, opaque)
		if err != nil {
			fmsg := "%v ##%x unable to create feed %v\n"
			logging.Errorf(fmsg, prefix, opaque, topic)
			return (&protobuf.TopicResponse{}).SetErr(err)
		}
	}
	response, err := feed.MutationTopic(request, opaque)
	if err != nil {
		response.SetErr(err)
	}
	p.AddFeed(topic, feed)
	return response
}
Example #3
// run test
func runTimerTestReceiver(mgr *manager.IndexManager, ch chan *common.TsVbuuid, donech chan bool) {

	logging.Infof("Run Timer Test Receiver")
	defer close(donech)

	// wait for the sync message to arrive
	ticker := time.NewTicker(time.Duration(60) * time.Second)
	for {
		select {
		case ts := <-ch:
			if ts.Seqnos[10] == 406 {

				// wait to avoid race condition -- this is timing dependent.
				time.Sleep(time.Duration(2000) * time.Millisecond)

				seqno, ok := mgr.GetStabilityTimestampForVb(common.MAINT_STREAM, "Default", uint16(10))
				if !ok || seqno != ts.Seqnos[10] {
					util.TT.Fatal("runTimerTestReceiver(): timestamp seqno does not match with repo.  %d != %d",
						ts.Seqnos[10], seqno)
				} else {
					logging.Infof("****** runTimerTestReceiver() receive correct stability timestamp")
					return
				}
			}
		case <-ticker.C:
			logging.Infof("****** runTimerTestReceiver() : timeout")
			util.TT.Fatal("runTimerTestReceiver(): Timeout waiting to receive timestamp to arrive")
		}
	}

	logging.Infof("runTimerTestReceiver() done")
}
Example #4
func dropIndexRequest(t *testing.T) {

	logging.Infof("********** Start dropIndexRequest")

	// Construct request body.
	info := common.IndexDefn{
		DefnId: common.IndexDefnId(500),
		Name:   "request_handler_test",
		Bucket: "Default",
	}

	req := manager.IndexRequest{Version: uint64(1), Type: manager.DROP, Index: info}
	body, err := json.Marshal(req)
	if err != nil {
		t.Fatal(err)
	}

	bodybuf := bytes.NewBuffer(body)
	resp, err := http.Post("http://localhost:9102/dropIndex", "application/json", bodybuf)
	if err != nil {
		t.Fatal(err)
	}

	validateIndexResponse(resp, t)

	logging.Infof("********** Done dropIndexRequest")
}
Example #5
File: upr.go Project: prataprc/indexing
func printFlogs(vbnos []uint16, flogs couchbase.FailoverLog) {
	for i, vbno := range vbnos {
		logging.Infof("Failover log for vbucket %v\n", vbno)
		logging.Infof("   %#v\n", flogs[uint16(i)])
	}
	logging.Infof("\n")
}
Example #6
// start up
func changeTopologyForTimerTest(mgr *manager.IndexManager) {

	// Add a new index definition : 406
	idxDefn := &common.IndexDefn{
		DefnId:          common.IndexDefnId(406),
		Name:            "stream_mgr_timer_test",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		PartitionScheme: common.HASH,
		PartitionKey:    "Testing"}

	logging.Infof("Run Timer Test : Create Index Defn 406")
	if err := mgr.HandleCreateIndexDDL(idxDefn); err != nil {
		util.TT.Fatal(err)
	}
	// Wait so there is no race condition.
	time.Sleep(time.Duration(1000) * time.Millisecond)

	// Update the index definition to ready
	logging.Infof("Run Timer Test : Update Index Defn 406 to READY")
	topology, err := mgr.GetTopologyByBucket("Default")
	if err != nil {
		util.TT.Fatal(err)
	}
	topology.ChangeStateForIndexInstByDefn(common.IndexDefnId(406), common.INDEX_STATE_CREATED, common.INDEX_STATE_READY)
	if err := mgr.SetTopologyByBucket("Default", topology); err != nil {
		util.TT.Fatal(err)
	}
}
Example #7
File: bench.go Project: prataprc/indexing
func startBucket(cluster, bucketn string, rch chan []interface{}) int {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("Recovered from panic %v", r)
			logging.Errorf(logging.StackTrace())
		}
	}()

	logging.Infof("Connecting with %q\n", bucketn)
	b, err := common.ConnectBucket(cluster, "default", bucketn)
	mf(err, "bucket")

	dcpConfig := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := b.StartDcpFeed("rawupr", uint32(0), 0xABCD, dcpConfig)
	mf(err, "- upr")

	vbnos := listOfVbnos(options.maxVbno)
	flogs, err := b.GetFailoverLogs(0xABCD, vbnos, dcpConfig)
	mf(err, "- dcp failoverlogs")
	if options.printflogs {
		printFlogs(vbnos, flogs)
	}
	go startDcp(dcpFeed, flogs)

	for {
		e, ok := <-dcpFeed.C
		if ok == false {
			logging.Infof("Closing for bucket %q\n", bucketn)
		}
		rch <- []interface{}{bucketn, e}
	}
}
Example #8
// - return ErrorTopicMissing if feed is not started.
// - return ErrorInconsistentFeed for malformed feed request.
// - return ErrorInvalidVbucketBranch for malformed vbuuid.
// - return dcp-client failures.
// - return ErrorResponseTimeout if request is not completed within timeout.
func (p *Projector) doAddBuckets(
	request *protobuf.AddBucketsRequest, opaque uint16) ap.MessageMarshaller {

	topic := request.GetTopic()

	// log this request.
	prefix := p.logPrefix
	logging.Infof("%v ##%x doAddBuckets() %q\n", prefix, opaque, topic)
	defer logging.Infof("%v ##%x doAddBuckets() returns ...\n", prefix, opaque)

	feed, err := p.acquireFeed(topic)
	defer p.releaseFeed(topic)
	if err != nil {
		logging.Errorf("%v ##%x acquireFeed(): %v\n", prefix, opaque, err)
		response := &protobuf.TopicResponse{}
		if err != projC.ErrorTopicMissing {
			response = feed.GetTopicResponse()
		}
		return response.SetErr(err)
	}

	response, err := feed.AddBuckets(request, opaque)
	if err == nil {
		return response
	}
	return response.SetErr(err)
}
Example #9
func (cd *compactionDaemon) needsCompaction(is IndexStorageStats, config common.Config) bool {
	logging.Infof("CompactionDaemon: Checking fragmentation, %s", is.String())

	interval := config["interval"].String()
	isCompactionInterval := true
	if interval != "00:00,00:00" {
		var start_hr, start_min, end_hr, end_min int
		n, err := fmt.Sscanf(interval, "%d:%d,%d:%d", &start_hr, &start_min, &end_hr, &end_min)
		start_min += start_hr * 60
		end_min += end_hr * 60

		if n == 4 && err == nil {
			hr, min, _ := time.Now().Clock()
			min += hr * 60

			if min < start_min || min > end_min {
				isCompactionInterval = false
			}
		}
	}

	if !isCompactionInterval {
		logging.Infof("CompactionDaemon: Compaction attempt skipped since compaction interval is configured for %v", interval)
		return false
	}

	if uint64(is.Stats.DiskSize) > config["min_size"].Uint64() {
		if is.GetFragmentation() >= float64(config["min_frag"].Int()) {
			return true
		}
	}

	return false
}
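
The interval check above boils down to comparing minutes-since-midnight values. Below is a minimal standalone sketch of that comparison, assuming the same "HH:MM,HH:MM" format and, like the example, not handling windows that wrap past midnight.

package main

import (
	"fmt"
	"time"
)

// withinWindow reports whether now falls inside an "HH:MM,HH:MM" window,
// using the same minutes-since-midnight comparison as needsCompaction.
// An unparsable interval is treated as "always allowed", matching the
// example above.
func withinWindow(interval string, now time.Time) bool {
	var startHr, startMin, endHr, endMin int
	n, err := fmt.Sscanf(interval, "%d:%d,%d:%d", &startHr, &startMin, &endHr, &endMin)
	if n != 4 || err != nil {
		return true
	}
	start := startHr*60 + startMin
	end := endHr*60 + endMin
	min := now.Hour()*60 + now.Minute()
	return min >= start && min <= end
}

func main() {
	t := time.Date(2015, 6, 1, 3, 15, 0, 0, time.UTC)
	fmt.Println(withinWindow("01:00,05:30", t)) // true
	fmt.Println(withinWindow("22:00,23:30", t)) // false
}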
Example #10
//Calculate mutation queue length from memory quota
func (m *mutationMgr) calcQueueLenFromMemQuota() uint64 {

	memQuota := m.config["settings.memory_quota"].Uint64()
	maxVbLen := m.config["settings.maxVbQueueLength"].Uint64()
	maxVbLenDef := m.config["settings.maxVbQueueLength"].DefaultVal.(uint64)

	//if there is a user specified value, use that
	if maxVbLen != 0 {
		logging.Infof("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	} else {
		//Formula for calculation (see MB-14876)
		//Below 2GB - 5000 per vbucket
		//2GB to 4GB - 8000 per vbucket
		//Above 4GB - 10000 per vbucket
		if memQuota <= 2*1024*1024*1024 {
			maxVbLen = 5000
		} else if memQuota <= 4*1024*1024*1024 {
			maxVbLen = 8000
		} else {
			maxVbLen = maxVbLenDef
		}
		logging.Infof("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	}

}
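
A worked illustration of the MB-14876 tiering described in the comment above; the above-4GB tier here is hard-coded to 10000 from the comment, whereas the real code falls back to the configured default value.

package main

import "fmt"

const gb = 1024 * 1024 * 1024

// queueLenForQuota mirrors the tiering in calcQueueLenFromMemQuota when no
// user-specified maxVbQueueLength is set.
func queueLenForQuota(memQuota uint64) uint64 {
	switch {
	case memQuota <= 2*gb:
		return 5000
	case memQuota <= 4*gb:
		return 8000
	default:
		return 10000 // the real code uses the config default here
	}
}

func main() {
	for _, q := range []uint64{1 * gb, 3 * gb, 8 * gb} {
		fmt.Printf("memQuota=%dGB -> maxVbQueueLength=%d\n", q/gb, queueLenForQuota(q))
	}
}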
Example #11
func (cm *compactionManager) run() {
	cd := cm.newCompactionDaemon()
	cd.Start()
loop:
	for {
		select {
		case cmd, ok := <-cm.supvCmdCh:
			if ok {
				if cmd.GetMsgType() == COMPACTION_MGR_SHUTDOWN {
					logging.Infof("%v: Shutting Down", cm.logPrefix)
					cm.supvCmdCh <- &MsgSuccess{}
					break loop
				} else if cmd.GetMsgType() == CONFIG_SETTINGS_UPDATE {
					logging.Infof("%v: Refreshing settings", cm.logPrefix)
					cfgUpdate := cmd.(*MsgConfigUpdate)
					fullConfig := cfgUpdate.GetConfig()
					cfg := fullConfig.SectionConfig("settings.compaction.", true)
					cd.ResetConfig(cfg)
					cm.supvCmdCh <- &MsgSuccess{}
				}
			} else {
				break loop
			}
		}
	}

	cd.Stop()
}
Example #12
//send the actual AddInstances request on adminport
func sendAddInstancesRequest(ap *projClient.Client,
	topic string,
	instances []*protobuf.Instance) (*protobuf.TimestampResponse, error) {

	logging.Infof("KVSender::sendAddInstancesRequest Projector %v Topic %v \nInstances %v",
		ap, topic, instances)

	if res, err := ap.AddInstances(topic, instances); err != nil {
		logging.Fatalf("KVSender::sendAddInstancesRequest Unexpected Error During "+
			"Add Instances Request Projector %v Topic %v IndexInst %v. Err %v", ap,
			topic, instances, err)

		return res, err
	} else {
		logging.Infof("KVSender::sendAddInstancesRequest Success Projector %v Topic %v",
			ap, topic)
		logging.LazyDebug(func() string {
			return fmt.Sprintf(
				"KVSender::sendAddInstancesRequest \n\tActiveTs %v ", debugPrintTs(res.GetCurrentTimestamps(), ""))
		})
		return res, nil

	}

}
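
logging.LazyDebug above (and LazyVerbosef in Example #1) defers building the message until the log level is known to be enabled, so the formatting cost is skipped otherwise. Here is a minimal sketch of that idea, with an illustrative logger type rather than the project's logging package.

package main

import "fmt"

// lazyLogger is an illustrative stand-in for the project's logger type.
type lazyLogger struct {
	debugEnabled bool
}

// LazyDebug only invokes the (possibly expensive) message closure when
// debug logging is enabled.
func (l lazyLogger) LazyDebug(msg func() string) {
	if l.debugEnabled {
		fmt.Println(msg())
	}
}

func main() {
	l := lazyLogger{debugEnabled: false}
	l.LazyDebug(func() string {
		return fmt.Sprintf("expensive dump: %v", []int{1, 2, 3}) // never built
	})
}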
Example #13
func (c *timerTestProjectorClient) sendSync(instances []*protobuf.Instance) {

	logging.Infof("timerTestProjectorClient.sendSync() ")

	if len(instances) != 1 {
		util.TT.Fatal("timerTestProjectorClient.sendSync(): More than one index instance sent to fake projector")
	}

	for _, inst := range instances {
		if inst.GetIndexInstance().GetDefinition().GetDefnID() == uint64(406) {

			p := util.NewFakeProjector(manager.COORD_MAINT_STREAM_PORT)
			go p.Run(c.donech)

			payloads := make([]*common.VbKeyVersions, 0, 2000)

			// send StreamBegin for all vbuckets
			for i := 0; i < manager.NUM_VB; i++ {
				payload := common.NewVbKeyVersions("Default", uint16(i) /* vb */, 1, 10)
				kv := common.NewKeyVersions(1, []byte("document-name"), 1)
				kv.AddStreamBegin()
				kv.AddSync()
				payload.AddKeyVersions(kv)
				payloads = append(payloads, payload)
			}

			payload := common.NewVbKeyVersions("Default", 10, 1, 10)
			kv := common.NewKeyVersions(100, []byte("document-name"), 1)
			kv.AddSync()
			payload.AddKeyVersions(kv)
			payloads = append(payloads, payload)

			// send payload
			logging.Infof("****** runTimerTestReceiver() sending the first sync message")
			if err := p.Client.SendKeyVersions(payloads, true); err != nil {
				util.TT.Fatal(err)
			}

			payloads = make([]*common.VbKeyVersions, 0, 200)

			payload = common.NewVbKeyVersions("Default", 10, 1, 10)
			kv = common.NewKeyVersions(406, []byte("document-name"), 1)
			kv.AddSync()
			payload.AddKeyVersions(kv)
			payloads = append(payloads, payload)

			// send payload
			logging.Infof("****** runTimerTestReceiver() sending the second sync message")
			if err := p.Client.SendKeyVersions(payloads, true); err != nil {
				util.TT.Fatal(err)
			}
		}
	}
}
Example #14
func (meta *metaNotifier) OnIndexBuild(indexDefnList []common.IndexDefnId, buckets []string) map[common.IndexInstId]error {

	logging.Infof("clustMgrAgent::OnIndexBuild Notification "+
		"Received for Build Index %v", indexDefnList)

	respCh := make(MsgChannel)

	var indexInstList []common.IndexInstId
	for _, defnId := range indexDefnList {
		indexInstList = append(indexInstList, common.IndexInstId(defnId))
	}

	meta.adminCh <- &MsgBuildIndex{indexInstList: indexInstList,
		respCh:     respCh,
		bucketList: buckets}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case CLUST_MGR_BUILD_INDEX_DDL_RESPONSE:
			errMap := res.(*MsgBuildIndexResponse).GetErrorMap()
			logging.Infof("clustMgrAgent::OnIndexBuild returns "+
				"for Build Index %v", indexDefnList)
			return errMap

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexBuild Error "+
				"for Build Index %v. Error %v.", indexDefnList, res)
			err := res.(*MsgError).GetError()
			errMap := make(map[common.IndexInstId]error)
			for _, instId := range indexDefnList {
				errMap[common.IndexInstId(instId)] = errors.New(err.String())
			}
			return errMap

		default:
			logging.Fatalf("clustMgrAgent::OnIndexBuild Unknown Response "+
				"Received for Build Index %v. Response %v", indexDefnList, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexBuild Unexpected Channel Close "+
			"for Create Index %v", indexDefnList)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
Example #15
// - return couchbase SDK error if any.
func (p *Projector) doFailoverLog(
	request *protobuf.FailoverLogRequest, opaque uint16) ap.MessageMarshaller {

	response := &protobuf.FailoverLogResponse{}

	pooln := request.GetPool()
	bucketn := request.GetBucket()
	vbuckets := request.GetVbnos()

	// log this request.
	prefix := p.logPrefix
	fmsg := "%v ##%x doFailoverLog() {%q, %q, %v}\n"
	logging.Infof(fmsg, prefix, opaque, pooln, bucketn, vbuckets)
	defer logging.Infof("%v ##%x doFailoverLog() returns ...\n", prefix, opaque)

	bucket, err := c.ConnectBucket(p.clusterAddr, pooln, bucketn)
	if err != nil {
		logging.Errorf("%v ##%x ConnectBucket(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	defer bucket.Close()

	protoFlogs := make([]*protobuf.FailoverLog, 0, len(vbuckets))
	vbnos := c.Vbno32to16(vbuckets)
	dcpConfig := map[string]interface{}{
		"genChanSize":  p.config["projector.dcp.genChanSize"].Int(),
		"dataChanSize": p.config["projector.dcp.dataChanSize"].Int(),
	}
	flogs, err := bucket.GetFailoverLogs(opaque, vbnos, dcpConfig)
	if err == nil {
		for vbno, flog := range flogs {
			vbuuids := make([]uint64, 0, len(flog))
			seqnos := make([]uint64, 0, len(flog))
			for _, x := range flog {
				vbuuids = append(vbuuids, x[0])
				seqnos = append(seqnos, x[1])
			}
			protoFlog := &protobuf.FailoverLog{
				Vbno:    proto.Uint32(uint32(vbno)),
				Vbuuids: vbuuids,
				Seqnos:  seqnos,
			}
			protoFlogs = append(protoFlogs, protoFlog)
		}
	} else {
		logging.Errorf("%v ##%x GetFailoverLogs(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	response.Logs = protoFlogs
	return response
}
Example #16
// ResetConfig accepts a full set or subset of the global configuration
// and updates projector-related fields.
func (p *Projector) ResetConfig(config c.Config) {
	p.rw.Lock()
	defer p.rw.Unlock()
	defer logging.Infof("%v\n", c.LogRuntime())

	// reset configuration.
	if cv, ok := config["projector.settings.log_level"]; ok {
		logging.SetLogLevel(logging.Level(cv.String()))
	}
	if cv, ok := config["projector.maxCpuPercent"]; ok {
		c.SetNumCPUs(cv.Int())
	}
	p.config = p.config.Override(config)

	// CPU-profiling
	cpuProfile, ok := config["projector.cpuProfile"]
	if ok && cpuProfile.Bool() && p.cpuProfFd == nil {
		cpuProfFname, ok := config["projector.cpuProfFname"]
		if ok {
			fname := cpuProfFname.String()
			logging.Infof("%v cpu profiling => %q\n", p.logPrefix, fname)
			p.cpuProfFd = p.startCPUProfile(fname)

		} else {
			logging.Errorf("Missing cpu-profile o/p filename\n")
		}

	} else if ok && !cpuProfile.Bool() {
		if p.cpuProfFd != nil {
			pprof.StopCPUProfile()
			logging.Infof("%v cpu profiling stopped\n", p.logPrefix)
		}
		p.cpuProfFd = nil

	} else if ok {
		logging.Warnf("%v cpu profiling already active !!\n", p.logPrefix)
	}

	// MEM-profiling
	memProfile, ok := config["projector.memProfile"]
	if ok && memProfile.Bool() {
		memProfFname, ok := config["projector.memProfFname"]
		if ok {
			fname := memProfFname.String()
			if p.takeMEMProfile(fname) {
				logging.Infof("%v mem profile => %q\n", p.logPrefix, fname)
			}
		} else {
			logging.Errorf("Missing mem-profile o/p filename\n")
		}
	}
}
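
A minimal sketch of what helpers like startCPUProfile and takeMEMProfile presumably wrap, using only the standard runtime/pprof package; the file names are placeholders, not the projector's configured cpuProfFname/memProfFname values.

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	// CPU profile: runs until StopCPUProfile is called.
	cpuFd, err := os.Create("cpu.prof") // placeholder filename
	if err != nil {
		log.Fatal(err)
	}
	defer cpuFd.Close()
	if err := pprof.StartCPUProfile(cpuFd); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// ... workload to be profiled ...

	// Heap profile: a one-shot snapshot, written on demand.
	memFd, err := os.Create("mem.prof") // placeholder filename
	if err != nil {
		log.Fatal(err)
	}
	defer memFd.Close()
	if err := pprof.WriteHeapProfile(memFd); err != nil {
		log.Fatal(err)
	}
}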
Example #17
func (k *kvSender) sendRestartVbuckets(ap *projClient.Client,
	topic string, connErrVbs []Vbucket,
	restartTs *protobuf.TsVbuuid) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendRestartVbuckets Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())
	logging.LazyVerbosef("KVSender::sendRestartVbuckets RestartTs %v", restartTs.Repr)

	//Shutdown the vbuckets before restart if there was a ConnErr. If a vbucket is not
	//already running, the projector will simply ignore the shutdown request.
	if len(connErrVbs) != 0 {

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets %v Topic %v %v ConnErrVbs %v",
			ap, topic, restartTs.GetBucket(), connErrVbs)

		// Only shut down the VBs that received a connection error.  It is probably not harmful
		// to shut down every VB in the repairTS, including those that only received StreamEnd.
		// But due to network / projector latency, a VB StreamBegin may still be on the way
		// for those VBs (especially when RepairStream has already retried a couple of times).
		// So shutting down every VB in restartTs may unnecessarily cause a race condition and
		// make the protocol take longer to converge. ShutdownVbuckets has no effect on a
		// projector that does not own the VB.
		shutdownTs := k.computeShutdownTs(restartTs, connErrVbs)

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets Projector %v Topic %v %v \n\tShutdownTs %v",
			ap, topic, restartTs.GetBucket(), shutdownTs.Repr())

		if err := ap.ShutdownVbuckets(topic, []*protobuf.TsVbuuid{shutdownTs}); err != nil {
			logging.Errorf("KVSender::sendRestartVbuckets Unexpected Error During "+
				"ShutdownVbuckets Request for Projector %v Topic %v. Err %v.", ap,
				topic, err)

			//all shutdownVbuckets errors are treated as success as it is a best-effort call.
			//RestartVbuckets errors will be acted upon.
		}
	}

	if res, err := ap.RestartVbuckets(topic, []*protobuf.TsVbuuid{restartTs}); err != nil {
		logging.Fatalf("KVSender::sendRestartVbuckets Unexpected Error During "+
			"Restart Vbuckets Request for Projector %v Topic %v %v . Err %v.", ap,
			topic, restartTs.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendRestartVbuckets Success Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())
		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendRestartVbuckets \nActiveTs %v \nRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), restartTs.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), restartTs.GetBucket()))
		}
		return res, nil
	}
}
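
computeShutdownTs itself is not shown here; the sketch below illustrates the idea described in the comment above, filtering a restart timestamp down to just the vbuckets that saw a connection error, with plain maps standing in for protobuf.TsVbuuid.

// filterToConnErrVbs keeps only the entries of a restart timestamp whose
// vbucket is listed in connErrVbs, so the subsequent shutdown touches no
// other vbuckets. Purely illustrative; the real computeShutdownTs works
// on protobuf.TsVbuuid.
func filterToConnErrVbs(restartSeqnos map[uint16]uint64, connErrVbs []uint16) map[uint16]uint64 {
	shutdown := make(map[uint16]uint64, len(connErrVbs))
	for _, vb := range connErrVbs {
		if seqno, ok := restartSeqnos[vb]; ok {
			shutdown[vb] = seqno
		}
	}
	return shutdown
}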
Example #18
File: settings.go Project: jchris/indexing
func (s *settingsManager) metaKVCallback(path string, value []byte, rev interface{}) error {
	if path == common.IndexingSettingsMetaPath {
		logging.Infof("New settings received: \n%s", string(value))
		config := s.config.Clone()
		config.Update(value)
		setBlockPoolSize(s.config, config)
		s.config = config

		ncpu := common.SetNumCPUs(config["indexer.settings.max_cpu_percent"].Int())
		logging.Infof("Setting maxcpus = %d", ncpu)

		setLogger(config)

		indexerConfig := s.config.SectionConfig("indexer.", true)
		s.supvMsgch <- &MsgConfigUpdate{
			cfg: indexerConfig,
		}
	} else if path == indexCompactonMetaPath {
		currentToken := s.compactionToken
		s.compactionToken = value
		if bytes.Equal(currentToken, value) {
			return nil
		}

		logging.Infof("Manual compaction trigger requested")
		replych := make(chan []IndexStorageStats)
		statReq := &MsgIndexStorageStats{respch: replych}
		s.supvMsgch <- statReq
		stats := <-replych
		// XXX: minFile size check can be applied
		go func() {
			for _, is := range stats {
				errch := make(chan error)
				compactReq := &MsgIndexCompact{
					instId: is.InstId,
					errch:  errch,
				}
				logging.Infof("ManualCompaction: Compacting index instance:%v", is.InstId)
				s.supvMsgch <- compactReq
				err := <-errch
				if err == nil {
					logging.Infof("ManualCompaction: Finished compacting index instance:%v", is.InstId)
				} else {
					logging.Errorf("ManualCompaction: Index instance:%v Compaction failed with reason - %v", is.InstId, err)
				}
			}
		}()
	}

	return nil
}
Example #19
func createIndexRequest(t *testing.T) {

	logging.Infof("********** Start createIndexRequest")

	/*
		DefnId          IndexDefnId     `json:"defnId,omitempty"`
		Name            string          `json:"name,omitempty"`
		Using           IndexType       `json:"using,omitempty"`
		Bucket          string          `json:"bucket,omitempty"`
		IsPrimary       bool            `json:"isPrimary,omitempty"`
		SecExprs        []string        `json:"secExprs,omitempty"`
		ExprType        ExprType        `json:"exprType,omitempty"`
		PartitionScheme PartitionScheme `json:"partitionScheme,omitempty"`
		PartitionKey    string          `json:"partitionKey,omitempty"`
		WhereExpr       string          `json:"where,omitempty"`
		Deferred        bool            `json:"deferred,omitempty"`
		Nodes           []string        `json:"nodes,omitempty"`
	*/

	// Construct request body.
	info := common.IndexDefn{
		DefnId:          common.IndexDefnId(500),
		Name:            "request_handler_test",
		Using:           common.ForestDB,
		Bucket:          "Default",
		IsPrimary:       false,
		SecExprs:        []string{"Testing"},
		ExprType:        common.N1QL,
		WhereExpr:       "Testing",
		PartitionKey:    "Testing",
		PartitionScheme: common.SINGLE,
		Deferred:        false,
		Nodes:           []string{"localhost"},
	}

	req := manager.IndexRequest{Version: uint64(1), Type: manager.CREATE, Index: info}
	body, err := json.Marshal(req)
	if err != nil {
		t.Fatal(err)
	}

	bodybuf := bytes.NewBuffer(body)
	resp, err := http.Post("http://localhost:9102/createIndex", "application/json", bodybuf)
	if err != nil {
		t.Fatal(err)
	}

	validateIndexResponse(resp, t)

	logging.Infof("********** Done createIndexRequest")
}
Example #20
func (meta *metaNotifier) OnIndexCreate(indexDefn *common.IndexDefn) error {

	logging.Infof("clustMgrAgent::OnIndexCreate Notification "+
		"Received for Create Index %v", indexDefn)

	pc := meta.makeDefaultPartitionContainer()

	idxInst := common.IndexInst{InstId: common.IndexInstId(indexDefn.DefnId),
		Defn:  *indexDefn,
		State: common.INDEX_STATE_CREATED,
		Pc:    pc,
	}

	respCh := make(MsgChannel)

	meta.adminCh <- &MsgCreateIndex{mType: CLUST_MGR_CREATE_INDEX_DDL,
		indexInst: idxInst,
		respCh:    respCh}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case MSG_SUCCESS:
			logging.Infof("clustMgrAgent::OnIndexCreate Success "+
				"for Create Index %v", indexDefn)
			return nil

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexCreate Error "+
				"for Create Index %v. Error %v.", indexDefn, res)
			err := res.(*MsgError).GetError()
			return err.cause

		default:
			logging.Fatalf("clustMgrAgent::OnIndexCreate Unknown Response "+
				"Received for Create Index %v. Response %v", indexDefn, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexCreate Unexpected Channel Close "+
			"for Create Index %v", indexDefn)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
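
OnIndexBuild in Example #14 and OnIndexCreate above follow the same request/response pattern: the request carries its own reply channel and the caller blocks until the handler answers or closes the channel. Below is a minimal generic sketch of that pattern, with illustrative types rather than the project's Message types.

package main

import (
	"errors"
	"fmt"
)

// request carries its own reply channel, the same shape as MsgBuildIndex /
// MsgCreateIndex with their respCh fields.
type request struct {
	payload string
	respCh  chan error
}

// ask sends a request to the handler and blocks for the reply; a closed
// reply channel is reported as an error, mirroring the "Unexpected Channel
// Close" branch in the examples above.
func ask(adminCh chan<- request, payload string) error {
	respCh := make(chan error)
	adminCh <- request{payload: payload, respCh: respCh}
	if err, ok := <-respCh; ok {
		return err
	}
	return errors.New("handler closed the response channel")
}

func main() {
	adminCh := make(chan request)
	go func() { // illustrative handler
		req := <-adminCh
		req.respCh <- nil // success
	}()
	fmt.Println(ask(adminCh, "build index 500"))
}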
Example #21
func runTimerTest() {

	logging.Infof("**** Run Timer Test **********************************************")

	cfg := common.SystemConfig.SectionConfig("indexer", true /*trim*/)
	cfg.Set("storage_dir", common.ConfigValue{"./data/", "metadata file path", "./"})
	os.MkdirAll("./data/", os.ModePerm)

	logging.Infof("***** Start TestStreamMgr ")
	/*
		var requestAddr = "localhost:9885"
		var leaderAddr = "localhost:9884"
	*/
	var config = "./config.json"

	logging.Infof("Start Index Manager")
	donech := make(chan bool)
	factory := new(timerTestProjectorClientFactory)
	factory.donech = donech
	env := new(timerTestProjectorClientEnv)
	admin := manager.NewProjectorAdmin(factory, env, nil)
	//mgr, err := manager.NewIndexManagerInternal(requestAddr, leaderAddr, config, admin)
	mgr, err := manager.NewIndexManagerInternal("localhost:9886", "localhost:"+manager.COORD_MAINT_STREAM_PORT, admin, cfg)
	if err != nil {
		util.TT.Fatal(err)
	}
	mgr.StartCoordinator(config)
	time.Sleep(time.Duration(3000) * time.Millisecond)

	mgr.SetTimestampPersistenceInterval(1)
	defer mgr.SetTimestampPersistenceInterval(manager.TIMESTAMP_PERSIST_INTERVAL)

	logging.Infof("Timer Test Cleanup ...")
	cleanupStreamMgrTimerTest(mgr)

	logging.Infof("***** Run timer Test ...")
	ch := mgr.GetStabilityTimestampChannel(common.MAINT_STREAM)
	go runTimerTestReceiver(mgr, ch, donech)

	logging.Infof("Setup data for Timer Test")
	changeTopologyForTimerTest(mgr)
	<-donech

	logging.Infof("**** Timer Test Cleanup ...")
	cleanupStreamMgrTimerTest(mgr)
	mgr.CleanupTopology()
	mgr.CleanupStabilityTimestamp()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("**** Stop TestStreamMgr. Tearing down ")
	mgr.Close()
	time.Sleep(time.Duration(1000) * time.Millisecond)

	logging.Infof("**** Finish Timer Test *************************************************")
}
Example #22
//send the actual ShutdownTopicRequest on adminport
func sendShutdownTopic(ap *projClient.Client,
	topic string) error {

	logging.Infof("KVSender::sendShutdownTopic Projector %v Topic %v", ap, topic)

	if err := ap.ShutdownTopic(topic); err != nil {
		logging.Fatalf("KVSender::sendShutdownTopic Unexpected Error During "+
			"Shutdown Projector %v Topic %v. Err %v", ap, topic, err)

		return err
	} else {
		logging.Infof("KVSender::sendShutdownTopic Success Projector %v Topic %v", ap, topic)
		return nil
	}
}
Example #23
// NewProjector creates a new projector instance and
// starts a corresponding adminport.
func NewProjector(maxvbs int, config c.Config) *Projector {
	p := &Projector{
		topics:         make(map[string]*Feed),
		topicSerialize: make(map[string]*sync.Mutex),
		maxvbs:         maxvbs,
		pooln:          "default", // TODO: should this be configurable ?
	}

	// Setup dynamic configuration propagation
	config, err := c.GetSettingsConfig(config)
	c.CrashOnError(err)

	pconfig := config.SectionConfig("projector.", true /*trim*/)
	p.name = pconfig["name"].String()
	p.clusterAddr = pconfig["clusterAddr"].String()
	p.adminport = pconfig["adminport.listenAddr"].String()
	ef := config["projector.routerEndpointFactory"]
	config["projector.routerEndpointFactory"] = ef

	p.config = config
	p.ResetConfig(config)

	p.logPrefix = fmt.Sprintf("PROJ[%s]", p.adminport)

	callb := func(cfg c.Config) {
		logging.Infof("%v settings notifier from metakv\n", p.logPrefix)
		cfg.LogConfig(p.logPrefix)
		p.ResetConfig(cfg)
	}
	c.SetupSettingsNotifier(callb, make(chan struct{}))

	cluster := p.clusterAddr
	if !strings.HasPrefix(p.clusterAddr, "http://") {
		cluster = "http://" + cluster
	}

	apConfig := config.SectionConfig("projector.adminport.", true)
	apConfig.SetValue("name", "PRAM")
	reqch := make(chan ap.Request)
	p.admind = ap.NewHTTPServer(apConfig, reqch)

	watchInterval := config["projector.watchInterval"].Int()
	staleTimeout := config["projector.staleTimeout"].Int()
	go p.mainAdminPort(reqch)
	go p.watcherDameon(watchInterval, staleTimeout)
	logging.Infof("%v started ...\n", p.logPrefix)
	return p
}
Example #24
File: main.go Project: jchris/indexing
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	common.SeedProcess()

	logging.Infof("Indexer started with command line: %v\n", os.Args)
	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	forestdb.Log = &logging.SystemLogger

	// setup cbauth
	if *auth != "" {
		up := strings.Split(*auth, ":")
		logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
		if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	go platform.DumpOnSignal()
	go common.ExitOnStdinClose()

	config := common.SystemConfig
	config.SetValue("indexer.clusterAddr", *cluster)
	config.SetValue("indexer.numVbuckets", *numVbuckets)
	config.SetValue("indexer.enableManager", *enableManager)
	config.SetValue("indexer.adminPort", *adminPort)
	config.SetValue("indexer.scanPort", *scanPort)
	config.SetValue("indexer.httpPort", *httpPort)
	config.SetValue("indexer.streamInitPort", *streamInitPort)
	config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
	config.SetValue("indexer.streamMaintPort", *streamMaintPort)
	config.SetValue("indexer.storage_dir", *storageDir)

	storage_dir := config["indexer.storage_dir"].String()
	if err := os.MkdirAll(storage_dir, 0755); err != nil {
		common.CrashOnError(err)
	}

	_, msg := indexer.NewIndexer(config)

	if msg.GetMsgType() != indexer.MSG_SUCCESS {
		logging.Warnf("Indexer Failure to Init %v", msg)
	}

	logging.Infof("Indexer exiting normally\n")
}
Example #25
// Start is part of Server interface.
func (s *httpServer) Start() (err error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.lis != nil {
		logging.Errorf("%v already started ...\n", s.logPrefix)
		return ErrorServerStarted
	}

	if s.lis, err = net.Listen("tcp", s.srv.Addr); err != nil {
		logging.Errorf("%v listen failed %v\n", s.logPrefix, err)
		return err
	}

	// Server routine
	go func() {
		defer s.shutdown()

		logging.Infof("%s starting ...\n", s.logPrefix)
		err := s.srv.Serve(s.lis) // serve until listener is closed.
		// TODO: look into error message and skip logging if Stop().
		if err != nil {
			logging.Errorf("%s %v\n", s.logPrefix, err)
		}
	}()

	logging.PeriodicProfile(logging.Trace, s.srv.Addr, "goroutine")
	return
}
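
The same start-up shape as Start above can be reproduced with just net and net/http: create the listener synchronously so address errors surface to the caller, then serve on a goroutine until the listener is closed. A minimal sketch with a placeholder address and handler follows.

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0", Handler: http.NotFoundHandler()}

	// Listen synchronously so a bad address fails here, not in the goroutine.
	lis, err := net.Listen("tcp", srv.Addr)
	if err != nil {
		log.Fatal(err)
	}

	go func() {
		// Serve blocks until the listener is closed.
		if err := srv.Serve(lis); err != nil {
			log.Printf("server stopped: %v", err)
		}
	}()

	log.Printf("listening on %v", lis.Addr())
	lis.Close() // stopping: closing the listener makes Serve return.
}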
Example #26
func (p *timerTestProjectorClientEnv) GetNodeListForBuckets(buckets []string) (map[string]string, error) {

	logging.Infof("timerTestProjectorClientEnv.GetNodeListForBuckets() ")
	nodes := make(map[string]string)
	nodes["127.0.0.1"] = "127.0.0.1"
	return nodes, nil
}
Example #27
func (instance *serviceNotifierInstance) getNotifyCallback(t NotificationType) func(interface{}) error {
	fn := func(msg interface{}) error {
		instance.Lock()
		defer instance.Unlock()

		if !instance.valid {
			return ErrNotifierInvalid
		}

		notifMsg := Notification{
			Type: t,
			Msg:  msg,
		}

		logging.Infof("serviceChangeNotifier: received %s", notifMsg)

		for id, w := range instance.waiters {
			select {
			case w <- notifMsg:
			case <-time.After(notifyWaitTimeout):
				logging.Warnf("servicesChangeNotifier: Consumer for %v took too long to read notification, making the consumer invalid", instance.clusterUrl)
				close(w)
				delete(instance.waiters, id)
			}
		}
		return nil
	}

	return fn
}
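
The delivery policy in the callback above, which tries each waiter and drops any consumer that does not read within notifyWaitTimeout so one slow reader cannot stall the rest, reduces to a select on the send and a timer. Here is a minimal generic sketch, assuming the standard time import and illustrative types.

// broadcast hands msg to every waiter; a waiter that does not receive
// within timeout is closed and removed, mirroring getNotifyCallback above.
func broadcast(waiters map[int]chan string, msg string, timeout time.Duration) {
	for id, w := range waiters {
		select {
		case w <- msg:
		case <-time.After(timeout):
			close(w)
			delete(waiters, id) // deleting during range is safe in Go
		}
	}
}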
Example #28
//handleCloseStream closes MutationStreamReader for the specified stream.
func (m *mutationMgr) handleCloseStream(cmd Message) {

	logging.Infof("MutationMgr::handleCloseStream %v", cmd)

	streamId := cmd.(*MsgStreamUpdate).GetStreamId()

	m.lock.Lock()
	defer m.lock.Unlock()

	//return error if this stream is already closed
	if _, ok := m.streamReaderMap[streamId]; !ok {

		logging.Errorf("MutationMgr::handleCloseStream Stream "+
			"Already Closed %v", streamId)

		m.supvCmdch <- &MsgError{
			err: Error{code: ERROR_MUT_MGR_STREAM_ALREADY_CLOSED,
				severity: NORMAL,
				category: MUTATION_MANAGER}}
		return
	}

	respMsg := m.sendMsgToStreamReader(streamId,
		&MsgGeneral{mType: STREAM_READER_SHUTDOWN})

	if respMsg.GetMsgType() == MSG_SUCCESS {
		//update internal data structures
		m.cleanupStream(streamId)
	}

	//send the message back on supv channel
	m.supvCmdch <- respMsg

}
Example #29
//handleAddIndexListToStream adds a list of indexes to an
//already running MutationStreamReader. If the list has an index
//for a bucket that has no mutation queue yet, one will
//be created.
func (m *mutationMgr) handleAddIndexListToStream(cmd Message) {

	logging.Infof("MutationMgr::handleAddIndexListToStream %v", cmd)

	streamId := cmd.(*MsgStreamUpdate).GetStreamId()
	indexList := cmd.(*MsgStreamUpdate).GetIndexList()

	m.lock.Lock()
	defer m.lock.Unlock()

	//return error if this stream is already closed
	if _, ok := m.streamReaderMap[streamId]; !ok {

		logging.Errorf("MutationMgr::handleAddIndexListToStream \n\tStream "+
			"Already Closed %v", streamId)

		m.supvCmdch <- &MsgError{
			err: Error{code: ERROR_MUT_MGR_STREAM_ALREADY_CLOSED,
				severity: NORMAL,
				category: MUTATION_MANAGER}}
		return
	}

	respMsg := m.addIndexListToExistingStream(streamId, indexList, nil)
	//send the message back on supv channel
	m.supvCmdch <- respMsg

}
Example #30
func (m *mutationMgr) handleAbortPersist(cmd Message) {

	logging.Infof("MutationMgr::handleAbortPersist %v", cmd)

	bucket := cmd.(*MsgMutMgrFlushMutationQueue).GetBucket()
	streamId := cmd.(*MsgMutMgrFlushMutationQueue).GetStreamId()

	go func() {
		m.flock.Lock()
		defer m.flock.Unlock()

		//abort the flush for the given stream and bucket, if it is in progress
		if bucketStopChMap, ok := m.streamFlusherStopChMap[streamId]; ok {
			if stopch, ok := bucketStopChMap[bucket]; ok {
				if stopch != nil {
					close(stopch)
				}
			}
		}
		m.supvRespch <- &MsgMutMgrFlushDone{mType: MUT_MGR_ABORT_DONE,
			bucket:   bucket,
			streamId: streamId}
	}()

	m.supvCmdch <- &MsgSuccess{}

}
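
The abort above works because the flusher's worker loop selects on the stop channel, so close(stopch) wakes it immediately. Below is a minimal sketch of that worker side, with illustrative types; the real flusher operates on mutation queues.

// flushUntilStopped drains work items until either the work channel is
// exhausted or the stop channel is closed by the manager.
func flushUntilStopped(work <-chan int, stopch <-chan struct{}) {
	for {
		select {
		case item, ok := <-work:
			if !ok {
				return // nothing left to flush
			}
			_ = item // ... persist the item ...
		case <-stopch:
			return // aborted via close(stopch)
		}
	}
}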