Example #1
func main() {
	logging.SetLogLevel(logging.Warn)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, _, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	} else if cmdOptions.Help || len(cmdOptions.OpType) < 1 {
		usage(fset)
		os.Exit(0)
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	}

	if err = querycmd.HandleCommand(client, cmdOptions, false, os.Stdout); err != nil {
		fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
	}
	client.Close()
}
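A note on the pattern above: unlike the standard library's log.Fatalf, the logging.Fatalf used throughout these examples evidently does not terminate the process, since callers follow it with an explicit os.Exit(1). A stdlib-only sketch of the same error path (run() is a hypothetical stand-in for the real work):

package main

import (
	"log"
	"os"
)

// run is a hypothetical stand-in for the program's real work.
func run() error { return nil }

func main() {
	if err := run(); err != nil {
		log.Printf("%v", err) // log the failure...
		os.Exit(1)            // ...then exit with a non-zero status
	}
}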
Example #2
//panicHandler handles the panic from underlying stream library
func (r *mutationStreamReader) panicHandler() {

	//panic recovery
	if rc := recover(); rc != nil {
		logging.Fatalf("MutationStreamReader::panicHandler Received Panic for Stream %v", r.streamId)
		var err error
		switch x := rc.(type) {
		case string:
			err = errors.New(x)
		case error:
			err = x
		default:
			err = errors.New("Unknown panic")
		}

		logging.Fatalf("StreamReader Panic Err %v", err)
		logging.Fatalf("%s", logging.StackTrace())

		//panic from stream library, propagate to supervisor
		msg := &MsgStreamError{streamId: r.streamId,
			err: Error{code: ERROR_STREAM_READER_PANIC,
				severity: FATAL,
				category: STREAM_READER,
				cause:    err}}
		r.supvRespch <- msg
	}
}
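This recover-and-convert idiom recurs across the panic handlers below (Examples #3 and #21 use the same type switch). A minimal, self-contained sketch of the pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

// toError normalizes an arbitrary recover() value into an error,
// mirroring the type switch used by the panic handlers here.
func toError(rc interface{}) error {
	switch x := rc.(type) {
	case string:
		return errors.New(x)
	case error:
		return x
	default:
		return errors.New("Unknown panic")
	}
}

// guarded runs work and converts any panic into a returned error.
func guarded(work func()) (err error) {
	defer func() {
		if rc := recover(); rc != nil {
			err = toError(rc)
			fmt.Printf("recovered: %v\n%s", err, debug.Stack())
		}
	}()
	work()
	return nil
}

func main() {
	if err := guarded(func() { panic("boom") }); err != nil {
		fmt.Println("would propagate to supervisor:", err)
	}
}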
Example #3
func ValidateBucket(cluster, bucket string, uuids []string) bool {

	var cinfo *common.ClusterInfoCache
	url, err := common.ClusterAuthUrl(cluster)
	if err == nil {
		cinfo, err = common.NewClusterInfoCache(url, DEFAULT_POOL)
	}
	if err != nil {
		logging.Fatalf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	cinfo.Lock()
	defer cinfo.Unlock()

	if err := cinfo.Fetch(); err != nil {
		logging.Errorf("Indexer::Fail to init ClusterInfoCache : %v", err)
		common.CrashOnError(err)
	}

	if nids, err := cinfo.GetNodesByBucket(bucket); err == nil && len(nids) != 0 {
		// verify UUID
		currentUUID := cinfo.GetBucketUUID(bucket)
		for _, uuid := range uuids {
			if uuid != currentUUID {
				return false
			}
		}
		return true
	} else {
		logging.Fatalf("Indexer::Error Fetching Bucket Info: %v Nids: %v", err, nids)
		return false
	}

}
//panicHandler handles the panic from index manager
func (c *clustMgrAgent) panicHandler() {

	//panic recovery
	if rc := recover(); rc != nil {
		var err error
		switch x := rc.(type) {
		case string:
			err = errors.New(x)
		case error:
			err = x
		default:
			err = errors.New("Unknown panic")
		}

		logging.Fatalf("ClusterMgrAgent Panic Err %v", err)
		logging.Fatalf("%s", logging.StackTrace())

		//panic, propagate to supervisor
		msg := &MsgError{
			err: Error{code: ERROR_INDEX_MANAGER_PANIC,
				severity: FATAL,
				category: CLUSTER_MGR,
				cause:    err}}
		c.supvRespch <- msg
	}

}
Example #5
func main() {
	logging.SetLogLevel(logging.Error)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, args, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v", err)
		os.Exit(1)
	} else if cmdOptions.Help {
		usage(fset)
		os.Exit(0)
	} else if len(args) < 1 {
		logging.Fatalf("%v", "specify a command")
	}

	b, err := c.ConnectBucket(cmdOptions.Server, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()
	maxvb, err := c.MaxVbuckets(b)
	if err != nil {
		log.Fatal(err)
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		log.Fatal(err)
	}

	switch args[0] {
	case "sanity":
		err = doSanityTests(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb14786":
		err = doMB14786(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb13339":
		err = doMB13339(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "benchmark":
		doBenchmark(cmdOptions.Server, "localhost:8101")

	case "consistency":
		doConsistency(cmdOptions.Server, maxvb, client)
	}
	client.Close()
}
Example #6
func processMutations(
	vbs []*protobuf.VbKeyVersions,
	bucketWise map[string]map[byte]int,
	keys map[uint64]map[string]int) int {

	var secvalues []interface{}

	mutations := 0
	for _, vb := range vbs {
		bucket, kvs := vb.GetBucketname(), vb.GetKvs()
		commandWise, ok := bucketWise[bucket]
		if !ok {
			commandWise = make(map[byte]int)
		}

		for _, kv := range kvs {
			mutations++
			uuids, seckeys := kv.GetUuids(), kv.GetKeys()
			for i, command := range kv.GetCommands() {
				cmd, uuid, key := byte(command), uuids[i], string(seckeys[i])
				if _, ok := commandWise[cmd]; !ok {
					commandWise[cmd] = 0
				}
				commandWise[cmd]++

				if cmd == 0 || cmd == c.Snapshot || uuid == 0 || key == "" {
					continue
				}

				m, ok := keys[uuid]
				if !ok {
					m = make(map[string]int)
				}
				if err := json.Unmarshal([]byte(key), &secvalues); err != nil {
					logging.Fatalf("Error in unmarshalling - %v", err)
				} else if len(secvalues) > 0 {
					secJSON, err := json.Marshal(secvalues[:len(secvalues)-1])
					if err != nil {
						logging.Fatalf("Error in marshaling - %v", err)
					}
					key = string(secJSON)
					if _, ok := m[key]; !ok {
						m[key] = 0
					}
					m[key]++
					keys[uuid] = m
				}
			}
		}
		bucketWise[bucket] = commandWise
	}
	return mutations
}
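One detail of processMutations worth calling out: each secondary key arrives as a JSON-encoded array, and its last element (presumably the appended document id) is stripped before the values are re-encoded and counted. A standalone sketch of that step, using made-up data:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A secondary key: the indexed values plus a trailing document id.
	raw := []byte(`["Alice", 42, "doc-123"]`)

	var secvalues []interface{}
	if err := json.Unmarshal(raw, &secvalues); err != nil {
		panic(err)
	}

	// Drop the last element and re-encode, as processMutations does.
	secJSON, err := json.Marshal(secvalues[:len(secvalues)-1])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(secJSON)) // ["Alice",42]
}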
func (meta *metaNotifier) OnIndexBuild(indexDefnList []common.IndexDefnId, buckets []string) map[common.IndexInstId]error {

	logging.Infof("clustMgrAgent::OnIndexBuild Notification "+
		"Received for Build Index %v", indexDefnList)

	respCh := make(MsgChannel)

	var indexInstList []common.IndexInstId
	for _, defnId := range indexDefnList {
		indexInstList = append(indexInstList, common.IndexInstId(defnId))
	}

	meta.adminCh <- &MsgBuildIndex{indexInstList: indexInstList,
		respCh:     respCh,
		bucketList: buckets}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case CLUST_MGR_BUILD_INDEX_DDL_RESPONSE:
			errMap := res.(*MsgBuildIndexResponse).GetErrorMap()
			logging.Infof("clustMgrAgent::OnIndexBuild returns "+
				"for Build Index %v", indexDefnList)
			return errMap

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexBuild Error "+
				"for Build Index %v. Error %v.", indexDefnList, res)
			err := res.(*MsgError).GetError()
			errMap := make(map[common.IndexInstId]error)
			for _, instId := range indexDefnList {
				errMap[common.IndexInstId(instId)] = errors.New(err.String())
			}
			return errMap

		default:
			logging.Fatalf("clustMgrAgent::OnIndexBuild Unknown Response "+
				"Received for Build Index %v. Response %v", indexDefnList, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexBuild Unexpected Channel Close "+
			"for Create Index %v", indexDefnList)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
func (meta *metaNotifier) OnIndexCreate(indexDefn *common.IndexDefn) error {

	logging.Infof("clustMgrAgent::OnIndexCreate Notification "+
		"Received for Create Index %v", indexDefn)

	pc := meta.makeDefaultPartitionContainer()

	idxInst := common.IndexInst{InstId: common.IndexInstId(indexDefn.DefnId),
		Defn:  *indexDefn,
		State: common.INDEX_STATE_CREATED,
		Pc:    pc,
	}

	respCh := make(MsgChannel)

	meta.adminCh <- &MsgCreateIndex{mType: CLUST_MGR_CREATE_INDEX_DDL,
		indexInst: idxInst,
		respCh:    respCh}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case MSG_SUCCESS:
			logging.Infof("clustMgrAgent::OnIndexCreate Success "+
				"for Create Index %v", indexDefn)
			return nil

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexCreate Error "+
				"for Create Index %v. Error %v.", indexDefn, res)
			err := res.(*MsgError).GetError()
			return err.cause

		default:
			logging.Fatalf("clustMgrAgent::OnIndexCreate Unknown Response "+
				"Received for Create Index %v. Response %v", indexDefn, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexCreate Unexpected Channel Close "+
			"for Create Index %v", indexDefn)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
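OnIndexBuild and OnIndexCreate share the same synchronous request/response shape: build a message carrying a fresh response channel, send it on the admin channel, then block reading the reply. A stripped-down sketch of that shape, with hypothetical types:

package main

import (
	"errors"
	"fmt"
)

// request carries its own response channel, like MsgCreateIndex above.
type request struct {
	payload string
	respCh  chan error
}

// worker plays the role of the agent behind adminCh.
func worker(adminCh <-chan request) {
	for req := range adminCh {
		if req.payload == "" {
			req.respCh <- errors.New("empty payload")
			continue
		}
		req.respCh <- nil // success
	}
}

func main() {
	adminCh := make(chan request)
	go worker(adminCh)

	respCh := make(chan error)
	adminCh <- request{payload: "create index", respCh: respCh}

	//wait for response
	if err := <-respCh; err != nil {
		fmt.Println("request failed:", err)
	} else {
		fmt.Println("request succeeded")
	}
}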
Example #9
//send the actual MutationStreamRequest on adminport
func (k *kvSender) sendMutationTopicRequest(ap *projClient.Client, topic string,
	reqTimestamps *protobuf.TsVbuuid,
	instances []*protobuf.Instance) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tInstances %v",
		ap, topic, reqTimestamps.GetBucket(), instances)

	logging.LazyVerbosef("KVSender::sendMutationTopicRequest RequestTS %v", reqTimestamps.Repr)

	endpointType := "dataport"

	if res, err := ap.MutationTopicRequest(topic, endpointType,
		[]*protobuf.TsVbuuid{reqTimestamps}, instances); err != nil {
		logging.Fatalf("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tUnexpected Error %v", ap,
			topic, reqTimestamps.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendMutationTopicRequest Success Projector %v Topic %v %v InstanceIds %v",
			ap, topic, reqTimestamps.GetBucket(), res.GetInstanceIds())
		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendMutationTopicRequest ActiveTs %v \n\tRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), reqTimestamps.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), reqTimestamps.GetBucket()))
		}
		return res, nil
	}
}
Example #10
//run starts the mutation manager loop, which listens to messages
//from its workers (stream_reader and flusher) and
//supervisor (indexer)
func (m *mutationMgr) run() {

	defer m.panicHandler()

	go m.handleWorkerMsgs()
	go m.listenWorkerMsgs()

	//main Mutation Manager loop
loop:
	for {
		select {
		case cmd, ok := <-m.supvCmdch:
			if ok {
				if cmd.GetMsgType() == MUT_MGR_SHUTDOWN {
					//shutdown and exit the mutation manager loop
					msg := m.shutdown()
					m.supvCmdch <- msg
					break loop
				} else {
					m.handleSupervisorCommands(cmd)
				}
			} else {
				logging.Fatalf("Supervisor Channel Closed Unexpectedly." +
					"Mutation Manager Shutting Itself Down.")
				m.shutdown()
				break loop
			}
		}
	}

}
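The break loop statement above depends on the label: inside a select, a bare break would only exit the select, not the enclosing for. A minimal illustration:

package main

import "fmt"

func main() {
	cmds := make(chan string, 2)
	cmds <- "work"
	cmds <- "shutdown"

loop:
	for {
		select {
		case cmd := <-cmds:
			if cmd == "shutdown" {
				break loop // exits the for loop, not just the select
			}
			fmt.Println("handling:", cmd)
		}
	}
	fmt.Println("loop exited")
}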
Example #11
//send the actual AddInstances request on adminport
func sendAddInstancesRequest(ap *projClient.Client,
	topic string,
	instances []*protobuf.Instance) (*protobuf.TimestampResponse, error) {

	logging.Infof("KVSender::sendAddInstancesRequest Projector %v Topic %v \nInstances %v",
		ap, topic, instances)

	if res, err := ap.AddInstances(topic, instances); err != nil {
		logging.Fatalf("KVSender::sendAddInstancesRequest Unexpected Error During "+
			"Add Instances Request Projector %v Topic %v IndexInst %v. Err %v", ap,
			topic, instances, err)

		return res, err
	} else {
		logging.Infof("KVSender::sendAddInstancesRequest Success Projector %v Topic %v",
			ap, topic)
		logging.LazyDebug(func() string {
			return fmt.Sprintf(
				"KVSender::sendAddInstancesRequest \n\tActiveTs %v ", debugPrintTs(res.GetCurrentTimestamps(), ""))
		})
		return res, nil

	}

}
Example #12
func getlogFile() *os.File {
	switch options.logFile {
	case "":
		return nil
	case "tempfile":
		f, err := ioutil.TempFile("", "projector")
		if err != nil {
			logging.Fatalf("%v", err)
		}
		return f
	}
	f, err := os.Create(options.logFile)
	if err != nil {
		logging.Fatalf("%v", err)
	}
	return f
}
Example #13
// Arg2Key converts a JSON string to golang-native values.
func Arg2Key(arg []byte) []interface{} {
	var key []interface{}
	if err := json.Unmarshal(arg, &key); err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	}
	return key
}
func (meta *metaNotifier) OnIndexDelete(defnId common.IndexDefnId, bucket string) error {

	logging.Infof("clustMgrAgent::OnIndexDelete Notification "+
		"Received for Drop IndexId %v", defnId)

	respCh := make(MsgChannel)

	//Treat DefnId as InstId for now
	meta.adminCh <- &MsgDropIndex{mType: CLUST_MGR_DROP_INDEX_DDL,
		indexInstId: common.IndexInstId(defnId),
		respCh:      respCh,
		bucket:      bucket}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case MSG_SUCCESS:
			logging.Infof("clustMgrAgent::OnIndexDelete Success "+
				"for Drop IndexId %v", defnId)
			return nil

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexDelete Error "+
				"for Drop IndexId %v. Error %v", defnId, res)
			err := res.(*MsgError).GetError()
			return err.cause

		default:
			logging.Fatalf("clustMgrAgent::OnIndexDelete Unknown Response "+
				"Received for Drop IndexId %v. Response %v", defnId, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {
		logging.Fatalf("clustMgrAgent::OnIndexDelete Unexpected Channel Close "+
			"for Drop IndexId %v", defnId)
		common.CrashOnError(errors.New("Unknown Response"))

	}

	return nil
}
Example #15
func main() {
	flag.Parse()
	ls, e := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if e != nil {
		logging.Fatalf("Got an error:  %s", e)
	}

	waitForConnections(ls)
}
Example #16
//sendMsgToStreamReader sends the provided message to the stream reader
//and returns its response. If the stream reader panics during the
//communication, the error is captured and returned.
func (m *mutationMgr) sendMsgToStreamReader(streamId common.StreamId, msg Message) Message {

	//use select to send message to stream reader,
	//in case stream reader has exited, a message on streamReaderExitCh
	//will unblock this call
	select {

	case m.streamReaderCmdChMap[streamId] <- msg:

	case <-m.streamReaderExitChMap[streamId]:
		logging.Fatalf("MutationMgr::sendMsgToStreamReader Unexpected Stream Reader Exit")
		return &MsgError{
			err: Error{code: ERROR_STREAM_READER_PANIC,
				severity: FATAL,
				category: MUTATION_MANAGER}}
	}

	//use select to wait for stream reader response.
	//in case stream reader has exited, a message on streamReaderExitCh
	//will unblock this call
	select {

	case respMsg, ok := <-m.streamReaderCmdChMap[streamId]:
		if ok {
			return respMsg
		} else {
			logging.Fatalf("MutationMgr:sendMsgToStreamReader Internal Error. Unexpected" +
				"Close for Stream Reader Command Channel")
			return &MsgError{
				err: Error{code: ERROR_MUT_MGR_INTERNAL_ERROR,
					severity: FATAL,
					category: MUTATION_MANAGER}}
		}

	case <-m.streamReaderExitChMap[streamId]:
		logging.Fatalf("MutationMgr::sendMsgToStreamReader Unexpected Stream Reader Exit")
		return &MsgError{
			err: Error{code: ERROR_STREAM_READER_PANIC,
				severity: FATAL,
				category: MUTATION_MANAGER}}
	}

}
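Pairing every channel send and receive with a case on an exit channel, as sendMsgToStreamReader does, keeps the caller from blocking forever on a goroutine that has died: once the exit channel is closed, its case is always ready. A self-contained sketch with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// sendWithExit blocks on cmdCh unless exitCh is closed first.
func sendWithExit(cmdCh chan<- string, exitCh <-chan struct{}, msg string) error {
	select {
	case cmdCh <- msg:
		return nil
	case <-exitCh:
		return errors.New("worker exited")
	}
}

func main() {
	cmdCh := make(chan string) // unbuffered: a send needs a live receiver
	exitCh := make(chan struct{})
	close(exitCh) // simulate a worker that has already exited

	if err := sendWithExit(cmdCh, exitCh, "persist"); err != nil {
		fmt.Println(err) // worker exited
	}
}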
Example #17
// NewEndpointFactory returns a factory that creates endpoint instances based on config.
func NewEndpointFactory(cluster string, nvbs int) c.RouterEndpointFactory {

	return func(topic, endpointType, addr string, config c.Config) (c.RouterEndpoint, error) {
		switch endpointType {
		case "dataport":
			return dataport.NewRouterEndpoint(cluster, topic, addr, nvbs, config)
		default:
			logging.Fatalf("Unknown endpoint type\n")
		}
		return nil, nil
	}
}
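NewEndpointFactory returns a closure that captures cluster and nvbs and dispatches on endpointType. A minimal sketch of the same shape with hypothetical types; unlike the code above, the sketch reports an unknown type as an error rather than returning nil, nil:

package main

import "fmt"

// Endpoint and stdoutEndpoint are hypothetical stand-ins for RouterEndpoint.
type Endpoint interface{ Send(msg string) }

type stdoutEndpoint struct{ addr string }

func (e stdoutEndpoint) Send(msg string) { fmt.Println(e.addr, msg) }

type EndpointFactory func(endpointType, addr string) (Endpoint, error)

// NewFactory returns a closure that dispatches on the endpoint type.
func NewFactory() EndpointFactory {
	return func(endpointType, addr string) (Endpoint, error) {
		switch endpointType {
		case "stdout":
			return stdoutEndpoint{addr: addr}, nil
		default:
			return nil, fmt.Errorf("unknown endpoint type %q", endpointType)
		}
	}
}

func main() {
	ep, err := NewFactory()("stdout", "localhost:9000")
	if err != nil {
		panic(err)
	}
	ep.Send("hello")
}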
Example #18
func (k *kvSender) sendRestartVbuckets(ap *projClient.Client,
	topic string, connErrVbs []Vbucket,
	restartTs *protobuf.TsVbuuid) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendRestartVbuckets Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())
	logging.LazyVerbosef("KVSender::sendRestartVbuckets RestartTs %v", restartTs.Repr)

	//Shutdown the vbucket before restart if there was a ConnErr. If the vbucket is
	//already running it will be shut down; otherwise the projector will ignore the request.
	if len(connErrVbs) != 0 {

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets %v Topic %v %v ConnErrVbs %v",
			ap, topic, restartTs.GetBucket(), connErrVbs)

		// Only shut down the Vbs that received a connection error. It is probably not
		// harmful to shut down every VB in the repairTS, including those that only
		// receive StreamEnd. But due to network / projector latency, a VB StreamBegin
		// may be on the way for those VBs (especially when RepairStream has already
		// retried a couple of times). So shutting down all VBs in restartTs may
		// unnecessarily cause a race condition and make the protocol take longer to
		// converge. ShutdownVbuckets should have no effect on a projector that does
		// not own the Vb.
		shutdownTs := k.computeShutdownTs(restartTs, connErrVbs)

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets Projector %v Topic %v %v \n\tShutdownTs %v",
			ap, topic, restartTs.GetBucket(), shutdownTs.Repr())

		if err := ap.ShutdownVbuckets(topic, []*protobuf.TsVbuuid{shutdownTs}); err != nil {
			logging.Errorf("KVSender::sendRestartVbuckets Unexpected Error During "+
				"ShutdownVbuckets Request for Projector %v Topic %v. Err %v.", ap,
				topic, err)

			//all shutdownVbuckets errors are treated as success as it is a best-effort call.
			//RestartVbuckets errors will be acted upon.
		}
	}

	if res, err := ap.RestartVbuckets(topic, []*protobuf.TsVbuuid{restartTs}); err != nil {
		logging.Fatalf("KVSender::sendRestartVbuckets Unexpected Error During "+
			"Restart Vbuckets Request for Projector %v Topic %v %v . Err %v.", ap,
			topic, restartTs.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendRestartVbuckets Success Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())
		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendRestartVbuckets \nActiveTs %v \nRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), restartTs.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), restartTs.GetBucket()))
		}
		return res, nil
	}
}
Example #19
//handleSupervisorCommands handles the messages from the Supervisor.
//Each operation acquires the mutex to make itself atomic.
func (m *mutationMgr) handleSupervisorCommands(cmd Message) {

	switch cmd.GetMsgType() {

	case OPEN_STREAM:
		m.handleOpenStream(cmd)

	case ADD_INDEX_LIST_TO_STREAM:
		m.handleAddIndexListToStream(cmd)

	case REMOVE_INDEX_LIST_FROM_STREAM:
		m.handleRemoveIndexListFromStream(cmd)

	case REMOVE_BUCKET_FROM_STREAM:
		m.handleRemoveBucketFromStream(cmd)

	case CLOSE_STREAM:
		m.handleCloseStream(cmd)

	case CLEANUP_STREAM:
		m.handleCleanupStream(cmd)

	case MUT_MGR_PERSIST_MUTATION_QUEUE:
		m.handlePersistMutationQueue(cmd)

	case MUT_MGR_DRAIN_MUTATION_QUEUE:
		m.handleDrainMutationQueue(cmd)

	case MUT_MGR_GET_MUTATION_QUEUE_HWT:
		m.handleGetMutationQueueHWT(cmd)

	case MUT_MGR_GET_MUTATION_QUEUE_LWT:
		m.handleGetMutationQueueLWT(cmd)

	case UPDATE_INDEX_INSTANCE_MAP:
		m.handleUpdateIndexInstMap(cmd)

	case UPDATE_INDEX_PARTITION_MAP:
		m.handleUpdateIndexPartnMap(cmd)

	case MUT_MGR_ABORT_PERSIST:
		m.handleAbortPersist(cmd)

	case CONFIG_SETTINGS_UPDATE:
		m.handleConfigUpdate(cmd)

	default:
		logging.Fatalf("MutationMgr::handleSupervisorCommands Received Unknown Command %v", cmd)
		common.CrashOnError(errors.New("Unknown Command On Supervisor Channel"))
	}
}
Example #20
func main() {
	cluster := argParse()[0]

	// setup cbauth
	if options.auth != "" {
		up := strings.Split(options.auth, ":")
		if _, err := cbauth.InternalRetryDefaultInit(cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}
	go startFeed(cluster, "streamwait-feed1")
	go startFeed(cluster, "streamwait-feed2")
	time.Sleep(1000 * time.Second)
}
Example #21
//panicHandler handles any panic in the mutation manager
func (r *mutationMgr) panicHandler() {

	//panic recovery
	if rc := recover(); rc != nil {
		var err error
		switch x := rc.(type) {
		case string:
			err = errors.New(x)
		case error:
			err = x
		default:
			err = errors.New("Unknown panic")
		}

		logging.Fatalf("MutationManager Panic Err %v", err)
		logging.Fatalf("%s", logging.StackTrace())

		//shutdown the mutation manager
		select {
		case <-r.shutdownCh:
			//if the shutdown channel is closed, this means shutdown was in progress
			//when panic happened, skip calling shutdown again
		default:
			r.shutdown()
		}

		//panic, propagate to supervisor
		msg := &MsgError{
			err: Error{code: ERROR_MUT_MGR_PANIC,
				severity: FATAL,
				category: MUTATION_MANAGER,
				cause:    err}}
		r.supvRespch <- msg
	}

}
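The select on r.shutdownCh works because a receive from a closed channel never blocks: while the channel is open only the default branch can run, and after close the receive case is always ready. A tiny illustration:

package main

import "fmt"

func main() {
	shutdownCh := make(chan struct{})

	maybeShutdown := func() {
		select {
		case <-shutdownCh:
			fmt.Println("shutdown already in progress; skipping")
		default:
			fmt.Println("running shutdown")
			close(shutdownCh)
		}
	}

	maybeShutdown() // runs shutdown and closes the channel
	maybeShutdown() // skips: the closed channel makes the first case ready
}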
Example #22
//send the actual ShutdownStreamRequest on adminport
func sendShutdownTopic(ap *projClient.Client,
	topic string) error {

	logging.Infof("KVSender::sendShutdownTopic Projector %v Topic %v", ap, topic)

	if err := ap.ShutdownTopic(topic); err != nil {
		logging.Fatalf("KVSender::sendShutdownTopic Unexpected Error During "+
			"Shutdown Projector %v Topic %v. Err %v", ap, topic, err)

		return err
	} else {
		logging.Infof("KVSender::sendShutdownTopic Success Projector %v Topic %v", ap, topic)
		return nil
	}
}
Example #23
func (k *kvSender) makeRestartTsFromKV(bucket string,
	vbnos []uint32) (*protobuf.TsVbuuid, error) {

	flogs, err := k.getFailoverLogs(bucket, vbnos)
	if err != nil {
		logging.Fatalf("KVSender::makeRestartTS Unexpected Error During Failover "+
			"Log Request for Bucket %v. Err %v", bucket, err)
		return nil, err
	}

	ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos))
	ts = ts.ComputeRestartTs(flogs.ToFailoverLog(c.Vbno32to16(vbnos)))

	return ts, nil
}
Example #24
func (feed *DcpFeed) connectToNodes(
	kvaddrs []string, opaque uint16, config map[string]interface{}) error {

	prefix := feed.logPrefix
	kvcache := make(map[string]bool)
	m, err := feed.bucket.GetVBmap(kvaddrs)
	if err != nil {
		fmsg := "%v ##%x GetVBmap(%v) failed: %v\n"
		logging.Fatalf(fmsg, prefix, opaque, kvaddrs, err)
		return memcached.ErrorInvalidFeed
	}
	for kvaddr := range m {
		kvcache[kvaddr] = true
	}

	for _, serverConn := range feed.bucket.getConnPools() {
		if _, ok := kvcache[serverConn.host]; !ok {
			continue
		}
		nodeFeed := feed.nodeFeeds[serverConn.host]
		if nodeFeed != nil {
			nodeFeed.dcpFeed.Close()
			// and continue to spawn a new one ...
		}

		var name DcpFeedName
		if feed.name == "" {
			name = NewDcpFeedName("DefaultDcpClient")
		} else {
			name = feed.name
		}
		singleFeed, err := serverConn.StartDcpFeed(
			name, feed.sequence, feed.output, opaque, config)
		if err != nil {
			for _, nodeFeed := range feed.nodeFeeds {
				nodeFeed.dcpFeed.Close()
			}
			return memcached.ErrorInvalidFeed
		}
		// add the node to the connection map
		feedInfo := &FeedInfo{
			dcpFeed: singleFeed,
			host:    serverConn.host,
		}
		feed.nodeFeeds[serverConn.host] = feedInfo
	}
	return nil
}
Example #25
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	common.SeedProcess()

	logging.Infof("Indexer started with command line: %v\n", os.Args)
	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	forestdb.Log = &logging.SystemLogger

	// setup cbauth
	if *auth != "" {
		up := strings.Split(*auth, ":")
		logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
		if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	go platform.DumpOnSignal()
	go common.ExitOnStdinClose()

	config := common.SystemConfig
	config.SetValue("indexer.clusterAddr", *cluster)
	config.SetValue("indexer.numVbuckets", *numVbuckets)
	config.SetValue("indexer.enableManager", *enableManager)
	config.SetValue("indexer.adminPort", *adminPort)
	config.SetValue("indexer.scanPort", *scanPort)
	config.SetValue("indexer.httpPort", *httpPort)
	config.SetValue("indexer.streamInitPort", *streamInitPort)
	config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
	config.SetValue("indexer.streamMaintPort", *streamMaintPort)
	config.SetValue("indexer.storage_dir", *storageDir)

	storage_dir := config["indexer.storage_dir"].String()
	if err := os.MkdirAll(storage_dir, 0755); err != nil {
		common.CrashOnError(err)
	}

	_, msg := indexer.NewIndexer(config)

	if msg.GetMsgType() != indexer.MSG_SUCCESS {
		logging.Warnf("Indexer Failure to Init %v", msg)
	}

	logging.Infof("Indexer exiting normally\n")
}
Example #26
func main() {
	cluster := argParse()

	// setup cbauth
	if options.auth != "" {
		up := strings.Split(options.auth, ":")
		if _, err := cbauth.InternalRetryDefaultInit(cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	for _, bucket := range options.buckets {
		go startBucket(cluster, bucket)
	}
	receive()
}
Example #27
// Application is example application logic that uses the query-port server.
func Application(config c.Config) {
	killch := make(chan bool)
	s, err := NewServer(
		"localhost:9990",
		func(req interface{},
			conn net.Conn, quitch <-chan interface{}) {
			requestHandler(req, conn, quitch, killch)
		},
		config)

	if err != nil {
		logging.Fatalf("Listen failed - %v", err)
	}
	<-killch
	s.Close()
}
Example #28
// GetBulk gets keys in bulk
func (c *Client) GetBulk(vb uint16, keys []string) (map[string]*transport.MCResponse, error) {
	rv := map[string]*transport.MCResponse{}
	going := true

	defer func() {
		going = false
	}()

	errch := make(chan error, 2)

	go func() {
		defer func() { errch <- nil }()
		for going {
			res, err := c.Receive()
			if err != nil {
				errch <- err
				return
			}
			switch res.Opcode {
			case transport.GET:
				going = false
			case transport.GETQ:
			default:
				logging.Fatalf("Unexpected opcode in GETQ response: %+v", res)
			}
			rv[keys[res.Opaque]] = res
		}
	}()

	for i, k := range keys {
		op := transport.GETQ
		if i == len(keys)-1 {
			op = transport.GET
		}
		err := c.Transmit(&transport.MCRequest{
			Opcode:  op,
			VBucket: vb,
			Key:     []byte(k),
			Opaque:  uint32(i),
		})
		if err != nil {
			return rv, err
		}
	}

	return rv, <-errch
}
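GetBulk pipelines quiet GETQ requests and makes only the final one a regular GET, so the receive loop knows the batch is done when the GET reply arrives; the Opaque field round-trips each key's index. A protocol-free sketch of marking the last request in a batch:

package main

import "fmt"

type request struct {
	key    string
	quiet  bool   // GETQ-style: quiet requests send no reply on a miss
	opaque uint32 // round-trips the key's index, as in GetBulk
}

// buildBatch marks every request quiet except the last, mirroring GetBulk.
func buildBatch(keys []string) []request {
	batch := make([]request, len(keys))
	for i, k := range keys {
		batch[i] = request{key: k, quiet: i != len(keys)-1, opaque: uint32(i)}
	}
	return batch
}

func main() {
	for _, r := range buildBatch([]string{"a", "b", "c"}) {
		fmt.Printf("key=%s quiet=%v opaque=%d\n", r.key, r.quiet, r.opaque)
	}
}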
Example #29
// GetIndex for bucket/indexName.
func GetIndex(
	client *qclient.GsiClient,
	bucket, indexName string) (*mclient.IndexMetadata, bool) {

	indexes, err := client.Refresh()
	if err != nil {
		logging.Fatalf("%v\n", err)
		os.Exit(1)
	}
	for _, index := range indexes {
		defn := index.Definition
		if defn.Bucket == bucket && defn.Name == indexName {
			return index, true
			//return uint64(index.Definition.DefnId), true
		}
	}
	return nil, false
}
Example #30
//send the actual DelBuckets request on adminport
func sendDelBucketsRequest(ap *projClient.Client,
	topic string,
	buckets []string) error {

	logging.Infof("KVSender::sendDelBucketsRequest Projector %v Topic %v Buckets %v",
		ap, topic, buckets)

	if err := ap.DelBuckets(topic, buckets); err != nil {
		logging.Fatalf("KVSender::sendDelBucketsRequest Unexpected Error During "+
			"Del Buckets Request Projector %v Topic %v Buckets %v. Err %v", ap,
			topic, buckets, err)

		return err
	} else {
		logging.Infof("KVSender::sendDelBucketsRequest Success Projector %v Topic %v Buckets %v",
			ap, topic, buckets)
		return nil
	}
}