// Example 1 (score: 0)
// deleteIndexesFromStream asks every projector node to drop the given index
// instances from the topic corresponding to streamId. Errors indicating the
// topic or bucket is already gone (TopicMissing/GenServer.Closed/InvalidBucket)
// are treated as success. The outcome is reported on respCh as either
// MsgSuccess or a FATAL MsgError.
func (k *kvSender) deleteIndexesFromStream(streamId c.StreamId, indexInstList []c.IndexInst,
	respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error in fetching cluster info %v",
			streamId, indexInstList[0].Defn.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	//instance ids of the indexes to be removed from the stream
	var instIds []uint64
	for _, inst := range indexInstList {
		instIds = append(instIds, uint64(inst.InstId))
	}

	topic := getTopicForStreamId(streamId)

	attempt := func(r int, err error) error {

		//start each retry with a clean error
		err = nil
		for _, addr := range addrs {
			execWithStopCh(func() {
				client := newProjClient(addr)
				ret := sendDelInstancesRequest(client, topic, instIds)
				if ret == nil {
					return
				}
				logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v from %v",
					streamId, indexInstList[0].Defn.Bucket, ret, addr)
				//Treat TopicMissing/GenServer.Closed/InvalidBucket as success
				switch ret.Error() {
				case projClient.ErrorTopicMissing.Error(),
					c.ErrorClosed.Error(),
					projClient.ErrorInvalidBucket.Error():
					logging.Infof("KVSender::deleteIndexesFromStream %v %v Treating %v As Success",
						streamId, indexInstList[0].Defn.Bucket, ret)
				default:
					err = ret
				}
			}, stopCh)
		}
		return err
	}

	retryHelper := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, attempt)
	if err = retryHelper.Run(); err != nil {
		logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v",
			streamId, indexInstList[0].Defn.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	respCh <- &MsgSuccess{}
}
// Example 2 (score: 0)
// closeMutationStream shuts down the topic corresponding to streamId on every
// projector node. Errors indicating the topic is already gone
// (TopicMissing/GenServer.Closed) are treated as success. The outcome is
// reported on respCh as either MsgSuccess or a FATAL MsgError.
func (k *kvSender) closeMutationStream(streamId c.StreamId, bucket string,
	respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::closeMutationStream %v %v Error in fetching cluster info %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	topic := getTopicForStreamId(streamId)

	attempt := func(r int, err error) error {

		//start each retry with a clean error
		err = nil
		for _, addr := range addrs {
			execWithStopCh(func() {
				client := newProjClient(addr)
				ret := sendShutdownTopic(client, topic)
				if ret == nil {
					return
				}
				logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v from %v",
					streamId, bucket, ret, addr)
				//Treat TopicMissing/GenServer.Closed as success
				switch ret.Error() {
				case projClient.ErrorTopicMissing.Error(), c.ErrorClosed.Error():
					logging.Infof("KVSender::closeMutationStream %v %v Treating %v As Success",
						streamId, bucket, ret)
				default:
					err = ret
				}
			}, stopCh)
		}
		return err
	}

	retryHelper := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, attempt)
	if err = retryHelper.Run(); err != nil {
		logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v", streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	respCh <- &MsgSuccess{}

}
// Example 3 (score: 0)
// GetCurrentKVTs fetches the current sequence numbers for every vbucket of
// the given bucket (retrying up to MAX_GETSEQS_RETRIES times) and converts
// them into a Timestamp. Returns an error when the cluster cannot be reached
// or when fewer than numVbs seqnos are returned.
func GetCurrentKVTs(cluster, pooln, bucketn string, numVbs int) (Timestamp, error) {

	var seqnos []uint64

	attempt := func(r int, err error) error {
		//log the previous failure before retrying
		if r > 0 {
			logging.Warnf("Indexer::getCurrentKVTs error=%v Retrying (%d)", err, r)
		}

		seqnos, err = common.BucketSeqnos(cluster, pooln, bucketn)
		return err
	}

	start := time.Now()
	if err := common.NewRetryHelper(MAX_GETSEQS_RETRIES, time.Millisecond, 1, attempt).Run(); err != nil {
		// then log an error and give-up
		logging.Errorf("Indexer::getCurrentKVTs Error Connecting to KV Cluster %v", err)
		return nil, err
	}

	//a partial result is as good as a failure
	if len(seqnos) != numVbs {
		return nil, fmt.Errorf("BucketSeqnos(): got ts only for %v vbs", len(seqnos))
	}

	ts := NewTimestamp(numVbs)
	for vb, seqno := range seqnos {
		ts[vb] = Seqno(seqno)
	}

	logging.Tracef("Indexer::getCurrentKVTs Time Taken %v \n\t TS Returned %v", time.Since(start), ts)
	return ts, nil
}
// Example 4 (score: 0)
// addIndexForExistingBucket adds the given index instances to the already-open
// topic for streamId on every projector node, retrying until a currentTs entry
// has been received for every vbucket. On success it replies on respCh with a
// MsgStreamUpdate carrying the merged restart timestamp; on persistent failure
// it replies with a FATAL MsgError.
func (k *kvSender) addIndexForExistingBucket(streamId c.StreamId, bucket string, indexInstList []c.IndexInst,
	respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error in fetching cluster info %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	var currentTs *protobuf.TsVbuuid
	protoInstList := convertIndexListToProto(k.config, k.cInfoCache, indexInstList, streamId)
	topic := getTopicForStreamId(streamId)

	//loop-invariant: read the vbucket count once instead of on every
	//retry attempt and again after the retry loop
	numVbuckets := k.config["numVbuckets"].Int()

	fn := func(r int, err error) error {

		//clear the error before every retry
		err = nil
		for _, addr := range addrs {
			execWithStopCh(func() {
				ap := newProjClient(addr)
				if res, ret := sendAddInstancesRequest(ap, topic, protoInstList); ret != nil {
					logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error Received %v from %v",
						streamId, bucket, ret, addr)
					err = ret
				} else {
					//merge this node's response into the accumulated ts
					currentTs = updateCurrentTsFromResponse(bucket, currentTs, res)
				}
			}, stopCh)
		}

		//retry unless currentTs has an entry for every vbucket
		if currentTs == nil || currentTs.Len() != numVbuckets {
			return errors.New("ErrPartialVbStart")
		}
		return err
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()
	if err != nil {
		logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error Received %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	//convert the accumulated ts from protobuf to native format
	nativeTs := currentTs.ToTsVbuuid(numVbuckets)

	respCh <- &MsgStreamUpdate{mType: MSG_SUCCESS,
		streamId:  streamId,
		bucket:    bucket,
		restartTs: nativeTs}
}
// Example 5 (score: 0)
// restartVbuckets asks the projector nodes owning the vbuckets in restartTs to
// restart those vbuckets on the stream's topic. Replies on respCh with:
//   - MsgRollback if any requested vbucket appears in the returned rollback ts
//   - MsgKVStreamRepair if the topic/bucket is gone (a fresh
//     MutationTopicRequest is required)
//   - MsgError for any other persistent error
//   - MsgSuccess otherwise
//
// NOTE(review): unlike the sibling stream methods, the projector calls here are
// not wrapped in execWithStopCh, so stopCh is currently unused — confirm
// whether that is intentional.
func (k *kvSender) restartVbuckets(streamId c.StreamId, restartTs *c.TsVbuuid,
	connErrVbs []Vbucket, respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getProjAddrsForVbuckets(restartTs.Bucket, restartTs.GetVbnos())
	if err != nil {
		logging.Errorf("KVSender::restartVbuckets %v %v Error in fetching cluster info %v",
			streamId, restartTs.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}

		return
	}

	//convert TS to protobuf format
	var protoRestartTs *protobuf.TsVbuuid
	numVbuckets := k.config["numVbuckets"].Int()
	protoTs := protobuf.NewTsVbuuid(DEFAULT_POOL, restartTs.Bucket, numVbuckets)
	protoRestartTs = protoTs.FromTsVbuuid(restartTs)

	var rollbackTs *protobuf.TsVbuuid
	topic := getTopicForStreamId(streamId)
	rollback := false

	fn := func(r int, err error) error {

		for _, addr := range addrs {
			ap := newProjClient(addr)

			if res, ret := k.sendRestartVbuckets(ap, topic, connErrVbs, protoRestartTs); ret != nil {
				//retry for all errors
				logging.Errorf("KVSender::restartVbuckets %v %v Error Received %v from %v",
					streamId, restartTs.Bucket, ret, addr)
				err = ret
			} else {
				//merge this node's rollback info into the accumulated ts
				rollbackTs = updateRollbackTsFromResponse(restartTs.Bucket, rollbackTs, res)
			}
		}

		if rollbackTs != nil && checkVbListInTS(protoRestartTs.GetVbnos(), rollbackTs) {
			//if rollback, no need to retry
			rollback = true
			return nil
		} else {
			return err
		}
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()

	//if any of the requested vb is in rollback ts, send rollback
	//msg to caller
	if rollback {
		//convert from protobuf to native format
		nativeTs := rollbackTs.ToTsVbuuid(numVbuckets)

		respCh <- &MsgRollback{streamId: streamId,
			rollbackTs: nativeTs}
	} else if err != nil {
		//if there is a topicMissing/genServer.Closed error, a fresh
		//MutationTopicRequest is required.
		if err.Error() == projClient.ErrorTopicMissing.Error() ||
			err.Error() == c.ErrorClosed.Error() ||
			err.Error() == projClient.ErrorInvalidBucket.Error() {
			respCh <- &MsgKVStreamRepair{
				streamId: streamId,
				bucket:   restartTs.Bucket,
			}
		} else {
			respCh <- &MsgError{
				err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
					severity: FATAL,
					cause:    err}}

		}
	} else {
		respCh <- &MsgSuccess{}
	}
}
// Example 6 (score: 0)
// openMutationStream opens a mutation topic for streamId on every projector
// node, requesting the stream start from restartTs (which may be nil for a
// fresh start — makeRestartTsForVbs builds the actual per-vb restart points).
// Retries until an activeTs entry is received for every vbucket. Replies on
// respCh with MsgRollback if any node demands a rollback, MsgError on
// persistent failure, and MsgSuccessOpenStream (carrying the active ts)
// on success.
func (k *kvSender) openMutationStream(streamId c.StreamId, indexInstList []c.IndexInst,
	restartTs *c.TsVbuuid, respCh MsgChannel, stopCh StopChannel) {

	if len(indexInstList) == 0 {
		logging.Warnf("KVSender::openMutationStream Empty IndexList. Nothing to do.")
		respCh <- &MsgSuccess{}
		return
	}

	protoInstList := convertIndexListToProto(k.config, k.cInfoCache, indexInstList, streamId)
	bucket := indexInstList[0].Defn.Bucket

	//use any bucket as list of vbs remain the same for all buckets
	vbnos, err := k.getAllVbucketsInCluster(bucket)
	if err != nil {
		//BUGFIX: log bucket instead of restartTs.Bucket — restartTs may be
		//nil here (fresh stream start) and dereferencing it would panic
		logging.Errorf("KVSender::openMutationStream %v %v Error in fetching vbuckets info %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	restartTsList, err := k.makeRestartTsForVbs(bucket, restartTs, vbnos)
	if err != nil {
		logging.Errorf("KVSender::openMutationStream %v %v Error making restart ts %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::openMutationStream %v %v Error Fetching Projector Addrs %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	var rollbackTs *protobuf.TsVbuuid
	var activeTs *protobuf.TsVbuuid
	topic := getTopicForStreamId(streamId)

	fn := func(r int, err error) error {

		//clear the error before every retry
		err = nil
		for _, addr := range addrs {

			execWithStopCh(func() {
				ap := newProjClient(addr)
				if res, ret := k.sendMutationTopicRequest(ap, topic, restartTsList, protoInstList); ret != nil {
					//for all errors, retry
					logging.Errorf("KVSender::openMutationStream %v %v Error Received %v from %v",
						streamId, bucket, ret, addr)
					err = ret
				} else {
					//merge this node's response into the accumulated ts
					activeTs = updateActiveTsFromResponse(bucket, activeTs, res)
					rollbackTs = updateRollbackTsFromResponse(bucket, rollbackTs, res)
				}
			}, stopCh)
		}

		if rollbackTs != nil {
			//no retry required for rollback
			return nil
		}
		if err != nil {
			//retry for any error
			return err
		}
		//retry unless activeTs has an entry for every vbucket
		if activeTs == nil || activeTs.Len() != len(vbnos) {
			return errors.New("ErrPartialVbStart")
		}
		return nil
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()

	if rollbackTs != nil {
		logging.Infof("KVSender::openMutationStream %v %v Rollback Received %v",
			streamId, bucket, rollbackTs)
		//convert from protobuf to native format
		numVbuckets := k.config["numVbuckets"].Int()
		nativeTs := rollbackTs.ToTsVbuuid(numVbuckets)
		respCh <- &MsgRollback{streamId: streamId,
			bucket:     bucket,
			rollbackTs: nativeTs}
	} else if err != nil {
		logging.Errorf("KVSender::openMutationStream %v %v Error Received %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
	} else {
		numVbuckets := k.config["numVbuckets"].Int()
		respCh <- &MsgSuccessOpenStream{activeTs: activeTs.ToTsVbuuid(numVbuckets)}
	}
}