Example #1
func (c *monitorTestProjectorClient) MutationTopicRequest(topic, endpointType string,
	reqTimestamps []*protobuf.TsVbuuid, instances []*protobuf.Instance) (*protobuf.TopicResponse, error) {

	logging.Infof("monitorTestProjectorClient.MutationTopicRequest(): start")

	if len(reqTimestamps) == 0 {
		util.TT.Fatal("monitorTestProjectorClient.MutationTopicRequest(): reqTimestamps is empty")
	}

	response := new(protobuf.TopicResponse)
	response.Topic = &topic
	response.InstanceIds = make([]uint64, len(instances))
	for i, inst := range instances {
		response.InstanceIds[i] = inst.GetIndexInstance().GetInstId()
	}
	response.ActiveTimestamps = reqTimestamps

	if reqTimestamps[0].GetSeqnos()[10] != 406 {
		response.RollbackTimestamps = make([]*protobuf.TsVbuuid, 1)
		response.RollbackTimestamps[0] = protobuf.NewTsVbuuid(manager.DEFAULT_POOL_NAME, reqTimestamps[0].GetBucket(), manager.NUM_VB)
		response.RollbackTimestamps[0].Append(uint16(10), uint64(406), reqTimestamps[0].Vbuuids[10], 0, 0)

		response.Err = protobuf.NewError(projectorC.ErrorStreamRequest)
		return response, projectorC.ErrorStreamRequest
	} else {
		response.RollbackTimestamps = nil
		response.Err = nil
		return response, nil
	}
}
Example #2
func (c *timerTestProjectorClient) InitialRestartTimestamp(pooln, bucketn string) (*protobuf.TsVbuuid, error) {

	newTs := protobuf.NewTsVbuuid("default", bucketn, manager.NUM_VB)
	for i := 0; i < manager.NUM_VB; i++ {
		newTs.Append(uint16(i), uint64(i), uint64(1234), uint64(0), uint64(0))
	}
	return newTs, nil
}
Example #3
// marshallTimestamp serializes a common.TsVbuuid as a base64-encoded
// protobuf blob.
func marshallTimestamp(input *common.TsVbuuid) (string, error) {

	ts := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, input.Bucket, NUM_VB)
	ts = ts.FromTsVbuuid(input)
	buf, err := proto.Marshal(ts)
	if err != nil {
		return "", err
	}

	str := base64.StdEncoding.EncodeToString(buf)
	return str, nil
}
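
For round-tripping, the inverse operation is symmetric: base64-decode the string, then proto.Unmarshal into a fresh message. A minimal sketch of a hypothetical unmarshallTimestamp helper, assuming the same proto, base64, and protobuf imports as above:

// unmarshallTimestamp is a hypothetical inverse of marshallTimestamp:
// it base64-decodes the string and unmarshals the protobuf message.
func unmarshallTimestamp(str string) (*protobuf.TsVbuuid, error) {

	buf, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return nil, err
	}

	ts := new(protobuf.TsVbuuid)
	if err := proto.Unmarshal(buf, ts); err != nil {
		return nil, err
	}
	return ts, nil
}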
Example #4
func makeRestartTsFromTsVbuuid(bucket string, tsVbuuid *c.TsVbuuid,
	vbnos []uint32) (*protobuf.TsVbuuid, error) {

	ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos))
	for _, vbno := range vbnos {
		ts.Append(uint16(vbno), tsVbuuid.Seqnos[vbno],
			tsVbuuid.Vbuuids[vbno], tsVbuuid.Snapshots[vbno][0],
			tsVbuuid.Snapshots[vbno][1])
	}

	return ts, nil

}
Example #5
func (p *streamEndTestProjectorClientEnv) GetNodeListForTimestamps(timestamps []*common.TsVbuuid) (map[string][]*protobuf.TsVbuuid, error) {

	logging.Infof("streamEndTestProjectorClientEnv.GetNodeListForTimestamps() ")
	nodes := make(map[string][]*protobuf.TsVbuuid)
	nodes["127.0.0.1"] = nil

	newTs := protobuf.NewTsVbuuid("default", "Default", 1)
	for i := range timestamps[0].Seqnos {
		newTs.Append(uint16(i), timestamps[0].Seqnos[i], timestamps[0].Vbuuids[i],
			timestamps[0].Snapshots[i][0], timestamps[0].Snapshots[i][1])
	}

	nodes["127.0.0.1"] = append(nodes["127.0.0.1"], newTs)
	return nodes, nil
}
Example #6
func (k *kvSender) makeRestartTsFromKV(bucket string,
	vbnos []uint32) (*protobuf.TsVbuuid, error) {

	flogs, err := k.getFailoverLogs(bucket, vbnos)
	if err != nil {
		logging.Fatalf("KVSender::makeRestartTS Unexpected Error During Failover "+
			"Log Request for Bucket %v. Err %v", bucket, err)
		return nil, err
	}

	ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos))
	ts = ts.ComputeRestartTs(flogs.ToFailoverLog(c.Vbno32to16(vbnos)))

	return ts, nil
}
Example #7
// computeShutdownTs builds a timestamp holding only the vbuckets listed in
// connErrVbs, copying their entries from restartTs.
func (k *kvSender) computeShutdownTs(restartTs *protobuf.TsVbuuid, connErrVbs []Vbucket) *protobuf.TsVbuuid {

	numVbuckets := k.config["numVbuckets"].Int()
	shutdownTs := protobuf.NewTsVbuuid(*restartTs.Pool, *restartTs.Bucket, numVbuckets)
	for _, vbno1 := range connErrVbs {
		for i, vbno2 := range restartTs.Vbnos {
			// connErrVbs is a subset of Vb in restartTs.
			if uint32(vbno1) == vbno2 {
				shutdownTs.Append(uint16(vbno1), restartTs.Seqnos[i], restartTs.Vbuuids[i],
					*restartTs.Snapshots[i].Start, *restartTs.Snapshots[i].End)
			}
		}
	}

	return shutdownTs
}
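
Since connErrVbs is documented as a subset of the vbuckets in restartTs, the nested scan above costs O(len(connErrVbs) × len(restartTs.Vbnos)). A hypothetical variant (not part of the original code) that builds a vbno-to-offset index once, assuming the same types as the example:

// computeShutdownTsIndexed is an illustrative rewrite of computeShutdownTs
// that replaces the inner scan with a map lookup.
func computeShutdownTsIndexed(restartTs *protobuf.TsVbuuid, connErrVbs []Vbucket,
	numVbuckets int) *protobuf.TsVbuuid {

	// Index each vbucket number to its position in restartTs.
	offsets := make(map[uint32]int, len(restartTs.Vbnos))
	for i, vbno := range restartTs.Vbnos {
		offsets[vbno] = i
	}

	shutdownTs := protobuf.NewTsVbuuid(*restartTs.Pool, *restartTs.Bucket, numVbuckets)
	for _, vbno := range connErrVbs {
		if i, ok := offsets[uint32(vbno)]; ok {
			shutdownTs.Append(uint16(vbno), restartTs.Seqnos[i], restartTs.Vbuuids[i],
				*restartTs.Snapshots[i].Start, *restartTs.Snapshots[i].End)
		}
	}
	return shutdownTs
}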
Example #8
//
// Filter the timestamps based on the vb list of a given node.
//
func (p *ProjectorClientEnvImpl) FilterTimestampsForNode(timestamps []*protobuf.TsVbuuid,
	node string) ([]*protobuf.TsVbuuid, error) {

	logging.Debugf("ProjectorClientEnvImpl.FilterTimestampsForNode(): start")

	var newTimestamps []*protobuf.TsVbuuid

	for _, ts := range timestamps {

		bucketRef, err := couchbase.GetBucket(COUCHBASE_INTERNAL_BUCKET_URL, DEFAULT_POOL_NAME, ts.GetBucket())
		if err != nil {
			return nil, err
		}

		if err := bucketRef.Refresh(); err != nil {
			return nil, err
		}

		vbmap, err := bucketRef.GetVBmap(nil)
		if err != nil {
			return nil, err
		}

		newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, ts.GetBucket(), NUM_VB)

		for kvaddr, vbnos := range vbmap {
			if kvaddr == node {
				for _, vbno := range vbnos {
					seqno, vbuuid, sStart, sEnd, err := ts.Get(vbno)
					// If cannot get the seqno from this vbno (err != nil), then skip.
					// Otherwise, add to the new timestamp.
					if err == nil {
						newTs.Append(uint16(vbno), seqno, vbuuid, sStart, sEnd)
					}
				}
			}
		}

		if !newTs.IsEmpty() {
			newTimestamps = append(newTimestamps, newTs)
		}
	}

	return newTimestamps, nil
}
Example #9
// InitialRestartTimestamp will compose the initial set of timestamps
// for a subset of vbuckets in `bucket`.
// - returns http errors for transport-related failures.
func (client *Client) InitialRestartTimestamp(
	pooln, bucketn string) (*protobuf.TsVbuuid, error) {

	// get vbucket map.
	vbmap, err := client.GetVbmap(pooln, bucketn, nil)
	if err != nil {
		return nil, err
	}
	// get failover logs for vbuckets
	pflogs, err := client.GetFailoverLogs(pooln, bucketn, vbmap.AllVbuckets32())
	if err != nil {
		return nil, err
	}
	vbnos := vbmap.AllVbuckets16()
	flogs := pflogs.ToFailoverLog(vbnos)

	ts := protobuf.NewTsVbuuid(pooln, bucketn, client.maxVbuckets)
	return ts.InitialRestartTs(flogs), nil
}
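
A hedged usage sketch (the client variable and the "default"/"beer-sample" pool and bucket names are placeholders, not taken from the examples above):

	ts, err := client.InitialRestartTimestamp("default", "beer-sample")
	if err != nil {
		return err // transport failure surfaced as an http error
	}
	logging.Infof("initial restart ts: %v", ts.Repr())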
Example #10
// findTimestamp returns the timestamp for the given kvaddr and bucket,
// creating, registering, and returning an empty one if none exists.
func (p *ProjectorClientEnvImpl) findTimestamp(timestampMap map[string][]*protobuf.TsVbuuid,
	kvaddr string,
	bucket string) *protobuf.TsVbuuid {

	// A missing kvaddr yields a nil slice, which is safe to range over.
	timestamps := timestampMap[kvaddr]

	for _, ts := range timestamps {
		if ts.GetBucket() == bucket {
			return ts
		}
	}

	newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, bucket, NUM_VB)
	timestamps = append(timestamps, newTs)
	timestampMap[kvaddr] = timestamps
	return newTs
}
Example #11
//
// Compute a new request timestamp based on the response from the projector.
// For each vb with a rollback entry, retry from the rollback timestamp;
// otherwise, copy the entry from the original request timestamp.
//
func recomputeRequestTimestamp(requestTs *protobuf.TsVbuuid,
	rollbackTimestamps []*protobuf.TsVbuuid) *protobuf.TsVbuuid {

	newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, requestTs.GetBucket(), len(requestTs.GetVbnos()))
	rollbackTs := findTimestampForBucket(rollbackTimestamps, requestTs.GetBucket())

	for i, vbno := range requestTs.GetVbnos() {
		offset := findTimestampOffsetForVb(rollbackTs, vbno)
		if offset != -1 {
			// there is a failover Ts for this vbno.  Use that one for retry.
			newTs.Append(uint16(vbno), rollbackTs.Seqnos[offset], rollbackTs.Vbuuids[offset],
				rollbackTs.Snapshots[offset].GetStart(), rollbackTs.Snapshots[offset].GetEnd())
		} else {
			// the vb is not active, just copy from the original requestTS
			newTs.Append(uint16(vbno), requestTs.Seqnos[i], requestTs.Vbuuids[i],
				requestTs.Snapshots[i].GetStart(), requestTs.Snapshots[i].GetEnd())
		}
	}

	return newTs
}
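
The findTimestampForBucket and findTimestampOffsetForVb helpers are not shown in these examples. A plausible sketch of the offset lookup, assuming TsVbuuid exposes its vbucket numbers via GetVbnos() (as used elsewhere above), with -1 meaning "no rollback entry for this vb":

func findTimestampOffsetForVb(ts *protobuf.TsVbuuid, vbno uint32) int {

	if ts == nil {
		return -1
	}
	for i, v := range ts.GetVbnos() {
		if v == vbno {
			return i
		}
	}
	return -1
}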
Example #12
func (c *deleteTestProjectorClient) MutationTopicRequest(topic, endpointType string,
	reqTimestamps []*protobuf.TsVbuuid, instances []*protobuf.Instance) (*protobuf.TopicResponse, error) {

	if len(reqTimestamps) == 0 {
		util.TT.Fatal("deleteTestProjectorClient.MutationTopicRequest(): reqTimestamps is nil")
	}

	for _, inst := range instances {
		delete_test_status[inst.GetIndexInstance().GetInstId()] = inst
	}

	response := new(protobuf.TopicResponse)
	response.Topic = &topic
	response.InstanceIds = make([]uint64, len(instances))
	for i, inst := range instances {
		response.InstanceIds[i] = inst.GetIndexInstance().GetInstId()
	}

	response.ActiveTimestamps = nil
	for _, ts := range reqTimestamps {
		newTs := protobuf.NewTsVbuuid("default", ts.GetBucket(), manager.NUM_VB)
		if c.server == "127.0.0.1" {
			for i := 0; i < manager.NUM_VB/2; i++ {
				newTs.Append(uint16(i), uint64(i), uint64(1234), uint64(0), uint64(0))
			}
		}
		if c.server == "127.0.0.2" {
			for i := manager.NUM_VB / 2; i < manager.NUM_VB; i++ {
				newTs.Append(uint16(i), uint64(i), uint64(1234), uint64(0), uint64(0))
			}
		}
		response.ActiveTimestamps = append(response.ActiveTimestamps, newTs)
	}

	response.RollbackTimestamps = nil
	response.Err = nil

	return response, nil
}
Example #13
//
// Create the restart timestamp
//
func makeRestartTimestamp(client ProjectorStreamClient,
	bucket string,
	requestTs *common.TsVbuuid) (*protobuf.TsVbuuid, error) {

	if requestTs == nil {
		// Get the request timestamp from each server that has the bucket (last arg is nil).
		// This should return a full timestamp covering all the vbuckets. There is no
		// guarantee that this method gets the latest seqno, though (it computes the
		// timestamp from the failover log).
		//
		// If the cluster configuration changes:
		// 1) rebalancing - should be fine since the vbuuid remains unchanged
		// 2) failover - the timestamp can have a stale vbuuid. A subsequent call
		//    to the projector will detect this.
		return client.InitialRestartTimestamp(DEFAULT_POOL_NAME, bucket)

	} else {
		newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, requestTs.Bucket, len(requestTs.Seqnos))
		for i := range requestTs.Seqnos {
			newTs.Append(uint16(i), requestTs.Seqnos[i], requestTs.Vbuuids[i],
				requestTs.Snapshots[i][0], requestTs.Snapshots[i][1])
		}
		return newTs, nil
	}
}
Example #14
func (k *kvSender) restartVbuckets(streamId c.StreamId, restartTs *c.TsVbuuid,
	connErrVbs []Vbucket, respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getProjAddrsForVbuckets(restartTs.Bucket, restartTs.GetVbnos())
	if err != nil {
		logging.Errorf("KVSender::restartVbuckets %v %v Error in fetching cluster info %v",
			streamId, restartTs.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}

		return
	}

	//convert TS to protobuf format
	var protoRestartTs *protobuf.TsVbuuid
	numVbuckets := k.config["numVbuckets"].Int()
	protoTs := protobuf.NewTsVbuuid(DEFAULT_POOL, restartTs.Bucket, numVbuckets)
	protoRestartTs = protoTs.FromTsVbuuid(restartTs)

	var rollbackTs *protobuf.TsVbuuid
	topic := getTopicForStreamId(streamId)
	rollback := false

	fn := func(r int, err error) error {

		for _, addr := range addrs {
			ap := newProjClient(addr)

			if res, ret := k.sendRestartVbuckets(ap, topic, connErrVbs, protoRestartTs); ret != nil {
				//retry for all errors
				logging.Errorf("KVSender::restartVbuckets %v %v Error Received %v from %v",
					streamId, restartTs.Bucket, ret, addr)
				err = ret
			} else {
				rollbackTs = updateRollbackTsFromResponse(restartTs.Bucket, rollbackTs, res)
			}
		}

		if rollbackTs != nil && checkVbListInTS(protoRestartTs.GetVbnos(), rollbackTs) {
			//if rollback, no need to retry
			rollback = true
			return nil
		} else {
			return err
		}
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()

	//if any of the requested vb is in rollback ts, send rollback
	//msg to caller
	if rollback {
		//convert from protobuf to native format
		nativeTs := rollbackTs.ToTsVbuuid(numVbuckets)

		respCh <- &MsgRollback{streamId: streamId,
			rollbackTs: nativeTs}
	} else if err != nil {
		//if there is a topicMissing/genServer.Closed error, a fresh
		//MutationTopicRequest is required.
		if err.Error() == projClient.ErrorTopicMissing.Error() ||
			err.Error() == c.ErrorClosed.Error() ||
			err.Error() == projClient.ErrorInvalidBucket.Error() {
			respCh <- &MsgKVStreamRepair{
				streamId: streamId,
				bucket:   restartTs.Bucket,
			}
		} else {
			respCh <- &MsgError{
				err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
					severity: FATAL,
					cause:    err}}

		}
	} else {
		respCh <- &MsgSuccess{}
	}
}
Example #15
func appHandler(endpoint string, msg interface{}) bool {
	switch v := msg.(type) {
	case []*data.VbKeyVersions:
		for _, vb := range v {
			bucket := vb.GetBucketname()
			m, ok := activity[bucket]
			if !ok {
				m = make(map[uint16][4]uint64)
			}

			vbno, vbuuid := uint16(vb.GetVbucket()), vb.GetVbuuid()
			n, ok := m[vbno]
			if !ok {
				n = [4]uint64{vbuuid, 0, 0, 0}
			}
			for _, kv := range vb.GetKvs() {
				// fmt.Printf("vbucket %v %#v\n", vbno, kv)
				seqno := kv.GetSeqno()
				if seqno > n[1] {
					n[1] = seqno
				}
				typ, start, end := kv.Snapshot()
				if typ != 0 {
					n[2], n[3] = start, end
				}
			}

			m[vbno] = n
			activity[bucket] = m
		}

	case dataport.ConnectionError:
		tss := make([]*protobuf.TsVbuuid, 0)
		for bucket, m := range activity {
			ts := protobuf.NewTsVbuuid("default", bucket, options.maxVbnos)
			for vbno, n := range m {
				if n[1] == n[3] || n[1] > n[2] {
					// seqno, vbuuid, start, end
					ts.Append(vbno, n[1], n[0], n[1], n[1])
				} else {
					// seqno, vbuuid, start, end
					ts.Append(vbno, n[1], n[0], n[2], n[3])
				}
			}
			tss = append(tss, ts)
		}

		// wait for one second and post repair endpoints and restart-vbuckets
		time.Sleep(1 * time.Second)
		endpoints := make([]string, 0)
		endpoints = append(endpoints, options.endpoints...)
		endpoints = append(endpoints, options.coordEndpoint)
		for _, client := range projectors {
			client.RepairEndpoints("backfill", endpoints)
			for _, ts := range tss {
				fmt.Println("RestartVbuckets ....", endpoint, ts.Repr())
			}
			client.RestartVbuckets("backfill", tss)
		}

	case error:
		log.Println("recovery error", v)
		return false

	}
	return true
}