//send the actual MutationTopicRequest on adminport
func (k *kvSender) sendMutationTopicRequest(ap *projClient.Client, topic string,
	reqTimestamps *protobuf.TsVbuuid,
	instances []*protobuf.Instance) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tInstances %v",
		ap, topic, reqTimestamps.GetBucket(), instances)

	logging.LazyVerbosef("KVSender::sendMutationTopicRequest RequestTS %v", reqTimestamps.Repr)

	endpointType := "dataport"

	if res, err := ap.MutationTopicRequest(topic, endpointType,
		[]*protobuf.TsVbuuid{reqTimestamps}, instances); err != nil {
		logging.Fatalf("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tUnexpected Error %v",
			ap, topic, reqTimestamps.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendMutationTopicRequest Success Projector %v Topic %v %v InstanceIds %v",
			ap, topic, reqTimestamps.GetBucket(), res.GetInstanceIds())

		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendMutationTopicRequest ActiveTs %v \n\tRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), reqTimestamps.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), reqTimestamps.GetBucket()))
		}

		return res, nil
	}
}
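
// debugPrintTs is used above (and in sendRestartVbuckets below) but not
// defined in this section. A minimal sketch of what it might look like,
// assuming it picks the timestamp matching the given bucket out of the list
// and returns its printable form; the body is a hypothetical illustration,
// not the actual implementation:
func debugPrintTs(tsList []*protobuf.TsVbuuid, bucket string) string {

	for _, ts := range tsList {
		if ts.GetBucket() == bucket {
			return ts.Repr()
		}
	}
	return ""
}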
//
// Compute a new request timestamp based on the response from projector.
// For any vb that has a rollback (failover) timestamp, the rollback entry is
// used for the retry; all other vbs keep their entries from the original requestTs.
//
func recomputeRequestTimestamp(requestTs *protobuf.TsVbuuid,
	rollbackTimestamps []*protobuf.TsVbuuid) *protobuf.TsVbuuid {

	newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, requestTs.GetBucket(), len(requestTs.GetVbnos()))
	rollbackTs := findTimestampForBucket(rollbackTimestamps, requestTs.GetBucket())

	for i, vbno := range requestTs.GetVbnos() {
		offset := findTimestampOffsetForVb(rollbackTs, vbno)
		if offset != -1 {
			// there is a failover Ts for this vbno. Use that one for retry.
			newTs.Append(uint16(vbno), rollbackTs.Seqnos[offset], rollbackTs.Vbuuids[offset],
				rollbackTs.Snapshots[offset].GetStart(), rollbackTs.Snapshots[offset].GetEnd())
		} else {
			// no rollback entry for this vb, just copy from the original requestTs
			newTs.Append(uint16(vbno), requestTs.Seqnos[i], requestTs.Vbuuids[i],
				requestTs.Snapshots[i].GetStart(), requestTs.Snapshots[i].GetEnd())
		}
	}

	return newTs
}
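
// findTimestampForBucket and findTimestampOffsetForVb are referenced above but
// not defined in this section. Hedged sketches of their likely shape, assuming
// at most one rollback timestamp per bucket and parallel Vbnos/Seqnos/Vbuuids
// arrays (hypothetical bodies):
func findTimestampForBucket(timestamps []*protobuf.TsVbuuid, bucket string) *protobuf.TsVbuuid {

	for _, ts := range timestamps {
		if ts.GetBucket() == bucket {
			return ts
		}
	}
	return nil
}

// returns the offset of vbno within ts.Vbnos, or -1 if ts is nil or the vb
// has no entry
func findTimestampOffsetForVb(ts *protobuf.TsVbuuid, vbno uint32) int {

	if ts == nil {
		return -1
	}

	for i, vb := range ts.GetVbnos() {
		if vb == vbno {
			return i
		}
	}
	return -1
}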
// unmarshallTimestamp decodes a base64-encoded protobuf TsVbuuid and converts
// it into a common.TsVbuuid spanning NUM_VB vbuckets.
func unmarshallTimestamp(str string) (*common.TsVbuuid, error) {

	data, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return nil, err
	}

	source := new(protobuf.TsVbuuid)
	if err := proto.Unmarshal(data, source); err != nil {
		return nil, err
	}

	target := common.NewTsVbuuid(source.GetBucket(), NUM_VB)
	// Vbnos/Seqnos/Vbuuids are parallel arrays, so the source is indexed by
	// position i while the target is indexed by vbucket number.
	for i, vbno := range source.Vbnos {
		target.Seqnos[vbno] = source.Seqnos[i]
		target.Vbuuids[vbno] = source.Vbuuids[i]
	}

	return target, nil
}
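
// For illustration, the inverse of unmarshallTimestamp could look like the
// sketch below: marshal the protobuf TsVbuuid and base64-encode it so the
// result round-trips through unmarshallTimestamp. The name marshallTimestamp
// is an assumption, not part of this section:
func marshallTimestamp(ts *protobuf.TsVbuuid) (string, error) {

	data, err := proto.Marshal(ts)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(data), nil
}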
func (k *kvSender) sendRestartVbuckets(ap *projClient.Client, topic string,
	connErrVbs []Vbucket,
	restartTs *protobuf.TsVbuuid) (*protobuf.TopicResponse, error) {

	logging.Infof("KVSender::sendRestartVbuckets Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())
	logging.LazyVerbosef("KVSender::sendRestartVbuckets RestartTs %v", restartTs.Repr)

	//Shutdown the vbucket before restart if there was a ConnErr. Otherwise, if
	//the vbucket is still considered running, projector will ignore the restart request.
	if len(connErrVbs) != 0 {

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets %v Topic %v %v ConnErrVbs %v",
			ap, topic, restartTs.GetBucket(), connErrVbs)

		// Only shut down the Vbs that received a connection error. It is probably not
		// harmful to shut down every VB in the repairTS, including those that only
		// received StreamEnd. But due to network / projector latency, a VB StreamBegin
		// may still be on the way for those VBs (especially when RepairStream has
		// already retried a couple of times). So shutting down all VBs in restartTs may
		// unnecessarily cause race conditions and make the protocol take longer to
		// converge. ShutdownVbuckets should have no effect on a projector that does
		// not own the Vb.
		shutdownTs := k.computeShutdownTs(restartTs, connErrVbs)

		logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets Projector %v Topic %v %v \n\tShutdownTs %v",
			ap, topic, restartTs.GetBucket(), shutdownTs.Repr())

		if err := ap.ShutdownVbuckets(topic, []*protobuf.TsVbuuid{shutdownTs}); err != nil {
			logging.Errorf("KVSender::sendRestartVbuckets Unexpected Error During "+
				"ShutdownVbuckets Request for Projector %v Topic %v. Err %v.", ap, topic, err)

			//all shutdownVbuckets errors are treated as success as it is a best-effort call.
			//RestartVbuckets errors will be acted upon.
		}
	}

	if res, err := ap.RestartVbuckets(topic, []*protobuf.TsVbuuid{restartTs}); err != nil {
		logging.Fatalf("KVSender::sendRestartVbuckets Unexpected Error During "+
			"Restart Vbuckets Request for Projector %v Topic %v %v. Err %v.",
			ap, topic, restartTs.GetBucket(), err)

		return res, err
	} else {
		logging.Infof("KVSender::sendRestartVbuckets Success Projector %v Topic %v %v", ap, topic, restartTs.GetBucket())

		if logging.IsEnabled(logging.Verbose) {
			logging.Verbosef("KVSender::sendRestartVbuckets \nActiveTs %v \nRollbackTs %v",
				debugPrintTs(res.GetActiveTimestamps(), restartTs.GetBucket()),
				debugPrintTs(res.GetRollbackTimestamps(), restartTs.GetBucket()))
		}

		return res, nil
	}
}
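
// computeShutdownTs is called above but not defined in this section. A hedged
// sketch, assuming it copies only the connErrVbs entries out of restartTs so
// that ShutdownVbuckets touches no other vbucket; the body is a hypothetical
// illustration built on the findTimestampOffsetForVb sketch above:
func (k *kvSender) computeShutdownTs(restartTs *protobuf.TsVbuuid, connErrVbs []Vbucket) *protobuf.TsVbuuid {

	shutdownTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, restartTs.GetBucket(), len(connErrVbs))
	for _, vb := range connErrVbs {
		// copy the seqno/vbuuid/snapshot entry for this vb, if restartTs has one
		if offset := findTimestampOffsetForVb(restartTs, uint32(vb)); offset != -1 {
			shutdownTs.Append(uint16(vb), restartTs.Seqnos[offset], restartTs.Vbuuids[offset],
				restartTs.Snapshots[offset].GetStart(), restartTs.Snapshots[offset].GetEnd())
		}
	}
	return shutdownTs
}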