// SendBatch implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether a request is single-range is unknown at this point
// (you can only find out after you've sent to the Range or looked up a
// descriptor that suggests you're multi-range). In those cases, the wrapped
// sender should return an error so that we split and retry only the chunk
// which contains EndTransaction (i.e. the last one).
func (cs *chunkingSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}

	// Deterministically create ClientCmdIDs for all parts of the batch if
	// a CmdID is already set (otherwise, leave them empty).
	var nextID func() proto.ClientCmdID
	empty := proto.ClientCmdID{}
	if empty == ba.CmdID {
		nextID = func() proto.ClientCmdID {
			return empty
		}
	} else {
		rng := rand.New(rand.NewSource(ba.CmdID.Random))
		id := ba.CmdID
		nextID = func() proto.ClientCmdID {
			curID := id             // copy
			id.Random = rng.Int63() // adjust for next call
			return curID
		}
	}

	parts := ba.Split()
	var rplChunks []*proto.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		ba.CmdID = nextID()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate the transaction from the last reply to the next request.
		// The final update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}

	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	reply.ResponseHeader = rplChunks[len(rplChunks)-1].ResponseHeader
	reply.Txn = ba.Txn
	return reply, nil
}
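
// exampleChunkedSend is a hedged usage sketch, not part of the original file:
// the helper itself and the direct struct literal are assumptions for
// illustration; only chunkingSender, its f field, and SendBatch appear in the
// code above. It shows the wiring from the caller's side: the wrapped
// sendChunk function is invoked once per chunk produced by ba.Split(), each
// call carrying its own deterministically derived CmdID, and the caller gets
// back a single BatchResponse whose Responses slice concatenates all chunk
// replies, carrying the last chunk's ResponseHeader and the accumulated
// transaction.
func exampleChunkedSend(
	ctx context.Context,
	sendChunk func(context.Context, proto.BatchRequest) (*proto.BatchResponse, error),
	ba proto.BatchRequest,
) (*proto.BatchResponse, error) {
	// Assumes in-package construction; a dedicated constructor may exist
	// elsewhere in the package.
	cs := &chunkingSender{f: sendChunk}
	return cs.SendBatch(ctx, ba)
}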