// put inserts the given region into the cache, removing any cached regions
// whose key ranges it overlaps. The removed regions are returned so their
// clients can be cleaned up.
func (krc *keyRegionCache) put(reg hrpc.RegionInfo) []hrpc.RegionInfo {
	krc.m.Lock()
	defer krc.m.Unlock()

	// Remove all the entries that overlap with the range of the new region.
	os := krc.getOverlaps(reg)
	for _, o := range os {
		krc.regions.Delete(o.Name())
	}

	krc.regions.Put(reg.Name(), func(interface{}, bool) (interface{}, bool) {
		return reg, true
	})
	return os
}
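// A note on the Put call above: the cache is assumed to be backed by a
// sorted key/value B-tree in the style of github.com/cznic/b, whose Put
// takes an upsert callback that receives the old value and whether it
// exists, and returns the new value and whether to write it. The callback
// used here unconditionally overwrites; spelled out with named parameters
// (a sketch, not the cache's actual code), it is equivalent to:
//
//	krc.regions.Put(key, func(old interface{}, exists bool) (interface{}, bool) {
//		return reg, true // ignore any old value, always write the new region
//	})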
// getOverlaps returns the regions in the cache whose key ranges overlap
// the given region's range.
func (krc *keyRegionCache) getOverlaps(reg hrpc.RegionInfo) []hrpc.RegionInfo {
	var overlaps []hrpc.RegionInfo
	var v interface{}
	var err error

	// Deal with an empty tree up front so that we don't have to check
	// for EOF errors from the enumerator later.
	if krc.regions.Len() == 0 {
		return overlaps
	}

	enum, ok := krc.regions.Seek(reg.Name())
	if !ok {
		// No exact match; check whether there's an overlapping region
		// before the position we seeked to.
		_, _, err = enum.Prev()
		if err == io.EOF {
			// We are at the end of the tree: get the last entry.
			_, v = krc.regions.Last()
			currReg := v.(hrpc.RegionInfo)
			if isRegionOverlap(currReg, reg) {
				return append(overlaps, currReg)
			}
		} else {
			_, v, err = enum.Next()
			if err == io.EOF {
				// We are before the beginning of the tree now; get a new
				// enumerator positioned at the first entry.
				enum.Close()
				enum, err = krc.regions.SeekFirst()
			} else {
				// Otherwise, check the entry before us for overlap.
				currReg := v.(hrpc.RegionInfo)
				if isRegionOverlap(currReg, reg) {
					overlaps = append(overlaps, currReg)
				}
			}
		}
	}

	// Now append all regions that overlap, until the end of the tree
	// or until they no longer overlap.
	_, v, err = enum.Next()
	for err == nil && isRegionOverlap(v.(hrpc.RegionInfo), reg) {
		overlaps = append(overlaps, v.(hrpc.RegionInfo))
		_, v, err = enum.Next()
	}
	enum.Close()

	return overlaps
}
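// The traversal above relies on the enumerator API of the underlying sorted
// tree: Seek positions an enumerator at the first key >= the argument, and
// Next returns the current pair and advances. A minimal self-contained
// sketch of the pattern, assuming the github.com/cznic/b package (the keys
// and values here are made up for illustration):
//
//	t := b.TreeNew(func(a, b interface{}) int {
//		return bytes.Compare(a.([]byte), b.([]byte))
//	})
//	t.Set([]byte("b"), "regionB")
//	t.Set([]byte("d"), "regionD")
//	enum, exact := t.Seek([]byte("c")) // exact == false; Next will yield "d"
//	for {
//		k, v, err := enum.Next()
//		if err == io.EOF {
//			break
//		}
//		fmt.Println(string(k.([]byte)), v) // prints: d regionD
//	}
//	enum.Close()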
// waitOnRegion blocks until the given region becomes available (or the
// RPC's deadline is exceeded), then resends the RPC.
func (c *client) waitOnRegion(rpc hrpc.Call, reg hrpc.RegionInfo) (proto.Message, error) {
	ch := reg.AvailabilityChan()
	if ch == nil {
		// The region is actually available; it may have been marked as
		// such since waitOnRegion was called.
		return c.sendRPC(rpc)
	}

	// The region is unavailable. Wait for it to become available,
	// or for the deadline to be exceeded.
	select {
	case <-ch:
		return c.sendRPC(rpc)
	case <-rpc.Context().Done():
		return nil, ErrDeadline
	}
}
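// AvailabilityChan presumably follows the common Go broadcast idiom: the
// region hands out a single channel while it is unavailable, and marking it
// available closes that channel, which unblocks every waiter at once (a
// receive on a closed channel returns immediately). A minimal sketch of the
// idiom, with all names hypothetical:
//
//	avail := make(chan struct{})
//	for i := 0; i < 3; i++ {
//		go func() {
//			<-avail // blocks until the channel is closed
//			// ... retry the RPC ...
//		}()
//	}
//	close(avail) // all three goroutines unblock simultaneously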
// clientDown marks every region served by the given region's client as
// unavailable, detaches them from the client, and returns the regions that
// this call transitioned to unavailable.
func (rcc *clientRegionCache) clientDown(reg hrpc.RegionInfo) []hrpc.RegionInfo {
	rcc.m.Lock()
	defer rcc.m.Unlock()
	var downregions []hrpc.RegionInfo
	c := reg.Client()
	for _, sharedReg := range rcc.regions[c] {
		succ := sharedReg.MarkUnavailable()
		sharedReg.SetClient(nil)
		if succ {
			downregions = append(downregions, sharedReg)
		}
	}
	delete(rcc.regions, c)
	return downregions
}
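// Note that MarkUnavailable reports whether this call was the one that
// flipped the region from available to unavailable. Collecting only those
// regions in downregions means each region is returned, and therefore gets
// a reestablishment goroutine, at most once, even if clientDown races with
// other code paths that also mark regions unavailable.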
// del removes the given region from its client's list of served regions.
func (rcc *clientRegionCache) del(r hrpc.RegionInfo) {
	rcc.m.Lock()
	defer rcc.m.Unlock()
	c := r.Client()
	if c != nil {
		r.SetClient(nil)

		// Find and remove the region from the client's list. Doing the
		// removal inside the loop avoids clobbering element 0 if the
		// region is somehow not present.
		for i, reg := range rcc.regions[c] {
			if reg == r {
				rcc.regions[c] = append(
					rcc.regions[c][:i], rcc.regions[c][i+1:]...)
				break
			}
		}
	}
}
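// The append-based deletion above is the standard Go idiom for removing
// element i from a slice in place:
//
//	s = append(s[:i], s[i+1:]...)
//
// It shifts the tail left by one and shrinks the length without allocating,
// and it preserves the order of the remaining regions.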
// establishRegion connects the given region to a region client, retrying
// with exponential backoff until it succeeds or the region's table is found
// not to exist.
func (c *client) establishRegion(originalReg hrpc.RegionInfo, host string, port uint16) {
	var err error
	reg := originalReg
	backoff := backoffStart

	for {
		ctx, _ := context.WithTimeout(context.Background(), regionLookupTimeout)
		if port != 0 && err == nil {
			// If this isn't the admin or meta region, check if a client
			// for this host/port already exists.
			if c.clientType != adminClient && reg != c.metaRegionInfo {
				client := c.clients.checkForClient(host, port)
				if client != nil {
					// There's already a client; add it to the
					// region and mark the region as available.
					reg.SetClient(client)
					c.clients.put(client, reg)
					originalReg.MarkAvailable()
					return
				}
			}
			// Make this channel buffered so that if we time out we don't
			// block the newRegionClient goroutine forever.
			ch := make(chan newRegResult, 1)
			var clientType region.ClientType
			if c.clientType == standardClient {
				clientType = region.RegionClient
			} else {
				clientType = region.MasterClient
			}
			go newRegionClient(ctx, ch, clientType, host, port, c.rpcQueueSize, c.flushInterval)

			select {
			case res := <-ch:
				if res.Err == nil {
					reg.SetClient(res.Client)
					if c.clientType != adminClient && reg != c.metaRegionInfo {
						// put will set the region's client so that as soon as
						// we add it to the key->region mapping, concurrent
						// readers are able to find the client.
						c.clients.put(res.Client, reg)
						if reg != originalReg {
							removed := c.regions.put(reg)
							for _, r := range removed {
								c.clients.del(r)
							}
						}
					}
					originalReg.MarkAvailable()
					return
				}
				err = res.Err
			case <-ctx.Done():
				err = ErrDeadline
			}
		}
		if err != nil {
			if err == TableNotFound {
				c.regions.del(originalReg.Name())
				originalReg.MarkAvailable()
				return
			}
			// This will be hit if either there was an error locating the
			// region, or the region was located but there was an error
			// connecting to it.
			backoff, err = sleepAndIncreaseBackoff(ctx, backoff)
			if err != nil {
				continue
			}
		}
		if c.clientType == adminClient {
			host, port, err = c.zkLookup(ctx, c.master)
		} else if reg == c.metaRegionInfo {
			host, port, err = c.zkLookup(ctx, c.meta)
		} else {
			reg, host, port, err = c.locateRegion(ctx, originalReg.Table(), originalReg.StartKey())
		}
	}
}
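// sleepAndIncreaseBackoff is not shown in this section. A plausible shape
// for it, inferred purely from how it is used above (sleep for the current
// backoff unless the context expires first, then return the next backoff),
// is sketched below; the doubling and any cap are assumptions:
//
//	func sleepAndIncreaseBackoff(ctx context.Context, backoff time.Duration) (time.Duration, error) {
//		select {
//		case <-time.After(backoff):
//		case <-ctx.Done():
//			return 0, ErrDeadline
//		}
//		return backoff * 2, nil // possibly capped at some maximum
//	}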
// sendRPCToRegion queues the RPC on the region's client and waits for the
// result, handling region- and client-level failures by marking the
// affected regions unavailable and reestablishing them.
func (c *client) sendRPCToRegion(rpc hrpc.Call, reg hrpc.RegionInfo) (proto.Message, error) {
	client := reg.Client()

	// On the first sendRPC to the meta or admin regions, a goroutine must be
	// manually kicked off to establish the meta or admin region client.
	if (reg == c.adminRegionInfo && client == nil && !c.adminRegionInfo.IsUnavailable()) ||
		(reg == c.metaRegionInfo && client == nil && !c.metaRegionInfo.IsUnavailable()) {
		if reg.MarkUnavailable() {
			go c.reestablishRegion(reg)
		}
	}

	// The region was in the cache; check if it's marked as available.
	if reg.IsUnavailable() {
		return c.waitOnRegion(rpc, reg)
	}

	rpc.SetRegion(reg)

	// Queue the RPC to be sent to the region.
	var err error
	if client == nil {
		err = errors.New("no client for this region")
	} else {
		err = client.QueueRPC(rpc)
	}

	if err != nil {
		// There was an error queueing the RPC.
		// Mark the region as unavailable.
		first := reg.MarkUnavailable()
		// If this was the first goroutine to mark the region as
		// unavailable, start a goroutine to reestablish a connection.
		if first {
			go c.reestablishRegion(reg)
		}
		// Block until the region becomes available.
		return c.waitOnRegion(rpc, reg)
	}

	// Wait for the response.
	var res hrpc.RPCResult
	select {
	case res = <-rpc.ResultChan():
	case <-rpc.Context().Done():
		return nil, ErrDeadline
	}

	// Check for errors.
	if _, ok := res.Error.(region.RetryableError); ok {
		// There's an error specific to this region, but our region client
		// is fine. Mark this region as unavailable (as opposed to all
		// regions sharing the client), and start a goroutine to
		// reestablish it.
		first := reg.MarkUnavailable()
		if first {
			go c.reestablishRegion(reg)
		}
		if reg != c.metaRegionInfo && reg != c.adminRegionInfo {
			// The client won't be in the cache if this is the
			// meta or admin region.
			c.clients.del(reg)
		}
		return c.waitOnRegion(rpc, reg)
	} else if _, ok := res.Error.(region.UnrecoverableError); ok {
		// It was an unrecoverable error, so the region client is
		// considered dead.
		if reg == c.metaRegionInfo || reg == c.adminRegionInfo {
			// If this is the admin client or the meta table, mark the
			// region as unavailable and start up a goroutine to
			// reconnect if it wasn't already marked as such.
			first := reg.MarkUnavailable()
			if first {
				go c.reestablishRegion(reg)
			}
		} else {
			// Otherwise, this is a normal region. Mark all the regions
			// sharing this region's client as unavailable, and start
			// a goroutine to reconnect for each of them.
			downregions := c.clients.clientDown(reg)
			for _, downreg := range downregions {
				go c.reestablishRegion(downreg)
			}
		}
		// Fall through to the case of the region being unavailable,
		// which will result in blocking until it's available again.
		return c.waitOnRegion(rpc, reg)
	}

	// The RPC was successfully sent, or an unknown type of error
	// occurred. In either case, return the results.
	return res.Msg, res.Error
}
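// The two error categories above drive very different recovery paths: a
// region.RetryableError (for instance, the region is temporarily offline
// because it is being moved or split) invalidates only this one region,
// while a region.UnrecoverableError (for instance, the TCP connection to
// the RegionServer died) invalidates every region served by the same
// client, since they all share that single connection.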
// isRegionOverlap returns true if the two regions belong to the same table
// and their key ranges overlap.
func isRegionOverlap(regA, regB hrpc.RegionInfo) bool {
	return bytes.Equal(regA.Table(), regB.Table()) &&
		bytes.Compare(regA.StartKey(), regB.StopKey()) < 0 &&
		bytes.Compare(regA.StopKey(), regB.StartKey()) > 0
}
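// Regions cover half-open key intervals [StartKey, StopKey), so two regions
// of the same table overlap exactly when each starts before the other
// stops. For example, ["a", "c") and ["b", "d") overlap, while ["a", "b")
// and ["b", "c") merely touch and do not, because a region's StopKey is
// exclusive. One caveat: an empty StopKey, which HBase uses to mean "end of
// table", compares as smaller than every key here, so this check assumes
// bounded stop keys.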