Example #1
// Rescind should be called to indicate you no longer wish to be the leader
func (rl *regionLeader) Rescind() {
	rl.cleanup.Do(func() {
		log.Debugf("[Sync:RegionLeader] Cleaning up leadership of '%v'...", rl.lockNode)
		close(rl.rescinded)
		// keep trying to delete the ZK node (to release leadership) until we're sure it doesn't exist
		for {
			err := zookeeper.Delete(rl.lockNode, -1)
			if err == nil || err == gozk.ErrNoNode {
				log.Debugf("[Sync:RegionLeader] Have deleted leadership node '%v'", rl.lockNode)
				inst.Counter(1.0, "sync.regionleader.rescinded")
				break
			}
			log.Warnf("[Sync:RegionLeader] Failed to cleanup/rescind leadership (will retry): %v", err)
			time.Sleep(cleanupDelay)
		}
	})
}
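The regionLeader type itself is not part of this listing. Below is a minimal sketch of what it plausibly looks like, inferred only from the fields Rescind uses above (cleanup behaves like a sync.Once from the standard library, rescinded is a channel that gets closed once, lockNode is the path of the ZooKeeper node); the real definition and constructor in the package may carry more state:

// Sketch only: inferred shape of the struct behind Rescind.
type regionLeader struct {
	lockNode  string        // path of the ephemeral ZK node backing our leadership
	rescinded chan struct{} // closed exactly once when leadership is given up
	cleanup   sync.Once     // ensures the rescind logic runs at most once
}

// Sketch only: the real constructor likely also sets up watches on the node.
func newRegionLeader(lockNode string) *regionLeader {
	return &regionLeader{
		lockNode:  lockNode,
		rescinded: make(chan struct{}),
	}
}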
Example #2
func (r *reaper) reap() {
	r.pathsMtx.RLock()
	// snapshot paths
	keys := make([]string, len(r.paths))
	i := 0
	for k := range r.paths {
		keys[i] = k
		i++
	}
	r.pathsMtx.RUnlock()
	for _, path := range keys {
		exists, stat, err := zk.Exists(path)
		if !exists {
			if err != nil && err != gozk.ErrNoNode {
				log.Errorf("[Sync:RegionLock] Error checking path %s %v", path, err)
				r.resetPath(path)
				// something is still using this
				continue
			}
			// no node
		} else {
			if stat.NumChildren > 0 {
				r.resetPath(path)
				continue
			}
			// node but no children
		}
		// increment reap number
		n := r.incrementPath(path)
		if n >= reaperThreshold {
			// reaped enough times and it's come out as empty. Delete it
			if err := zk.Delete(path, -1); err == nil || err == gozk.ErrNoNode {
				// success
				r.removePath(path)
			} else {
				log.Warnf("[Sync:RegionLock] Error reaping path %s. %v", path, err) // debug not error on purpose
				// some error, most likely node being used by something else, reset and let it live
				r.resetPath(path)
			}
		}
	}
}
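The counter helpers used by reap (resetPath, incrementPath, removePath) are not shown in this example. A rough sketch of how they might be implemented, assuming r.paths is a map[string]int of reap counts guarded by r.pathsMtx (a sync.RWMutex); the real implementations may differ:

// Sketch only: plausible helpers backing the reap counters.
func (r *reaper) incrementPath(path string) int {
	r.pathsMtx.Lock()
	defer r.pathsMtx.Unlock()
	r.paths[path]++
	return r.paths[path]
}

func (r *reaper) resetPath(path string) {
	r.pathsMtx.Lock()
	defer r.pathsMtx.Unlock()
	r.paths[path] = 0
}

func (r *reaper) removePath(path string) {
	r.pathsMtx.Lock()
	defer r.pathsMtx.Unlock()
	delete(r.paths, path)
}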
Example #3
// findLowestSequenceNode within a particular lock path
func findLowestSequenceNode(path string, seq int) (lowestSeq int, prevSeqPath string, err error) {
	// Read all the children of the node
	// This is why we create sequential nodes under a parent node based on the lock ID:
	// if we instead stored all ephemeral locks under a single higher-level parent,
	// we would be checking the nodes of every lock currently in play, rather than
	// just the locks on this ID
	children, _, err := zookeeper.Children(path)
	if err != nil {
		return -1, "", err
	}

	var ttl time.Time
	lowestSeq = seq
	prevSeq := 0
	prevSeqPath = ""
	for _, p := range children {
		// Check if this lock has timed out
		data, _, _ := zookeeper.Get(path + "/" + p)
		if len(data) > 0 {
			// only treat the lock as expired if the TTL decodes cleanly
			if err := ttl.GobDecode(data); err == nil && ttl.Before(time.Now()) {
				log.Tracef("[RegionLock] Deleting expired lock '%s'", path+"/"+p)
				zookeeper.Delete(path+"/"+p, -1)
				continue
			}
		}

		s, err := parseSeq(p)
		if err != nil {
			return -1, "", err
		}
		if s < lowestSeq {
			lowestSeq = s
		}
		if s < seq && s > prevSeq {
			prevSeq = s
			prevSeqPath = p
		}
	}

	return lowestSeq, prevSeqPath, err
}
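parseSeq is not included in this listing. As a hedged guess at its behaviour, it presumably extracts the numeric suffix that ZooKeeper appends to sequential nodes (names created via CreateProtectedEphemeralSequential look like "_c_<guid>-lock-0000000042"); something along these lines, using the standard fmt and strings packages:

// Sketch only: the package's real parseSeq may be implemented differently.
func parseSeq(node string) (int, error) {
	parts := strings.Split(node, "-")
	var seq int
	_, err := fmt.Sscanf(parts[len(parts)-1], "%d", &seq)
	return seq, err
}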
Example #4
// RegionLeader blocks indefinitely until this invocation has been elected the "leader" within the local operating region.
// It will then return a channel that will eventually be closed when leadership is rescinded.
func RegionLeader(id string) Leader {
	path := fmt.Sprintf(regionLeaderPath, id)
	prefix := path + "/lock-"
	var lockNode string

	for {
		// create our lock node -- retry until this is done
		for {
			var err error
			lockNode, err = zookeeper.CreateProtectedEphemeralSequential(prefix, []byte{}, gozk.WorldACL(gozk.PermAll))
			if err == gozk.ErrNoNode {
				createParents(path)
			} else if err == nil {
				break
			} else {
				log.Warnf("[Sync:RegionLeader] ZooKeeper error creating ephemeral lock node for leadership election: %s",
					err.Error())
			}
		}

		err := waitForWinner(path, lockNode)
		if err != nil {
			// try to cleanup - then go again
			zookeeper.Delete(lockNode, -1)
			time.Sleep(time.Second)
			continue
		}

		// we are the leader
		break
	}

	log.Infof("[Sync:RegionLeader] Elected leader of '%v'", id)
	inst.Counter(1.0, "sync.regionleader.elected")

	return newRegionLeader(lockNode)
}
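Read together with Example #1, a call site might look like the sketch below. Only Rescind is confirmed by these examples; everything else (the service ID, the shape of the leader-only work) is illustrative:

// Hypothetical usage: block until we win the election for this ID,
// then make sure leadership is released on the way out.
leader := RegionLeader("my-service")
defer leader.Rescind()

// ... do work that must only run on the elected leader ...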