func makeMod(r *models.Ring) (torus.Ring, error) {
	rep := int(r.ReplicationFactor)
	if rep == 0 {
		rep = 1
	}
	pil := torus.PeerInfoList(r.Peers)
	return &mod{
		version:  int(r.Version),
		peers:    pil,
		peerlist: sort.StringSlice([]string(pil.PeerList())),
		rep:      rep,
	}, nil
}
func makeMod(r *models.Ring) (torus.Ring, error) {
	rep := int(r.ReplicationFactor)
	if rep == 0 {
		rep = 1
	}
	pil := torus.PeerInfoList(r.Peers)
	if rep > len(pil) {
		clog.Noticef("Requested replication level %d, but only %d peers are available. Add nodes to match replication.", rep, len(pil))
	}
	return &mod{
		version: int(r.Version),
		peers:   pil,
		rep:     rep,
	}, nil
}
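For context, here is a minimal standalone sketch of the placement rule a mod-style ring implies: hash the block key, reduce it modulo the peer count, and return rep consecutive peers, wrapping around the list. This is only an illustration under assumed names; pickPeers and the plain string keys are hypothetical, not torus's actual GetPeers signature.

// Illustrative only: the mod placement idea with made-up types.
package main

import (
	"fmt"
	"hash/fnv"
)

// pickPeers hashes the key, takes the remainder against the peer
// count, and returns rep consecutive peers starting at that index.
func pickPeers(peers []string, key string, rep int) []string {
	if len(peers) == 0 {
		return nil
	}
	if rep > len(peers) {
		rep = len(peers)
	}
	h := fnv.New64a()
	h.Write([]byte(key))
	start := int(h.Sum64() % uint64(len(peers)))
	out := make([]string, 0, rep)
	for i := 0; i < rep; i++ {
		out = append(out, peers[(start+i)%len(peers)])
	}
	return out
}

func main() {
	peers := []string{"node-a", "node-b", "node-c"}
	fmt.Println(pickPeers(peers, "volume1:block42", 2))
}

The obvious cost of this scheme is that adding or removing a peer reshuffles almost every key, which is what the ketama ring below avoids.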
func makeKetama(r *models.Ring) (torus.Ring, error) {
	rep := int(r.ReplicationFactor)
	if rep == 0 {
		rep = 1
	}
	pi := torus.PeerInfoList(r.Peers)
	if rep > len(pi) {
		clog.Noticef("Using ring that requests replication level %d, but has only %d peers. Add nodes to match replication.", rep, len(pi))
	}
	return &ketama{
		version: int(r.Version),
		peers:   pi,
		rep:     rep,
		ring:    hashring.NewWithWeights(pi.GetWeights()),
	}, nil
}
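As a hedged usage sketch, assuming the github.com/serialx/hashring package that NewWithWeights above comes from, looking up replicas for a block key could look like the following; the weight map, node names, and key are made up for illustration.

// Sketch of weighted consistent-hash lookup with serialx/hashring.
package main

import (
	"fmt"

	"github.com/serialx/hashring"
)

func main() {
	weights := map[string]int{
		"node-a": 1,
		"node-b": 2, // a heavier node claims a larger share of keys
		"node-c": 1,
	}
	ring := hashring.NewWithWeights(weights)

	// Ask for 2 distinct replicas for a block key; ok is false if the
	// ring cannot supply that many nodes.
	nodes, ok := ring.GetNodes("volume1:block42", 2)
	fmt.Println(nodes, ok)
}

Because placement follows the consistent-hash ring, adding or removing a node only moves roughly its proportional share of keys rather than reshuffling everything.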
func (c *etcdCtx) GetPeers() (torus.PeerInfoList, error) {
	promOps.WithLabelValues("get-peers").Inc()
	resp, err := c.etcd.Client.Get(c.getContext(), MkKey("nodes"), etcdv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	var out []*models.PeerInfo
	for _, x := range resp.Kvs {
		var p models.PeerInfo
		err := p.Unmarshal(x.Value)
		if err != nil {
			// Intentionally ignore a peer that doesn't unmarshal properly.
			clog.Errorf("peer at key %s didn't unmarshal correctly", string(x.Key))
			continue
		}
		if time.Since(time.Unix(0, p.LastSeen)) > peerTimeoutMax {
			// Skip peers that haven't checked in recently enough.
			clog.Warningf("peer at key %s didn't unregister; should be fixed with leases in etcdv3", string(x.Key))
			continue
		}
		out = append(out, &p)
	}
	return torus.PeerInfoList(out), nil
}
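The warning above hints at the cleaner approach: attach each key under the nodes prefix to an etcd lease so it expires on its own when a node stops renewing, rather than filtering on LastSeen here. Below is a rough, hypothetical sketch of such registration; the import path, TTL, and registerPeer itself are assumptions, not existing torus code.

import (
	"context"

	etcdv3 "github.com/coreos/etcd/clientv3"
)

// registerPeer is a hypothetical helper: it writes the peer record
// under a short-lived lease and keeps renewing that lease, so the key
// disappears automatically if this node dies.
func registerPeer(cli *etcdv3.Client, key string, value []byte) error {
	ctx := context.Background()

	// Grant a short-lived lease and attach the peer key to it.
	lease, err := cli.Grant(ctx, 30) // TTL in seconds, illustrative
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, key, string(value), etcdv3.WithLease(lease.ID)); err != nil {
		return err
	}

	// Renew the lease for as long as this node is alive; drain the
	// keep-alive responses so the channel doesn't fill up.
	ch, err := cli.KeepAlive(ctx, lease.ID)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
		}
	}()
	return nil
}

With registration done this way, GetPeers could trust that every key it reads belongs to a live peer and drop the peerTimeoutMax check entirely.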