// partitionCmd prints a report of the nodes responsible for the given partition.
func partitionCmd(r ring.Ring, b *ring.Builder, args []string) error {
	if b != nil {
		return fmt.Errorf("cannot use partition command with a builder; generate a ring and use it on that")
	}
	if len(args) != 1 {
		return fmt.Errorf("use the syntax: partition <value>")
	}
	p, err := strconv.Atoi(args[0])
	if err != nil {
		return err
	}
	first := true
	for _, n := range r.ResponsibleNodes(uint32(p)) {
		if first {
			first = false
		} else {
			fmt.Println()
		}
		report := [][]string{
			[]string{"ID:", fmt.Sprintf("%016x", n.ID())},
			[]string{"Capacity:", fmt.Sprintf("%d", n.Capacity())},
			[]string{"Tiers:", strings.Join(n.Tiers(), "\n")},
			[]string{"Addresses:", strings.Join(n.Addresses(), "\n")},
			[]string{"Meta:", n.Meta()},
		}
		fmt.Print(brimtext.Align(report, nil))
	}
	return nil
}
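// SetRing switches the store to the given ring. If a ring cache path is
// configured, the ring is first persisted to a temporary file and atomically
// renamed into place; any per-address stores no longer present in the new
// ring are shut down.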
func (rs *ReplValueStore) SetRing(r ring.Ring) {
	if r == nil {
		return
	}
	rs.ringLock.Lock()
	if rs.ringCachePath != "" {
		// Cache the new ring to disk via a temp file and an atomic rename.
		dir, name := path.Split(rs.ringCachePath)
		_ = os.MkdirAll(dir, 0755)
		fp, err := ioutil.TempFile(dir, name)
		if err != nil {
			rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
		} else if err := r.Persist(fp); err != nil {
			fp.Close()
			os.Remove(fp.Name())
			rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
		} else {
			fp.Close()
			if err := os.Rename(fp.Name(), rs.ringCachePath); err != nil {
				os.Remove(fp.Name())
				rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
			}
		}
	}
	rs.ring = r
	// Collect the addresses present in the new ring.
	var currentAddrs map[string]struct{}
	if r != nil {
		nodes := r.Nodes()
		currentAddrs = make(map[string]struct{}, len(nodes))
		for _, n := range nodes {
			currentAddrs[n.Address(rs.addressIndex)] = struct{}{}
		}
	}
	// Shut down any per-address stores that are no longer in the ring.
	var shutdownAddrs []string
	rs.storesLock.RLock()
	for a := range rs.stores {
		if _, ok := currentAddrs[a]; !ok {
			shutdownAddrs = append(shutdownAddrs, a)
		}
	}
	rs.storesLock.RUnlock()
	if len(shutdownAddrs) > 0 {
		shutdownStores := make([]*replValueStoreAndTicketChan, len(shutdownAddrs))
		rs.storesLock.Lock()
		for i, a := range shutdownAddrs {
			shutdownStores[i] = rs.stores[a]
			rs.stores[a] = nil
		}
		rs.storesLock.Unlock()
		for i, s := range shutdownStores {
			if err := s.store.Shutdown(context.Background()); err != nil {
				rs.logDebug("replValueStore: error during shutdown of store %s: %s", shutdownAddrs[i], err)
			}
		}
	}
	rs.ringLock.Unlock()
}
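// bootstrapManagedNodes builds a ManagedNode, keyed by node ID, for every
// node in the ring whose address can be parsed; nodes with unparseable
// addresses are skipped and creation failures are logged.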
func bootstrapManagedNodes(ring ring.Ring, ccport int, ctxlog *log.Entry, gopts ...grpc.DialOption) map[uint64]ManagedNode {
	nodes := ring.Nodes()
	m := make(map[uint64]ManagedNode, len(nodes))
	for _, node := range nodes {
		addr, err := ParseManagedNodeAddress(node.Address(0), ccport)
		if err != nil {
			ctxlog.WithFields(log.Fields{
				"node":    node.ID(),
				"address": node.Address(0),
			}).Info("Can't parse address for node (node skipped)")
			continue
		}
		m[node.ID()], err = NewManagedNode(&ManagedNodeOpts{Address: addr, GrpcOpts: gopts})
		if err != nil {
			ctxlog.WithFields(log.Fields{
				"node":    node.ID(),
				"address": node.Address(0),
				"err":     err,
			}).Warning("Unable to bootstrap node")
		} else {
			ctxlog.WithFields(log.Fields{
				"node":    node.ID(),
				"address": node.Address(0),
			}).Debug("Added node")
		}
	}
	return m
}
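// tierCmd prints the tier values at each level, read from the ring when one
// is loaded and from the builder otherwise.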
func tierCmd(r ring.Ring, b *ring.Builder, args []string) error {
	var tiers [][]string
	if r == nil {
		tiers = b.Tiers()
	} else {
		tiers = r.Tiers()
	}
	report := [][]string{
		[]string{"Tier", "Existing"},
		[]string{"Level", "Values"},
	}
	reportOpts := brimtext.NewDefaultAlignOptions()
	reportOpts.Alignments = []brimtext.Alignment{brimtext.Right, brimtext.Left}
	fmted := false
OUT:
	for _, values := range tiers {
		for _, value := range values {
			if strings.Contains(value, " ") {
				fmted = true
				break OUT
			}
		}
	}
	for level, values := range tiers {
		sort.Strings(values)
		var pvalue string
		if fmted {
			for _, value := range values {
				pvalue += fmt.Sprintf(" %#v", value)
			}
			pvalue = strings.Trim(pvalue, " ")
		} else {
			pvalue = strings.Join(values, " ")
		}
		report = append(report, []string{
			strconv.Itoa(level),
			strings.Trim(pvalue, " "),
		})
	}
	fmt.Print(brimtext.Align(report, reportOpts))
	return nil
}
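// mainCmd prints summary statistics for the loaded ring and/or builder.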
func mainCmd(r ring.Ring, b *ring.Builder) error {
	if r != nil {
		// TODO:
		// Version Info (the value as well as the time translation)
		// Number of tier levels
		// Replica count
		// Indication of how risky the assignments are:
		// Replicas not in distinct tiers, nodes
		s := r.Stats()
		report := [][]string{
			[]string{brimtext.ThousandsSep(int64(s.PartitionCount), ","), "Partitions"},
			[]string{brimtext.ThousandsSep(int64(s.PartitionBitCount), ","), "Partition Bits"},
			[]string{brimtext.ThousandsSep(int64(s.NodeCount), ","), "Nodes"},
			[]string{brimtext.ThousandsSep(int64(s.InactiveNodeCount), ","), "Inactive Nodes"},
			[]string{brimtext.ThousandsSepU(s.TotalCapacity, ","), "Total Node Capacity"},
			[]string{fmt.Sprintf("%.02f%%", s.MaxUnderNodePercentage), fmt.Sprintf("Worst Underweight Node (ID %016x)", s.MaxUnderNodeID)},
			[]string{fmt.Sprintf("%.02f%%", s.MaxOverNodePercentage), fmt.Sprintf("Worst Overweight Node (ID %016x)", s.MaxOverNodeID)},
		}
		reportOpts := brimtext.NewDefaultAlignOptions()
		reportOpts.Alignments = []brimtext.Alignment{brimtext.Right, brimtext.Left}
		fmt.Print(brimtext.Align(report, reportOpts))
	}
	if b != nil {
		// TODO:
		// Inactive node count
		// Total capacity
		report := [][]string{
			[]string{brimtext.ThousandsSep(int64(len(b.Nodes())), ","), "Nodes"},
			[]string{brimtext.ThousandsSep(int64(b.ReplicaCount()), ","), "Replicas"},
			[]string{brimtext.ThousandsSep(int64(b.PointsAllowed()), ","), "Points Allowed"},
			[]string{brimtext.ThousandsSep(int64(b.MaxPartitionBitCount()), ","), "Max Partition Bits"},
			[]string{brimtext.ThousandsSep(int64(b.MoveWait()), ","), "Move Wait"},
		}
		reportOpts := brimtext.NewDefaultAlignOptions()
		reportOpts.Alignments = []brimtext.Alignment{brimtext.Right, brimtext.Left}
		fmt.Print(brimtext.Align(report, reportOpts))
		return nil
	}
	return nil
}
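// replicateRing pushes the ring and builder bytes to every slave and returns
// an error if more than half of the slaves fail to store them.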
// TODO: Need concurrency; we should fire off the replication requests in
// goroutines and collect the results. On a failure we still need to send the
// rollback or have the slave's commit deadline trigger.
func (s *Server) replicateRing(r ring.Ring, rb, bb *[]byte) error {
	failcount := 0
	for _, slave := range s.slaves {
		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(_SYN_REGISTER_TIMEOUT)*time.Second)
		i := &pb.RingMsg{
			Version:  r.Version(),
			Ring:     *rb,
			Builder:  *bb,
			Deadline: time.Now().Add(60 * time.Second).Unix(),
			Rollback: s.r.Version(),
		}
		res, err := slave.client.Store(ctx, i)
		// Release the timeout's resources as soon as the RPC completes.
		cancel()
		if err != nil {
			log.Println(err)
			failcount++
			continue
		}
		if res.Version != r.Version() {
			log.Printf("Version on remote node %+v did not match the local version. Got %+v.", slave, res)
			failcount++
			continue
		}
		if !res.Ring || !res.Builder {
			log.Printf("res is: %#v\n", res)
			log.Printf("Slave failed to store ring or builder: %s", res.ErrMsg)
			failcount++
			continue
		}
		log.Printf("<-- Slave response: %+v", res)
		slave.version = res.Version
		slave.last = time.Now()
		slave.status = true
	}
	if failcount > (len(s.slaves) / 2) {
		return fmt.Errorf("Failed to get replication majority")
	}
	return nil
}
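// persist writes the ring, or the builder when no ring is given, to a
// temporary file in the destination directory and then atomically renames it
// to filename.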
func persist(r ring.Ring, b *ring.Builder, filename string) error {
	dir, name := path.Split(filename)
	if dir == "" {
		dir = "."
	}
	f, err := ioutil.TempFile(dir, name+".")
	if err != nil {
		return err
	}
	tmp := f.Name()
	if r != nil {
		err = r.Persist(f)
	} else {
		err = b.Persist(f)
	}
	if err != nil {
		f.Close()
		return err
	}
	if err = f.Close(); err != nil {
		return err
	}
	// tmp already includes the directory from ioutil.TempFile, so rename it
	// directly rather than joining dir onto it again.
	return os.Rename(tmp, filename)
}
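// nodeCmd filters the nodes by the given args, applies any "set" arguments
// through the builder, and prints either a detailed report per node or a
// summary table of all matching nodes.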
func nodeCmd(r ring.Ring, b *ring.Builder, args []string, full bool) (changed bool, err error) {
	var nodes ring.NodeSlice
	if r != nil {
		nodes = r.Nodes()
	} else {
		bnodes := b.Nodes()
		nodes = make(ring.NodeSlice, len(bnodes))
		for i := len(nodes) - 1; i >= 0; i-- {
			nodes[i] = bnodes[i]
		}
	}
	filterArgs := make([]string, 0)
	setArgs := make([]string, 0)
	setFound := false
	for _, arg := range args {
		if arg == "set" {
			setFound = true
			continue
		}
		if setFound {
			setArgs = append(setArgs, arg)
		} else {
			filterArgs = append(filterArgs, arg)
		}
	}
	if len(setArgs) > 0 && b == nil {
		err = fmt.Errorf("set is only valid for builder files")
		return
	}
	if nodes, err = nodes.Filter(filterArgs); err != nil {
		return
	}
	if len(nodes) > 0 && len(setArgs) > 0 {
		for _, n := range nodes {
			if err = addOrSetCmd(r, b, setArgs, n.(ring.BuilderNode)); err != nil {
				return
			}
			changed = true
		}
	}
	if full || len(nodes) == 1 {
		first := true
		for _, n := range nodes {
			if first {
				first = false
			} else {
				fmt.Println()
			}
			report := [][]string{
				[]string{"ID:", fmt.Sprintf("%016x", n.ID())},
				[]string{"Active:", fmt.Sprintf("%v", n.Active())},
				[]string{"Capacity:", fmt.Sprintf("%d", n.Capacity())},
				[]string{"Tiers:", strings.Join(n.Tiers(), "\n")},
				[]string{"Addresses:", strings.Join(n.Addresses(), "\n")},
				[]string{"Meta:", n.Meta()},
			}
			fmt.Print(brimtext.Align(report, nil))
		}
		return
	}
	header := []string{
		"ID",
		"Active",
		"Capacity",
		"Address",
		"Meta",
	}
	report := [][]string{header}
	reportAlign := brimtext.NewDefaultAlignOptions()
	reportAlign.Alignments = []brimtext.Alignment{
		brimtext.Left,
		brimtext.Right,
		brimtext.Right,
		brimtext.Right,
		brimtext.Left,
	}
	reportLine := func(n ring.Node) []string {
		return []string{
			fmt.Sprintf("%016x", n.ID()),
			fmt.Sprintf("%v", n.Active()),
			fmt.Sprintf("%d", n.Capacity()),
			n.Address(0),
			n.Meta(),
		}
	}
	for _, n := range nodes {
		if n.Active() {
			report = append(report, reportLine(n))
		}
	}
	for _, n := range nodes {
		if !n.Active() {
			report = append(report, reportLine(n))
		}
	}
	fmt.Print(brimtext.Align(report, reportAlign))
	return
}