func (this *Kateway) generateFlameGraph(zkzone *zk.ZkZone) {
	kws, _ := zkzone.KatewayInfos()
	for _, kw := range kws {
		if kw.Id != this.id {
			continue
		}

		pprofAddr := kw.DebugAddr
		if len(pprofAddr) > 0 && pprofAddr[0] == ':' {
			pprofAddr = kw.Ip + pprofAddr
		}
		pprofAddr = fmt.Sprintf("http://%s/debug/pprof/profile", pprofAddr)

		cmd := pipestream.New(os.Getenv("GOPATH")+"/bin/go-torch", "-u", pprofAddr)
		err := cmd.Open()
		swallow(err)
		defer cmd.Close()

		scanner := bufio.NewScanner(cmd.Reader())
		scanner.Split(bufio.ScanLines)
		for scanner.Scan() {
			fmt.Println(scanner.Text())
		}

		this.Ui.Output("torch.svg generated")
	}
}
func (this *Migrate) executeReassignment() {
	/*
		1. kafka-reassign-partitions.sh writes /admin/reassign_partitions
		2. the controller listens on the path above
		3. for each topic partition, the controller does the following:
		3.1. Start new replicas in RAR – AR (RAR = Reassigned Replicas, AR = original list of Assigned Replicas)
		3.2. Wait until new replicas are in sync with the leader
		3.3. If the leader is not in RAR, elect a new leader from RAR
		3.4. Stop old replicas AR – RAR
		3.5. Write new AR
		3.6. Remove partition from the /admin/reassign_partitions path
	*/
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-reassign-partitions.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", this.zkcluster.ZkConnectAddr()),
		fmt.Sprintf("--reassignment-json-file %s", reassignNodeFilename),
		"--execute")
	err := cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
}
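// The file fed to --reassignment-json-file above uses Kafka's documented
// reassignment format: {"version":1,"partitions":[{"topic":...,"partition":...,
// "replicas":[...]}]}. A minimal sketch of generating it; the helper name and
// types are hypothetical, not part of this codebase (imports: encoding/json,
// io/ioutil):
type partitionReassignment struct {
	Topic     string `json:"topic"`
	Partition int    `json:"partition"`
	Replicas  []int  `json:"replicas"` // target broker ids
}

func writeReassignmentFile(fname, topic string, replicas map[int][]int) error {
	plan := struct {
		Version    int                     `json:"version"`
		Partitions []partitionReassignment `json:"partitions"`
	}{Version: 1}
	for partition, brokers := range replicas {
		plan.Partitions = append(plan.Partitions, partitionReassignment{
			Topic:     topic,
			Partition: partition,
			Replicas:  brokers,
		})
	}
	b, err := json.Marshal(plan)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(fname, b, 0644)
}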
func (this *Ssh) ssh(user, relayHost, host string) {
	expectScript := fmt.Sprintf(`
#!/usr/bin/expect
spawn ssh %s@%s
expect "*to exit*"
send "%s\r"
send "sudo -s\r"
interact
`, user, relayHost, host)
	f, err := ioutil.TempFile("", "expect_script")
	if err != nil {
		panic(err)
	}
	fname := f.Name()
	f.Close()
	defer os.Remove(fname)

	err = ioutil.WriteFile(fname, []byte(strings.TrimSpace(expectScript)), 0666)
	if err != nil {
		panic(err)
	}

	cmd := pipestream.New("/usr/bin/expect", fname)
	err = cmd.Open()
	if err != nil {
		panic(err)
	}
	defer cmd.Close()
}
func (this *Ext4fs) genNoJournalScript(mountPointPattern string) {
	cmd := pipestream.New("df")
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	fileSystems := make(map[string]string) // dev:mountPoint
	for scanner.Scan() {
		tuples := strings.Fields(scanner.Text())
		if len(tuples) < 6 {
			// skip the df header and any wrapped lines lacking the mount point column
			continue
		}
		if !patternMatched(tuples[5], mountPointPattern) {
			continue
		}

		fileSystems[tuples[0]] = tuples[5]
	}
	swallow(scanner.Err())

	for dev, mp := range fileSystems {
		this.Ui.Output(fmt.Sprintf(`umount %s
tune2fs -O ^has_journal %s
fsck.ext4 -f %s`, mp, dev, dev))
	}
}
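// patternMatched is defined elsewhere in the codebase; a minimal sketch of a
// substring matcher consistent with the call above (an assumption — the real
// helper may support globs or regular expressions):
func patternMatched(s, pattern string) bool {
	// an empty pattern matches every mount point
	return pattern == "" || strings.Contains(s, pattern)
}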
func (this *Partition) addPartition(zkAddrs string, topic string, partitions int) error {
	log.Info("adding partitions to topic: %s", topic)

	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		"--alter",
		fmt.Sprintf("--topic %s", topic),
		fmt.Sprintf("--partitions %d", partitions))
	err := cmd.Open()
	if err != nil {
		return err
	}
	defer cmd.Close() // close on every return path, not only on success

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
	if err = scanner.Err(); err != nil {
		return err
	}

	log.Info("added partitions to topic: %s", topic)
	return nil
}
func (this *WatchLoadAvg) highLoadCount() (n int64, err error) {
	const threshold = 6.

	cmd := pipestream.New("consul", "exec",
		"uptime", "|", "grep", "load")
	err = cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		line := scanner.Text()
		load1m, e := ctx.ExtractLoadAvg1m(line)
		if e != nil {
			continue
		}

		if load1m > threshold {
			log.Warn(line)
			n++
		}
	}
	return
}
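// ctx.ExtractLoadAvg1m's body isn't shown in this listing; a minimal sketch of
// such a parser, assuming the standard uptime output
// "... load average: 0.52, 0.58, 0.59" (imports: errors, strconv, strings):
func extractLoadAvg1m(line string) (float64, error) {
	const marker = "load average"
	idx := strings.Index(line, marker)
	if idx < 0 {
		return 0, errors.New("load average not found")
	}
	// take the first comma-separated figure after the marker: the 1-minute average
	rest := strings.TrimLeft(line[idx+len(marker):], ": ")
	fields := strings.SplitN(rest, ",", 2)
	return strconv.ParseFloat(strings.TrimSpace(fields[0]), 64)
}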
func (this *Topics) resetTopicConfig(zkcluster *zk.ZkCluster, topic string) {
	zkAddrs := zkcluster.ZkConnectAddr()
	key := "retention.ms"
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		"--alter",
		fmt.Sprintf("--topic %s", topic),
		fmt.Sprintf("--deleteConfig %s", key))
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	output := make([]string, 0)
	for scanner.Scan() {
		output = append(output, scanner.Text())
	}
	swallow(scanner.Err())

	path := zkcluster.GetTopicConfigPath(topic)
	this.Ui.Info(path)
	for _, line := range output {
		this.Ui.Output(line)
	}
}
func (this *ZkCluster) AlterTopic(topic string, ts *sla.TopicSla) (output []string, err error) {
	zkAddrs := this.ZkConnectAddr()
	args := []string{
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		"--alter",
		fmt.Sprintf("--topic %s", topic),
	}
	configs := ts.DumpForAlterTopic()
	if len(configs) == 0 {
		err = errors.New("no alter topic configs")
		return
	}
	args = append(args, configs...)

	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()), args...)
	if err = cmd.Open(); err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	output = make([]string, 0)
	for scanner.Scan() {
		output = append(output, scanner.Text())
	}
	err = scanner.Err()
	return
}
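// ts.DumpForAlterTopic's body is not shown in this listing. Judging from how
// its result is appended to the kafka-topics.sh argument list above, each
// element must be a complete flag. A sketch under that assumption, using
// kafka-topics.sh's --config key=value syntax; the field name is illustrative
// (the real sla.TopicSla may differ):
func (ts *TopicSla) DumpForAlterTopic() []string {
	configs := make([]string, 0, 2)
	if ts.RetentionHours > 0 {
		// per-topic retention override, hours converted to milliseconds
		ms := int64(ts.RetentionHours) * 3600 * 1000
		configs = append(configs, fmt.Sprintf("--config retention.ms=%d", ms))
	}
	return configs
}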
func (this *ZkCluster) DeleteTopic(topic string) (output []string, err error) {
	zkAddrs := this.ZkConnectAddr()
	args := []string{
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		"--delete",
		fmt.Sprintf("--topic %s", topic),
	}
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()), args...)
	if err = cmd.Open(); err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	output = make([]string, 0)
	for scanner.Scan() {
		output = append(output, scanner.Text())
	}
	err = scanner.Err()
	return
}
func (this *Kateway) doVisualize() {
	cmd := pipestream.New("/usr/local/bin/logstalgia", "-f", this.visualLog)
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		// discard logstalgia's output; the loop just blocks until it exits
	}
}
func (this *Rebalance) executeReassignment() {
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-preferred-replica-election.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", this.zkcluster.ZkConnectAddr()),
		fmt.Sprintf("--path-to-json-file %s", preferredReplicaJsonFile))
	err := cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
}
func (this *Upgrade) runCmd(c string, args []string) {
	this.Ui.Output(fmt.Sprintf(" %s %+v", c, args))

	cmd := pipestream.New(c, args...)
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(fmt.Sprintf(" %s", scanner.Text()))
	}
	if err = scanner.Err(); err != nil {
		this.Ui.Error(err.Error())
	}
}
func playSession(seq int, rounds int) {
	defer func() {
		wg.Done()
		log.Printf("session[%d.%d] done", seq, rounds)
	}()

	for i := 0; i < rounds; i++ {
		testcase := pipestream.New(options.cmd, options.args...)
		err := testcase.Open()
		if err != nil {
			log.Printf("[%d.%d.%d] %s", seq, rounds, i, err.Error())
			continue
		}

		log.Printf("starting game session[%d.%d.%d]", seq, rounds, i)

		scanner := bufio.NewScanner(testcase.Reader())
		scanner.Split(bufio.ScanLines)
		var lastLine string
		for scanner.Scan() {
			lastLine = scanner.Text()
		}
		if err = scanner.Err(); err != nil {
			log.Printf("[%d.%d.%d]: %s", seq, rounds, i, err)
		}
		testcase.Close()

		// the last output line carries the verdict: an "OK" prefix means success
		if strings.HasPrefix(lastLine, "OK") {
			atomic.AddInt64(&succ, 1)
		} else {
			atomic.AddInt64(&fail, 1)
		}
		log.Printf("finished game session[%d.%d.%d] %s", seq, rounds, i, lastLine)
	}
}
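// playSession relies on package globals (wg, succ, fail, options) declared
// elsewhere. A minimal sketch of a driver that fans sessions out and reports
// the tallies; the function name and wiring are illustrative:
func playAll(sessions, rounds int) {
	for seq := 0; seq < sessions; seq++ {
		wg.Add(1)
		go playSession(seq, rounds)
	}
	wg.Wait()
	log.Printf("all sessions done: %d ok, %d failed",
		atomic.LoadInt64(&succ), atomic.LoadInt64(&fail))
}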
func (this *Migrate) verify() {
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-reassign-partitions.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", this.zkcluster.ZkConnectAddr()),
		fmt.Sprintf("--reassignment-json-file %s", reassignNodeFilename),
		"--verify")
	err := cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), "successfully") {
			this.Ui.Info(scanner.Text())
		} else {
			this.Ui.Warn(scanner.Text())
		}
	}
}
func (this *Brokers) doShowVersions() {
	kafkaVerExp := regexp.MustCompile(`/kafka_(?P<ver>[-\d.]*)\.jar`)
	processExp := regexp.MustCompile(`kfk_(?P<process>\S*)/config/server.properties`)

	cmd := pipestream.New("/usr/bin/consul", "exec",
		"pgrep", "-lf", "java",
		"|", "grep", "-w", "kafka",
		"|", "grep", "-vw", "grep")
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)

	var (
		line     string
		lastLine string
	)
	hosts := make(map[string]struct{})
	lines := make([]string, 0)
	header := "Process|Host|Version"
	lines = append(lines, header)
	records := make(map[string]map[string]string) // {process: {host: ver}}
	for scanner.Scan() {
		line = scanner.Text()
		if strings.Contains(line, "finished with exit code") {
			continue
		}
		if strings.Contains(line, "node(s) completed") {
			continue
		}

		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		if _, err := strconv.Atoi(fields[1]); err != nil {
			// field 1 should be a pid; if it is not, this line continues the
			// last one, so merge them and re-split
			line = lastLine + strings.Join(fields[1:], " ")
			fields = strings.Fields(line) // assign, don't shadow, so the merged fields are used below
			if len(fields) < 2 {
				continue
			}
		}
		lastLine = line

		// version
		matched := kafkaVerExp.FindStringSubmatch(line)
		if len(matched) < 2 {
			continue
		}
		ver := matched[1]

		// process name
		matched = processExp.FindStringSubmatch(line)
		if len(matched) < 2 {
			continue
		}
		process := matched[1]

		// got a valid process record
		if _, present := records[process]; !present {
			records[process] = make(map[string]string)
		}
		host := fields[0][:len(fields[0])-1] // discard the trailing ':'
		hosts[host] = struct{}{}
		records[process][host] = ver
	}
	swallow(scanner.Err())

	sortedProcesses := make([]string, 0, len(records))
	for proc := range records {
		sortedProcesses = append(sortedProcesses, proc)
	}
	sort.Strings(sortedProcesses)

	procsWithSingleInstance := make([]string, 0)
	for _, proc := range sortedProcesses {
		sortedHosts := make([]string, 0, len(records[proc]))
		for host := range records[proc] {
			sortedHosts = append(sortedHosts, host)
		}
		sort.Strings(sortedHosts)
		if len(sortedHosts) < 2 {
			procsWithSingleInstance = append(procsWithSingleInstance, proc)
		}

		for _, host := range sortedHosts {
			lines = append(lines, fmt.Sprintf("%s|%s|%s", proc, host, records[proc][host]))
		}
	}

	this.Ui.Output(columnize.SimpleFormat(lines))
	this.Ui.Output("")
	this.Ui.Output(fmt.Sprintf("TOTAL %d processes running on %d hosts", len(lines)-1, len(hosts)))
	if len(procsWithSingleInstance) > 0 {
		this.Ui.Output("\nProcesses with a single instance (SPOF):")
		this.Ui.Warn(fmt.Sprintf("%v", procsWithSingleInstance))
	}
}
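// For reference, doShowVersions expects consul exec output shaped roughly like
// the following (illustrative, not captured from a real cluster):
//
//   host1: 12345 java -Xmx1G ... /opt/kafka/libs/kafka_2.10-0.8.2.2.jar ... kfk_trade/config/server.properties
//   host1: finished with exit code 0
//   3 / 3 node(s) completed / acknowledged
//
// Each payload line starts with "host:" (hence the trailing-':' trim), field 1
// is the pid, the jar path yields the version via kafkaVerExp, and the
// server.properties path yields the process name via processExp.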