func (this *pubStore) Start() (err error) { if ctx.KafkaHome() == "" { return fmt.Errorf("empty kafka_home in ~/.gafka.cf") } if !gio.DirExists(ctx.KafkaHome()) { return fmt.Errorf("kafka not installed in %s, run 'gk deploy -kfkonly'", ctx.KafkaHome()) } // warmup: create pools according the current kafka topology for _, cluster := range meta.Default.ClusterNames() { this.pubPools[cluster] = newPubPool(this, cluster, meta.Default.BrokerList(cluster), this.pubPoolsCapcity) } this.wg.Add(1) go func() { defer this.wg.Done() for { select { case <-meta.Default.RefreshEvent(): this.doRefresh() case <-this.shutdownCh: log.Trace("pub store[%s] stopped", this.Name()) return } } }() return }
func (this *Move) Run(args []string) (exitCode int) { cmdFlags := flag.NewFlagSet("move", flag.ContinueOnError) cmdFlags.Usage = func() { this.Ui.Output(this.Help()) } cmdFlags.StringVar(&this.from, "from", "", "") cmdFlags.StringVar(&this.to, "to", "", "") if err := cmdFlags.Parse(args); err != nil { return 1 } recoveryPointCheckpointFile := "recovery-point-offset-checkpoint" highWatermarkFilename := "replication-offset-checkpoint" if validateArgs(this, this.Ui). require("-from", "-to"). invalid(args) { return 2 } parent := filepath.Dir(this.to) if !gio.DirExists(parent) { this.Ui.Error(fmt.Sprintf("target %s not exists", this.to)) return 1 } this.Ui.Warn("shutdown kafka instance before you proceed") this.Ui.Output(fmt.Sprintf("skip %s which flush every 1m, %s which flush every 5s", recoveryPointCheckpointFile, highWatermarkFilename)) this.Ui.Info(fmt.Sprintf("mv %s %s", this.from, this.to)) return }
// mkdirIfNotExist creates dir (and any missing parents) with mode 0700.
// os.MkdirAll already returns nil when the directory exists, so no
// existence pre-check is needed — this also avoids the TOCTOU race
// between checking and creating.
func mkdirIfNotExist(dir string) (err error) {
	return os.MkdirAll(dir, 0700)
}
func (*Deploy) validateLogDirs(dirs string) (invalidDir string) { for _, dir := range strings.Split(dirs, ",") { normDir := strings.TrimRight(dir, "/") parent := path.Dir(normDir) if !gio.DirExists(parent) { invalidDir = dir return // return on 1st invalid dir found } } return }
// TODO
// 1. broker id assignment
// 2. port assignment

// Run deploys a single kafka broker instance: it parses the flags, then
// either prints uninstall commands (-uninstall), installs only the kafka
// runtime (-kfkonly), prints a suggested deploy command (-demo), or lays
// out the full instance directory (bin/, config/, log.dirs) from the
// bundled templates and prints the chkconfig/init.d follow-up commands.
// Must be run as root for a real deployment.
func (this *Deploy) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("deploy", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", "", "")
	cmdFlags.StringVar(&this.cluster, "c", "", "")
	cmdFlags.StringVar(&this.kafkaBaseDir, "kafka.base", ctx.KafkaHome(), "")
	cmdFlags.StringVar(&this.brokerId, "broker.id", "", "")
	cmdFlags.StringVar(&this.tcpPort, "port", "", "")
	cmdFlags.StringVar(&this.rootPah, "root", "/var/wd", "")
	cmdFlags.StringVar(&this.ip, "ip", "", "")
	cmdFlags.StringVar(&this.logDirs, "log.dirs", "", "")
	cmdFlags.StringVar(&this.runAs, "user", "sre", "")
	cmdFlags.StringVar(&this.uninstall, "uninstall", "", "")
	cmdFlags.BoolVar(&this.demoMode, "demo", false, "")
	cmdFlags.BoolVar(&this.installKafkaOnly, "kfkonly", false, "")
	cmdFlags.BoolVar(&this.dryRun, "dryrun", true, "")
	cmdFlags.StringVar(&this.influxDbAddr, "influx", "", "")
	cmdFlags.StringVar(&this.kafkaVer, "ver", "2.10-0.8.2.2", "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	// Uninstall mode: read log.dirs out of the instance's server.properties
	// and print (not execute) the rm/chkconfig commands to remove it.
	if this.uninstall != "" {
		serverProperties := fmt.Sprintf("%s/config/server.properties", this.uninstall)
		lines, err := gio.ReadLines(serverProperties)
		if err != nil {
			this.Ui.Error(err.Error())
			return 2
		}
		var logDirs []string
		for _, line := range lines {
			if strings.HasPrefix(line, "log.dirs") {
				// SplitN with 2 keeps any '=' inside the value intact.
				parts := strings.SplitN(line, "=", 2)
				logDirs = strings.Split(parts[1], ",")
				break
			}
		}
		if len(logDirs) == 0 {
			this.Ui.Error("empty log.dirs")
			return 2
		}

		for _, logDir := range logDirs {
			this.Ui.Output(fmt.Sprintf("rm -rf %s", logDir))
		}
		name := filepath.Base(this.uninstall)
		this.Ui.Output(fmt.Sprintf("chkconfig --del %s", name))
		this.Ui.Output(fmt.Sprintf("rm -f /etc/init.d/%s", name))
		this.Ui.Output(fmt.Sprintf("rm -rf %s", this.uninstall))
		return 0
	}

	// Everything below performs real filesystem changes: root required.
	if !ctx.CurrentUserIsRoot() {
		this.Ui.Error("requires root priviledges!")
		return 1
	}

	// kafka.base must end with the requested version string.
	if !strings.HasSuffix(this.kafkaBaseDir, this.kafkaVer) {
		this.Ui.Error(fmt.Sprintf("kafka.base[%s] does not match ver[%s]", this.kafkaBaseDir, this.kafkaVer))
		return 1
	}

	if this.installKafkaOnly {
		this.installKafka()
		return
	}

	if validateArgs(this, this.Ui).
		require("-z", "-c").
		invalid(args) {
		return 2
	}

	// The target cluster must already be registered in zookeeper.
	this.zkzone = zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
	clusers := this.zkzone.Clusters()
	zkchroot, present := clusers[this.cluster]
	if !present {
		this.Ui.Error(fmt.Sprintf("run 'gk clusters -z %s -add %s -p $zkchroot' first!", this.zone, this.cluster))
		return 1
	}

	// The run-as user must exist; created files are chown'ed to it.
	var err error
	this.userInfo, err = user.Lookup(this.runAs)
	swallow(err)

	// Demo mode only prints a suggested deploy command, then exits.
	if this.demoMode {
		this.demo()
		return
	}

	if validateArgs(this, this.Ui).
		require("-broker.id", "-port", "-ip", "-log.dirs").
		invalid(args) {
		return 2
	}

	// NOTE(review): dryRun defaults to true but directories are created
	// unconditionally below — the flag only controls the echo output here.
	if this.dryRun {
		this.Ui.Output(fmt.Sprintf("mkdir %s/logs and chown to %s", this.instanceDir(), this.runAs))
	}
	err = os.MkdirAll(fmt.Sprintf("%s/logs", this.instanceDir()), 0755)
	swallow(err)
	chown(fmt.Sprintf("%s/logs", this.instanceDir()), this.userInfo)

	invalidDir := this.validateLogDirs(this.logDirs)
	if invalidDir != "" {
		this.Ui.Error(fmt.Sprintf("%s in log.dirs not exists!", invalidDir))
		return 2
	}

	// prepare the root directory
	this.rootPah = strings.TrimSuffix(this.rootPah, "/")
	if this.dryRun {
		this.Ui.Output(fmt.Sprintf("mkdir %s/bin and chown to %s", this.instanceDir(), this.runAs))
	}
	err = os.MkdirAll(fmt.Sprintf("%s/bin", this.instanceDir()), 0755)
	swallow(err)
	chown(fmt.Sprintf("%s/bin", this.instanceDir()), this.userInfo)

	if this.dryRun {
		this.Ui.Output(fmt.Sprintf("mkdir %s/config and chown to %s", this.instanceDir(), this.runAs))
	}
	err = os.MkdirAll(fmt.Sprintf("%s/config", this.instanceDir()), 0755)
	swallow(err)
	chown(fmt.Sprintf("%s/config", this.instanceDir()), this.userInfo)

	// Variables substituted into the bin/config templates below.
	// All fields are strings because they are injected as template text.
	type templateVar struct {
		KafkaBase             string
		BrokerId              string
		TcpPort               string
		Ip                    string
		User                  string
		ZkChroot              string
		ZkAddrs               string
		InstanceDir           string
		LogDirs               string
		IoThreads             string
		NetworkThreads        string
		InfluxReporterEnabled string
		InfluxDbHost          string
		InfluxDbPort          string
	}
	// Optional influxdb metrics reporter: "-influx host:port".
	if this.influxDbAddr != "" {
		this.influxDbHost, this.influxdbPort, err = net.SplitHostPort(this.influxDbAddr)
		if err != nil {
			this.Ui.Error(err.Error())
			return 2
		}
		if this.influxDbHost == "" || this.influxdbPort == "" {
			this.Ui.Error("empty influxdb host or port")
			return 2
		}
	}
	influxReporterEnabled := "false"
	if this.influxDbHost != "" {
		influxReporterEnabled = "true"
	}
	data := templateVar{
		ZkAddrs:               this.zkzone.ZkAddrs(),
		ZkChroot:              zkchroot,
		KafkaBase:             this.kafkaBaseDir,
		BrokerId:              this.brokerId,
		Ip:                    this.ip,
		InstanceDir:           this.instanceDir(),
		User:                  this.runAs,
		TcpPort:               this.tcpPort,
		LogDirs:               this.logDirs,
		InfluxReporterEnabled: influxReporterEnabled,
		InfluxDbHost:          this.influxDbHost,
		InfluxDbPort:          this.influxdbPort,
	}
	// 3 io threads per log dir; network threads = half the CPUs, min 2.
	data.IoThreads = strconv.Itoa(3 * len(strings.Split(data.LogDirs, ",")))
	networkThreads := ctx.NumCPU() / 2
	if networkThreads < 2 {
		networkThreads = 2
	}
	data.NetworkThreads = strconv.Itoa(networkThreads) // TODO not used yet

	// create the log.dirs directory and chown to sre
	logDirs := strings.Split(this.logDirs, ",")
	for _, logDir := range logDirs {
		if this.dryRun {
			this.Ui.Output(fmt.Sprintf("mkdir %s and chown to %s", logDir, this.runAs))
		}
		swallow(os.MkdirAll(logDir, 0755))
		chown(logDir, this.userInfo)
	}

	// package the kafka runtime together
	if !gio.DirExists(this.kafkaLibDir()) {
		this.installKafka()
	}

	// bin
	// The first three scripts take no template data (nil); the rest are
	// rendered with the templateVar assembled above.
	writeFileFromTemplate("template/bin/kafka-topics.sh",
		fmt.Sprintf("%s/bin/kafka-topics.sh", this.instanceDir()), 0755, nil, this.userInfo)
	writeFileFromTemplate("template/bin/kafka-reassign-partitions.sh",
		fmt.Sprintf("%s/bin/kafka-reassign-partitions.sh", this.instanceDir()), 0755, nil, this.userInfo)
	writeFileFromTemplate("template/bin/kafka-preferred-replica-election.sh",
		fmt.Sprintf("%s/bin/kafka-preferred-replica-election.sh", this.instanceDir()), 0755, nil, this.userInfo)
	writeFileFromTemplate("template/bin/kafka-run-class.sh",
		fmt.Sprintf("%s/bin/kafka-run-class.sh", this.instanceDir()), 0755, data, this.userInfo)
	writeFileFromTemplate("template/bin/kafka-server-start.sh",
		fmt.Sprintf("%s/bin/kafka-server-start.sh", this.instanceDir()), 0755, data, this.userInfo)
	writeFileFromTemplate("template/bin/setenv.sh",
		fmt.Sprintf("%s/bin/setenv.sh", this.instanceDir()), 0755, data, this.userInfo)

	// /etc/init.d/
	// init script is named after the cluster and stays root-owned (nil user).
	writeFileFromTemplate("template/init.d/kafka",
		fmt.Sprintf("/etc/init.d/%s", this.clusterName()), 0755, data, nil)

	// config
	writeFileFromTemplate("template/config/server.properties",
		fmt.Sprintf("%s/config/server.properties", this.instanceDir()), 0644, data, this.userInfo)
	writeFileFromTemplate("template/config/log4j.properties",
		fmt.Sprintf("%s/config/log4j.properties", this.instanceDir()), 0644, data, this.userInfo)

	// Final manual steps for the operator.
	this.Ui.Warn(fmt.Sprintf("NOW, please run the following command:"))
	this.Ui.Output(color.Red("confirm log.retention.hours"))
	this.Ui.Output(color.Red("chkconfig --add %s", this.clusterName()))
	this.Ui.Output(color.Red("/etc/init.d/%s start", this.clusterName()))

	return
}
func (this *Deploy) demo() { var ( maxPort int myPort = -1 myBrokerId = -1 ) this.zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) { maxBrokerId := -1 for _, broker := range liveBrokers { if maxPort < broker.Port { maxPort = broker.Port } if cluster == this.cluster { myPort = broker.Port bid, _ := strconv.Atoi(broker.Id) if bid > maxBrokerId { maxBrokerId = bid } myBrokerId = maxBrokerId + 1 // next deployable broker id } } }) ip, err := ctx.LocalIP() swallow(err) if myPort == -1 { // the 1st deployment of this cluster myPort = maxPort + 1 } if myBrokerId == -1 { // 1st broker id starts with 0 myBrokerId = 0 } logDirs := make([]string, 0) for i := 0; i <= 15; i++ { logDir := fmt.Sprintf("/data%d/%s", i, this.clusterName()) if gio.DirExists(filepath.Dir(logDir)) { logDirs = append(logDirs, logDir) } } if len(logDirs) == 0 { // deploy on a small disk host, having no /dataX dirs logDirs = []string{fmt.Sprintf("%s/logs", this.instanceDir())} } influxAddr := ctx.Zone(this.zone).InfluxAddr if influxAddr != "" { this.Ui.Output(fmt.Sprintf("gk deploy -z %s -c %s -broker.id %d -port %d -ip %s -log.dirs %s -influx %s", this.zone, this.cluster, myBrokerId, myPort, ip.String(), strings.Join(logDirs, ","), influxAddr)) } else { this.Ui.Output(fmt.Sprintf("gk deploy -z %s -c %s -broker.id %d -port %d -ip %s -log.dirs %s", this.zone, this.cluster, myBrokerId, myPort, ip.String(), strings.Join(logDirs, ","))) } }