// Run executes the join command. It connects to the running Serf agent
// over RPC and asks it to join the cluster at the given addresses.
func (c *JoinCommand) Run(args []string, ui cli.Ui) int {
	cmdFlags := flag.NewFlagSet("join", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	addrs := cmdFlags.Args()
	if len(addrs) == 0 {
		ui.Error("At least one address to join must be specified.")
		ui.Error("")
		ui.Error(c.Help())
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	n, err := client.Join(addrs)
	if err != nil {
		ui.Error(fmt.Sprintf("Error joining the cluster: %s", err))
		return 1
	}

	ui.Output(fmt.Sprintf(
		"Successfully joined cluster by contacting %d nodes.", n))
	return 0
}
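// Illustrative invocation of the join command above. The "serf" binary name
// and the RPC address default are assumptions (RPCAddrFlag is not shown in
// this excerpt); the addresses are positional arguments, as parsed above:
//
//	serf join -rpc-addr=127.0.0.1:7373 10.0.0.10 10.0.0.11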
// Run executes the members command. It connects to the running Serf agent
// over RPC and prints the name, address, and status of each known member.
func (c *MembersCommand) Run(args []string, ui cli.Ui) int {
	cmdFlags := flag.NewFlagSet("members", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	members, err := client.Members()
	if err != nil {
		ui.Error(fmt.Sprintf("Error retrieving members: %s", err))
		return 1
	}

	for _, member := range members {
		ui.Output(fmt.Sprintf("%s %s %s",
			member.Name, member.Addr, member.Status))
	}

	return 0
}
// startShutdownWatcher watches the command's ShutdownCh and returns two
// channels: graceful is closed once the agent shuts down cleanly, and
// forceful is closed if a second shutdown signal arrives before the
// graceful shutdown finishes.
func (c *Command) startShutdownWatcher(agent *Agent, ui cli.Ui) (graceful <-chan struct{}, forceful <-chan struct{}) {
	g := make(chan struct{})
	f := make(chan struct{})
	graceful = g
	forceful = f

	go func() {
		<-c.ShutdownCh

		c.lock.Lock()
		c.shuttingDown = true
		c.lock.Unlock()

		ui.Output("Gracefully shutting down agent...")
		go func() {
			if err := agent.Shutdown(); err != nil {
				ui.Error(fmt.Sprintf("Error: %s", err))
				return
			}
			close(g)
		}()

		select {
		case <-g:
			// Gracefully shut down properly
		case <-c.ShutdownCh:
			close(f)
		}
	}()

	return
}
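// Design note on the watcher above: the first signal on ShutdownCh starts a
// graceful agent shutdown in a separate goroutine, while a second signal
// received before that shutdown completes closes the forceful channel so the
// caller can abandon the wait and exit with an error status.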
// Run executes the force-leave command. It connects to the running Serf
// agent over RPC and forces the named member to leave the cluster.
func (c *ForceLeaveCommand) Run(args []string, ui cli.Ui) int {
	cmdFlags := flag.NewFlagSet("force-leave", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	nodes := cmdFlags.Args()
	if len(nodes) != 1 {
		ui.Error("A node name must be specified to force leave.")
		ui.Error("")
		ui.Error(c.Help())
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	err = client.ForceLeave(nodes[0])
	if err != nil {
		ui.Error(fmt.Sprintf("Error force leaving: %s", err))
		return 1
	}

	return 0
}
// Run executes the monitor command. It connects to the running Serf agent
// over RPC and streams its logs at the requested level until interrupted
// or until the remote side ends the stream.
func (c *MonitorCommand) Run(args []string, ui cli.Ui) int {
	var logLevel string
	cmdFlags := flag.NewFlagSet("monitor", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	cmdFlags.StringVar(&logLevel, "log-level", "INFO", "log level")
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	eventCh := make(chan string)
	doneCh := make(chan struct{})
	if err := client.Monitor(logutils.LogLevel(logLevel), eventCh, doneCh); err != nil {
		ui.Error(fmt.Sprintf("Error starting monitor: %s", err))
		return 1
	}

	eventDoneCh := make(chan struct{})
	go func() {
		defer close(eventDoneCh)
		for e := range eventCh {
			ui.Info(e)
		}

		c.lock.Lock()
		defer c.lock.Unlock()
		if !c.quitting {
			ui.Info("")
			ui.Output("Remote side ended the monitor! This usually means that the\n" +
				"remote side has exited or crashed.")
		}
	}()

	select {
	case <-eventDoneCh:
		return 1
	case <-c.ShutdownCh:
		c.lock.Lock()
		c.quitting = true
		c.lock.Unlock()
	}

	close(doneCh)
	return 0
}
// Run executes the agent command. It parses configuration from flags,
// starts the Serf agent with its RPC listener and event handler scripts,
// streams log output, and blocks until the agent is shut down.
func (c *Command) Run(args []string, rawUi cli.Ui) int {
	ui := &cli.PrefixedUi{
		OutputPrefix: "==> ",
		InfoPrefix:   " ",
		ErrorPrefix:  "==> ",
		Ui:           rawUi,
	}

	var bindAddr string
	var logLevel string
	var eventHandlers []string
	var nodeName string
	var nodeRole string
	var rpcAddr string

	cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	cmdFlags.StringVar(&bindAddr, "bind", "0.0.0.0", "address to bind listeners to")
	cmdFlags.Var((*AppendSliceValue)(&eventHandlers), "event-handler",
		"command to execute when events occur")
	cmdFlags.StringVar(&logLevel, "log-level", "INFO", "log level")
	cmdFlags.StringVar(&nodeName, "node", "", "node name")
	cmdFlags.StringVar(&nodeRole, "role", "", "role name")
	cmdFlags.StringVar(&rpcAddr, "rpc-addr", "127.0.0.1:7373",
		"address to bind RPC listener to")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	// Default the node name to the hostname if it wasn't set explicitly.
	if nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			rawUi.Error(fmt.Sprintf("Error determining hostname: %s", err))
			return 1
		}
		nodeName = hostname
	}

	config := Config{
		NodeName:      nodeName,
		Role:          nodeRole,
		BindAddr:      bindAddr,
		RPCAddr:       rpcAddr,
		EventHandlers: eventHandlers,
	}

	eventScripts, err := config.EventScripts()
	if err != nil {
		rawUi.Error(err.Error())
		return 1
	}

	for _, script := range eventScripts {
		if !script.Valid() {
			rawUi.Error(fmt.Sprintf("Invalid event script: %s", script.String()))
			return 1
		}
	}

	bindIP, bindPort, err := config.BindAddrParts()
	if err != nil {
		rawUi.Error(fmt.Sprintf("Invalid bind address: %s", err))
		return 1
	}

	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: rawUi},
	}

	logLevelFilter := LevelFilter()
	logLevelFilter.MinLevel = logutils.LogLevel(strings.ToUpper(logLevel))
	logLevelFilter.Writer = logGate
	if !ValidateLevelFilter(logLevelFilter) {
		ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			logLevelFilter.MinLevel, logLevelFilter.Levels))
		return 1
	}

	serfConfig := serf.DefaultConfig()
	serfConfig.MemberlistConfig.BindAddr = bindIP
	serfConfig.MemberlistConfig.TCPPort = bindPort
	serfConfig.MemberlistConfig.UDPPort = bindPort
	serfConfig.NodeName = nodeName
	serfConfig.Role = nodeRole

	agent := &Agent{
		EventHandler: &ScriptEventHandler{
			Self: serf.Member{
				Name: serfConfig.NodeName,
				Role: serfConfig.Role,
			},
			Scripts: eventScripts,
		},
		LogOutput:  logLevelFilter,
		RPCAddr:    rpcAddr,
		SerfConfig: serfConfig,
	}

	ui.Output("Starting Serf agent...")
	if err := agent.Start(); err != nil {
		ui.Error(err.Error())
		return 1
	}

	ui.Output("Serf agent running!")
	ui.Info(fmt.Sprintf("Node name: '%s'", config.NodeName))
	ui.Info(fmt.Sprintf("Bind addr: '%s:%d'", bindIP, bindPort))
	ui.Info(fmt.Sprintf(" RPC addr: '%s'", rpcAddr))
	ui.Info("")
	ui.Output("Log data will now stream in as it occurs:\n")
	logGate.Flush()

	graceful, forceful := c.startShutdownWatcher(agent, ui)
	select {
	case <-graceful:
	case <-forceful:
		// Forcefully shut down, return a bad exit status.
		return 1
	}

	return 0
}
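// Illustrative invocation of the agent command above, using only flags
// registered in Run. The "serf" binary name and the handler script path are
// assumptions for the example:
//
//	serf agent -node=node-1 -role=web -bind=0.0.0.0 \
//	    -rpc-addr=127.0.0.1:7373 -event-handler=/usr/local/bin/serf-handler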
// Run executes the event command. It connects to the running Serf agent
// over RPC and dispatches a custom user event with an optional payload.
func (c *EventCommand) Run(args []string, ui cli.Ui) int {
	cmdFlags := flag.NewFlagSet("event", flag.ContinueOnError)
	cmdFlags.Usage = func() { ui.Output(c.Help()) }
	rpcAddr := RPCAddrFlag(cmdFlags)
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	args = cmdFlags.Args()
	if len(args) < 1 {
		ui.Error("An event name must be specified.")
		ui.Error("")
		ui.Error(c.Help())
		return 1
	} else if len(args) > 2 {
		ui.Error("Too many command line arguments. Only a name and payload must be specified.")
		ui.Error("")
		ui.Error(c.Help())
		return 1
	}

	event := args[0]
	var payload []byte
	if len(args) == 2 {
		payload = []byte(args[1])
	}

	client, err := RPCClient(*rpcAddr)
	if err != nil {
		ui.Error(fmt.Sprintf("Error connecting to Serf agent: %s", err))
		return 1
	}
	defer client.Close()

	if err := client.UserEvent(event, payload); err != nil {
		ui.Error(fmt.Sprintf("Error sending event: %s", err))
		return 1
	}

	return 0
}
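// Illustrative invocation of the event command above: the first positional
// argument is the event name and the optional second is the payload (the
// "serf" binary name and the values shown are assumptions):
//
//	serf event deploy 1.2.3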