func ParseWithConsulOptions() (string, kp.Options) {
	url := kingpin.Flag("consul", "The hostname and port of a consul agent in the p2 cluster. Defaults to 0.0.0.0:8500.").String()
	token := kingpin.Flag("token", "The consul ACL token to use. Empty by default.").String()
	tokenFile := kingpin.Flag("token-file", "The file containing the Consul ACL token").ExistingFile()
	headers := kingpin.Flag("header", "An HTTP header to add to requests, in KEY=VALUE form. Can be specified multiple times.").StringMap()
	https := kingpin.Flag("https", "Use HTTPS").Bool()
	wait := kingpin.Flag("wait", "Maximum duration for Consul watches, before resetting and starting again.").Default("30s").Duration()
	cmd := kingpin.Parse()

	if *tokenFile != "" {
		tokenBytes, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			log.Fatalln(err)
		}
		*token = string(tokenBytes)
	}

	return cmd, kp.Options{
		Address:  *url,
		Token:    *token,
		Client:   net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:    *https,
		WaitTime: *wait,
	}
}
// MonitorPodHealth is meant to be a long-running goroutine. It reads from a
// consul store to determine which services should be running on the host,
// runs a CheckHealth routine to monitor the health of each service, and
// kills the monitoring routines for services that should no longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	var store kp.Store

	consul := config.ConsulAddress
	node := config.NodeName
	pods := []PodWatch{}
	authtoken, err := preparer.LoadConsulToken(config.ConsulTokenPath)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("Could not load consul token")
	}

	store = kp.NewConsulStore(kp.Options{
		Address: consul,
		HTTPS:   false,
		Token:   authtoken,
		Client:  net.NewHeaderClient(nil, http.DefaultTransport),
	})
	pods = updateHealthMonitors(store, pods, node, logger)
	for {
		select {
		case <-time.After(POLL_KV_FOR_PODS):
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updateHealthMonitors(store, pods, node, logger)
		case <-shutdownCh:
			return
		}
	}
}
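// Illustrative sketch (not from the source): one way a caller might launch
// MonitorPodHealth as a long-running goroutine and later stop it through the
// shutdown channel. The function name runMonitor is hypothetical; only the
// MonitorPodHealth signature above is taken from the source.
func runMonitor(prepConfig *preparer.PreparerConfig, logger *logging.Logger) {
	shutdownCh := make(chan struct{})

	// MonitorPodHealth polls consul and manages per-service health routines
	// until shutdownCh is closed.
	go MonitorPodHealth(prepConfig, logger, shutdownCh)

	// ... later, on process shutdown, signal the monitor to return:
	close(shutdownCh)
}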
func ParseWithConsulOptions() (string, kp.Options) {
	url := kingpin.Flag("consul", "The hostname and port of a consul agent in the p2 cluster. Defaults to 0.0.0.0:8500.").String()
	token := kingpin.Flag("token", "The consul ACL token to use. Empty by default.").String()
	tokenFile := kingpin.Flag("token-file", "The file containing the Consul ACL token").ExistingFile()
	headers := kingpin.Flag("header", "An HTTP header to add to requests, in KEY=VALUE form. Can be specified multiple times.").StringMap()
	https := kingpin.Flag("https", "Use HTTPS").Bool()
	wait := kingpin.Flag("wait", "Maximum duration for Consul watches, before resetting and starting again.").Default("30s").Duration()
	caFile := kingpin.Flag("tls-ca-file", "File containing the x509 PEM-encoded CA").ExistingFile()
	keyFile := kingpin.Flag("tls-key-file", "File containing the x509 PEM-encoded private key").ExistingFile()
	certFile := kingpin.Flag("tls-cert-file", "File containing the x509 PEM-encoded public key certificate").ExistingFile()
	cmd := kingpin.Parse()

	if *tokenFile != "" {
		tokenBytes, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			log.Fatalln(err)
		}
		*token = string(tokenBytes)
	}

	var transport http.RoundTripper
	if *caFile != "" || *keyFile != "" || *certFile != "" {
		tlsConfig, err := netutil.GetTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			log.Fatalln(err)
		}

		transport = &http.Transport{
			TLSClientConfig: tlsConfig,
			// same dialer as http.DefaultTransport
			Dial: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		}
	} else {
		transport = http.DefaultTransport
	}

	return cmd, kp.Options{
		Address:  *url,
		Token:    *token,
		Client:   netutil.NewHeaderClient(*headers, transport),
		HTTPS:    *https,
		WaitTime: *wait,
	}
}
func main() { kingpin.Version(version.VERSION) kingpin.Parse() store := kp.NewConsulStore(kp.Options{ Address: *consulAddress, Token: *consulToken, Client: net.NewHeaderClient(*headers, http.DefaultTransport), HTTPS: *https, }) if *nodeName == "" { hostname, err := os.Hostname() if err != nil { log.Fatalf("Could not get the hostname to do scheduling: %s", err) } *nodeName = hostname } path := kp.IntentPath(*nodeName) if *watchReality { path = kp.RealityPath(*nodeName) } else if *hookTypeName != "" { hookType, err := hooks.AsHookType(*hookTypeName) if err != nil { log.Fatalln(err) } path = kp.HookPath(hookType, *nodeName) } log.Printf("Watching manifests at %s\n", path) quit := make(chan struct{}) errChan := make(chan error) podCh := make(chan kp.ManifestResult) go store.WatchPods(path, quit, errChan, podCh) for { select { case result := <-podCh: fmt.Println("") result.Manifest.Write(os.Stdout) case err := <-errChan: log.Fatalf("Error occurred while listening to pods: %s", err) } } }
func main() { kingpin.Version(version.VERSION) kingpin.Parse() store := kp.NewConsulStore(kp.Options{ Address: *consulAddress, Token: *consulToken, Client: net.NewHeaderClient(*headers, http.DefaultTransport), HTTPS: *https, }) if *nodeName == "" { hostname, err := os.Hostname() if err != nil { log.Fatalf("Could not get the hostname to do scheduling: %s", err) } *nodeName = hostname } if len(*manifests) == 0 { kingpin.Usage() log.Fatalln("No manifests given") } for _, manifestPath := range *manifests { manifest, err := pods.ManifestFromPath(manifestPath) if err != nil { log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err) } path := kp.IntentPath(*nodeName, manifest.ID()) if *hookTypeName != "" { hookType, err := hooks.AsHookType(*hookTypeName) if err != nil { log.Fatalln(err) } path = kp.HookPath(hookType, manifest.ID()) } duration, err := store.SetPod(path, *manifest) if err != nil { log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err) } log.Printf("Scheduling %s took %s\n", manifest.ID(), duration) } }
func main() {
	replicate.Version(version.VERSION)
	replicate.Parse(os.Args[1:])

	opts := kp.Options{
		Address: *consulUrl,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	}
	store := kp.NewConsulStore(opts)
	healthChecker := health.NewConsulHealthChecker(opts)

	// Fetch manifest (could be URI) into temp file
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	defer os.Remove(localMan.Name())
	if err := uri.URICopy(*manifestUri, localMan.Name()); err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}

	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	healthResults, err := healthChecker.Service(manifest.ID())
	if err != nil {
		log.Fatalf("Could not get initial health results: %s", err)
	}
	order := health.SortOrder{
		Nodes:  *hosts,
		Health: healthResults,
	}
	sort.Sort(order)

	repl := replication.Replicator{
		Manifest: *manifest,
		Store:    store,
		Health:   healthChecker,
		Nodes:    *hosts, // sorted by the health.SortOrder
		Active:   len(*hosts) - *minNodes,
		Logger: logging.NewLogger(logrus.Fields{
			"pod": manifest.ID(),
		}),
		Threshold: health.HealthState(*threshold),
	}
	repl.Logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	if err := repl.CheckPreparers(); err != nil {
		log.Fatalf("Preparer check failed: %s", err)
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lock, err := store.NewLock(fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now()))
	if err != nil {
		log.Fatalf("Could not generate lock: %s", err)
	}
	// deferring on main is not particularly useful, since os.Exit will skip
	// the defer, so we have to manually destroy the lock at the right exit
	// paths
	go func() {
		for range time.Tick(10 * time.Second) {
			if err := lock.Renew(); err != nil {
				// if the renewal failed, then either the lock is already dead
				// or the consul agent cannot be reached
				log.Fatalf("Lock could not be renewed: %s", err)
			}
		}
	}()
	if err := repl.LockHosts(lock, *overrideLock); err != nil {
		lock.Destroy()
		log.Fatalf("Could not lock all hosts: %s", err)
	}

	// auto-drain this channel
	errs := make(chan error)
	go func() {
		for range errs {
		}
	}()

	quitch := make(chan struct{})
	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitch)
		lock.Destroy()
		os.Exit(1)
	}()

	repl.Enact(errs, quitch)

	lock.Destroy()
}
func ParseWithConsulOptions() (string, kp.Options, labels.ApplicatorWithoutWatches) {
	consulURL := kingpin.Flag("consul", "The hostname and port of a consul agent in the p2 cluster. Defaults to 0.0.0.0:8500.").String()
	httpApplicatorURL := kingpin.Flag("http-applicator-url", "The URL of a labels.httpApplicator target, including the protocol and port. For example, https://consul-server.io:9999").URL()
	token := kingpin.Flag("token", "The consul ACL token to use. Empty by default.").String()
	tokenFile := kingpin.Flag("token-file", "The file containing the Consul ACL token").ExistingFile()
	headers := kingpin.Flag("header", "An HTTP header to add to requests, in KEY=VALUE form. Can be specified multiple times.").StringMap()
	https := kingpin.Flag("https", "Use HTTPS").Bool()
	wait := kingpin.Flag("wait", "Maximum duration for Consul watches, before resetting and starting again.").Default("30s").Duration()
	caFile := kingpin.Flag("tls-ca-file", "File containing the x509 PEM-encoded CA").ExistingFile()
	keyFile := kingpin.Flag("tls-key-file", "File containing the x509 PEM-encoded private key").ExistingFile()
	certFile := kingpin.Flag("tls-cert-file", "File containing the x509 PEM-encoded public key certificate").ExistingFile()
	cmd := kingpin.Parse()

	if *tokenFile != "" {
		tokenBytes, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			log.Fatalln(err)
		}
		*token = string(tokenBytes)
	}

	var transport http.RoundTripper
	if *caFile != "" || *keyFile != "" || *certFile != "" {
		tlsConfig, err := netutil.GetTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			log.Fatalln(err)
		}

		transport = &http.Transport{
			TLSClientConfig: tlsConfig,
			// same dialer as http.DefaultTransport
			Dial: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		}
	} else {
		transport = http.DefaultTransport
	}

	httpClient := netutil.NewHeaderClient(*headers, transport)
	kpOpts := kp.Options{
		Address:  *consulURL,
		Token:    *token,
		Client:   httpClient,
		HTTPS:    *https,
		WaitTime: *wait,
	}

	var applicator labels.ApplicatorWithoutWatches
	var err error
	if *httpApplicatorURL != nil {
		applicator, err = labels.NewHTTPApplicator(httpClient, *httpApplicatorURL)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		applicator = labels.NewConsulApplicator(kp.NewConsulClient(kpOpts), 0)
	}

	return cmd, kpOpts, applicator
}
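// Illustrative sketch (not from the source): how a p2 command-line tool might
// consume the three return values of the ParseWithConsulOptions variant above.
// The function name exampleMain is hypothetical; kp.NewConsulStore(opts) is
// the same constructor used elsewhere in these snippets.
func exampleMain() {
	cmd, opts, applicator := ParseWithConsulOptions()

	// Build a consul-backed store from the parsed options.
	store := kp.NewConsulStore(opts)

	// The returned subcommand name and label applicator drive the rest of
	// the tool; what happens from here is command-specific.
	_ = cmd
	_ = store
	_ = applicator
}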
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	opts := kp.Options{
		Address: *consulUrl,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	}
	store := kp.NewConsulStore(opts)

	intents, _, err := store.ListPods(kp.INTENT_TREE)
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(kp.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}
	realities, _, err := store.ListPods(kp.REALITY_TREE)
	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(kp.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[string]map[string]inspect.NodePodStatus)

	for _, kvp := range intents {
		if err = inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	for _, kvp := range realities {
		if err = inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := health.NewConsulHealthChecker(opts)
	for podId := range statusMap {
		resultMap, err := hchecker.Service(podId)
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podId, err)
		}

		for node, results := range resultMap {
			if *filterNodeName != "" && node != *filterNodeName {
				continue
			}

			old := statusMap[podId][node]
			_, old.Health = health.FindWorst(results)
			statusMap[podId][node] = old
		}
	}

	enc := json.NewEncoder(os.Stdout)
	enc.Encode(statusMap)
}