func main() {
	quitCh := make(chan struct{})
	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	// Acquire and continuously renew a Consul session; the farm's locks are
	// held under it, and SessionBehaviorDelete releases them if the session dies.
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(
		kpStore,
		dsStore,
		labeler,
		labels.NewConsulApplicator(client, 0),
		sessions,
		logger,
		nil,
		&healthChecker,
		1*time.Second,
		*useCachePodMatches,
	)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
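// The farm's main assumes two package-level declarations not shown above:
// the useCachePodMatches flag and the SessionName helper. A minimal sketch,
// assuming kingpin flags as used by the other commands in this section; the
// flag name and session-name format here are hypothetical.
var useCachePodMatches = kingpin.Flag(
	"use-cache-pod-matches",
	"Answer pod-label queries from a local cache instead of querying Consul each time.",
).Bool()

// SessionName labels this farm's Consul session so that operators inspecting
// locks can tell who holds them. The format below is illustrative only.
func SessionName() string {
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown hostname"
	}
	return fmt.Sprintf("p2-ds-farm:%s:%d", hostname, time.Now().Unix())
}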
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)
	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, labeler, 3),
		rls:        rollstore.NewConsul(client, labeler, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	intents, _, err := store.ListPods(kp.INTENT_TREE)
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}
	realities, _, err := store.ListPods(kp.REALITY_TREE)
	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[string]map[string]inspect.NodePodStatus)
	for _, kvp := range intents {
		if err := inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}
	for _, kvp := range realities {
		if err := inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := checker.NewConsulHealthChecker(client)
	for podId := range statusMap {
		resultMap, err := hchecker.Service(podId)
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podId, err)
		}

		for node, result := range resultMap {
			if *filterNodeName != "" && node != *filterNodeName {
				continue
			}

			old := statusMap[podId][node]
			old.Health = result.Status
			statusMap[podId][node] = old
		}
	}

	// Keep this switch in sync with the enum options for the "format" flag.
	// Rethink this design once there are many different formats.
	switch *format {
	case "tree":
		// Native data format is already a "tree"
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(statusMap)
	case "list":
		// "List" format is a flattened version of "tree"
		output := make([]inspect.NodePodStatus, 0)
		for podId, nodes := range statusMap {
			for node, status := range nodes {
				status.PodId = podId
				status.NodeName = node
				output = append(output, status)
			}
		}
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(output)
	default:
		log.Fatalf("unrecognized format: %s", *format)
	}
}
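// The dereferences above (*filterNodeName, *filterPodId, *format) imply
// package-level kingpin flags. A sketch under that assumption; the actual
// flag names, help strings, and defaults are guesses.
var (
	filterNodeName = kingpin.Flag("node", "Only display results for this node.").String()
	filterPodId    = kingpin.Flag("pod", "Only display results for this pod ID.").String()
	// Keep the Enum options in sync with the format switch in main.
	format = kingpin.Flag("format", `Output format: "tree" or "list".`).
		Default("tree").Enum("tree", "list")
)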
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and deploy it to the three nodes aws1.example.com, aws2.example.com, and aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy nodes remain up at all times, according to p2's health checks.
`
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())

	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		// Update at most (len(hosts) - minNodes) nodes at a time, so that at
		// least minNodes healthy nodes remain up throughout the rollout.
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = &logrus.TextFormatter{}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := rc.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = rc.NewApplicatorScheduler(httpLabeler)
	}

	rctl := RCtl{
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case CMD_CREATE:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels)
	case CMD_DELETE:
		rctl.Delete(*deleteID, *deleteForce)
	case CMD_REPLICAS:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case CMD_LIST:
		rctl.List(*listJSON)
	case CMD_GET:
		rctl.Get(*getID, *getManifest)
	case CMD_ENABLE:
		rctl.Enable(*enableID)
	case CMD_DISABLE:
		rctl.Disable(*disableID)
	case CMD_ROLL:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollDelete)
	case CMD_FARM:
		rctl.Farm()
	case CMD_SCHEDUP:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed, *schedupDelete)
	}
}
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
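// Both farms above are started with a nil quit channel, so this process runs
// until it is killed. A minimal sketch of clean shutdown, assuming Start
// returns when its quit channel is closed (reusing the ctrl-C pattern from
// the daemon-set farm at the top of this section):
//
//	quitCh := make(chan struct{})
//	signals := make(chan os.Signal, 1)
//	signal.Notify(signals, os.Interrupt)
//	go func() {
//		<-signals
//		close(quitCh)
//	}()
//
// ...then pass quitCh to both Start calls instead of nil.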