func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}

		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookGlobal {
			path = kp.HookPath(manifest.ID())
		}

		duration, err := store.SetPod(path, manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
func main() {
	quitCh := make(chan struct{})
	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(
		kpStore,
		dsStore,
		labeler,
		labels.NewConsulApplicator(client, 0),
		sessions,
		logger,
		nil,
		&healthChecker,
		1*time.Second,
		*useCachePodMatches,
	)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)

	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, labeler, 3),
		rls:        rollstore.NewConsul(client, labeler, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	podStore := podstore.NewConsul(client.KV())

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if *manifestPath == "" {
		kingpin.Usage()
		log.Fatalln("No manifest given")
	}

	podManifest, err := manifest.FromPath(*manifestPath)
	if err != nil {
		log.Fatalf("Could not read manifest at %s: %s\n", *manifestPath, err)
	}

	out := schedule.Output{
		PodID: podManifest.ID(),
	}
	if *uuidPod {
		out.PodUniqueKey, err = podStore.Schedule(podManifest, types.NodeName(*nodeName))
		if err != nil {
			log.Fatalf("Could not schedule pod: %s", err)
		}
	} else {
		// Legacy pod
		podPrefix := kp.INTENT_TREE
		if *hookGlobal {
			podPrefix = kp.HOOK_TREE
		}
		_, err := store.SetPod(podPrefix, types.NodeName(*nodeName), podManifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", podManifest.ID(), err)
		}
	}

	outBytes, err := json.Marshal(out)
	if err != nil {
		log.Fatalf("Successfully scheduled manifest but couldn't marshal JSON output")
	}

	fmt.Println(string(outBytes))
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts, labeler := flags.ParseWithConsulOptions()
	consulClient := kp.NewConsulClient(opts)

	err := handlePodRemoval(consulClient, labeler)
	if err != nil {
		// use Fprint rather than Fprintf so a "%" in the error text isn't treated as a format verb
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(1)
	}
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			fmt.Fprintf(os.Stderr, "error getting hostname. use --node to specify a node: %v\n", err)
			os.Exit(1)
		}
		*nodeName = hostname
	}

	rm := NewP2RM(kp.NewConsulClient(opts), *podName, types.NodeName(*nodeName))

	podIsManagedByRC, rcID, err := rm.checkForManagingReplicationController()
	if err != nil {
		os.Exit(2)
	}

	if !podIsManagedByRC {
		err = rm.deletePod()
		if err != nil {
			os.Exit(2)
		}
	}

	if podIsManagedByRC && !*deallocation {
		fmt.Fprintf(
			os.Stderr,
			"error: %s is managed by replication controller: %s\n"+
				"It's possible you meant to deallocate this pod on this node. If so, please confirm your intention with --deallocate\n",
			*nodeName,
			rcID,
		)
		os.Exit(2)
	}

	if podIsManagedByRC && *deallocation {
		err = rm.decrementDesiredCount(rcID)
		if err != nil {
			fmt.Fprintf(os.Stderr,
				"Encountered error deallocating from the RC %s. You may attempt this command again or use `p2-rctl` to clean up manually.\n%v",
				rcID,
				err,
			)
		}
	}

	fmt.Printf("%s: successfully removed %s\n", rm.NodeName, rm.PodName)
}
func main() { kingpin.Version(version.VERSION) _, opts := flags.ParseWithConsulOptions() client := kp.NewConsulClient(opts) store := kp.NewConsulStore(client) if *nodeName == "" { hostname, err := os.Hostname() if err != nil { log.Fatalf("Could not get the hostname to do scheduling: %s", err) } *nodeName = hostname } if *podClusters { watchPodClusters(client) } else { podPrefix := kp.INTENT_TREE if *watchReality { podPrefix = kp.REALITY_TREE } else if *hooks { podPrefix = kp.HOOK_TREE } log.Printf("Watching manifests at %s/%s/\n", podPrefix, *nodeName) quit := make(chan struct{}) errChan := make(chan error) podCh := make(chan []kp.ManifestResult) go store.WatchPods(podPrefix, types.NodeName(*nodeName), quit, errChan, podCh) for { select { case results := <-podCh: if len(results) == 0 { fmt.Println(fmt.Sprintf("No manifests exist for %s under %s (they may have been deleted)", *nodeName, podPrefix)) } else { for _, result := range results { if err := result.Manifest.Write(os.Stdout); err != nil { log.Fatalf("write error: %v", err) } } } case err := <-errChan: log.Fatalf("Error occurred while listening to pods: %s", err) } } } }
func main() { kingpin.Version(version.VERSION) _, opts := flags.ParseWithConsulOptions() client := kp.NewConsulClient(opts) store := kp.NewConsulStore(client) if *nodeName == "" { hostname, err := os.Hostname() if err != nil { log.Fatalf("Could not get the hostname to do scheduling: %s", err) } *nodeName = hostname } path := kp.IntentPath(*nodeName) if *watchReality { path = kp.RealityPath(*nodeName) } else if *hooks { path = kp.HookPath() } log.Printf("Watching manifests at %s\n", path) quit := make(chan struct{}) errChan := make(chan error) podCh := make(chan []kp.ManifestResult) go store.WatchPods(path, quit, errChan, podCh) for { select { case results := <-podCh: if len(results) == 0 { fmt.Println(fmt.Sprintf("No manifest exists at key %s (it may have been deleted)", path)) } else { for _, result := range results { fmt.Println("") result.Manifest.Write(os.Stdout) } } case err := <-errChan: log.Fatalf("Error occurred while listening to pods: %s", err) } } }
func main() {
	// Parse custom flags + standard Consul routing options
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	podStore := kp_podstore.NewConsul(client.KV())
	podStatusStore := podstatus.NewConsul(statusstore.NewConsul(client), kp.PreparerPodStatusNamespace)

	logger := log.New(os.Stderr, "", 0)
	port := getPort(logger)
	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		logger.Fatalf("failed to listen: %v", err)
	}

	s := grpc.NewServer()
	podstore_protos.RegisterP2PodStoreServer(s, podstore.NewServer(podStore, podStatusStore))
	if err := s.Serve(lis); err != nil {
		logger.Fatalf("failed to serve: %v", err)
	}
}
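// getPort is called above but its definition is not part of this listing. The sketch
// below is a minimal illustration of one way such a helper could work, not the actual
// implementation: the environment variable name ("PORT") and the default port (3000)
// are assumptions made purely for illustration. It assumes the "os", "strconv", and
// "log" packages are imported.
func getPort(logger *log.Logger) int {
	portStr := os.Getenv("PORT") // assumed environment variable name
	if portStr == "" {
		return 3000 // assumed default
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		logger.Fatalf("invalid port %q: %v", portStr, err)
	}
	return port
}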
func main() {
	// CLI takes a hostname, a token and recursively deletes all pods
	// in the reality tree. This is useful if any pods on a host have
	// been manually altered in some way and need to be restored to
	// a known state.
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	pods, _, err := store.ListPods(kp.REALITY_TREE, types.NodeName(*nodeName))
	if err != nil {
		log.Fatalf("Could not list pods for node %v: %v", *nodeName, err)
	}

	for _, pod := range pods {
		log.Printf("Deleting %v from reality\n", pod.Manifest.ID())
		_, err := store.DeletePod(kp.REALITY_TREE, types.NodeName(*nodeName), pod.Manifest.ID())
		if err != nil {
			log.Fatalf("Could not remove %v from pod reality tree: %v", pod.Manifest.ID(), err)
		}
	}
}
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	intents, _, err := store.ListPods(kp.INTENT_TREE)
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}
	realities, _, err := store.ListPods(kp.REALITY_TREE)
	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[string]map[string]inspect.NodePodStatus)
	// capture the error returned by AddKVPToMap rather than re-checking the stale outer err
	for _, kvp := range intents {
		if err := inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}
	for _, kvp := range realities {
		if err := inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := checker.NewConsulHealthChecker(client)
	for podId := range statusMap {
		resultMap, err := hchecker.Service(podId)
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podId, err)
		}

		for node, result := range resultMap {
			if *filterNodeName != "" && node != *filterNodeName {
				continue
			}

			old := statusMap[podId][node]
			old.Health = result.Status
			statusMap[podId][node] = old
		}
	}

	// Keep this switch in sync with the enum options for the "format" flag. Rethink this
	// design once there are many different formats.
	switch *format {
	case "tree":
		// Native data format is already a "tree"
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(statusMap)
	case "list":
		// "List" format is a flattened version of "tree"
		output := make([]inspect.NodePodStatus, 0)
		for podId, nodes := range statusMap {
			for node, status := range nodes {
				status.PodId = podId
				status.NodeName = node
				output = append(output, status)
			}
		}
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(output)
	default:
		log.Fatalf("unrecognized format: %s", *format)
	}
}
func main() {
	cmd, _, applicator := flags.ParseWithConsulOptions()
	exitCode := 0

	switch cmd {
	case CmdShow:
		labelType, err := labels.AsType(*showLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while parsing label type. Check the commandline.\n%v\n", err)
			exitCode = 1
			break
		}

		labelsForEntity, err := applicator.GetLabels(labelType, *showID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
			exitCode = 1
			break
		}
		fmt.Printf("%s/%s: %s\n", labelType, *showID, labelsForEntity.Labels.String())
		return
	case CmdApply:
		// if xnor(selector, id)
		if (*applySubjectSelector == "") == (*applySubjectID == "") {
			fmt.Fprint(os.Stderr, "Must pass either an ID or a selector for objects to apply the given label to")
			exitCode = 1
			break
		}
		autoConfirm = *applyAutoConfirm

		labelType, err := labels.AsType(*applyLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unrecognized type %s. Check the commandline and documentation.\nhttps://godoc.org/github.com/square/p2/pkg/labels#pkg-constants\n", *applyLabelType)
			exitCode = 1
			break
		}

		additiveLabels := *applyAddititiveLabels
		destructiveKeys := *applyDestructiveLabels

		var matches []labels.Labeled
		if *applySubjectSelector != "" {
			subject, err := klabels.Parse(*applySubjectSelector)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while parsing subject label. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}

			cachedMatch := false
			matches, err = applicator.GetMatches(subject, labelType, cachedMatch)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while finding label matches. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}
		} else {
			matches = []labels.Labeled{{ID: *applySubjectID}}
		}

		if len(additiveLabels) > 0 {
			fmt.Printf("labels to be added: %s\n", klabels.Set(additiveLabels))
		}
		if len(destructiveKeys) > 0 {
			fmt.Printf("labels to be removed: %s\n", destructiveKeys)
		}

		var labelsForEntity labels.Labeled
		for _, match := range matches {
			entityID := match.ID

			err := applyLabels(applicator, entityID, labelType, additiveLabels, destructiveKeys)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Encountered error during labeling: %v\n", err)
				exitCode = 1
			}

			labelsForEntity, err = applicator.GetLabels(labelType, entityID)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
				exitCode = 1
				continue
			}
			fmt.Printf("%s/%s: %s\n", labelType, entityID, labelsForEntity.Labels.String())
		}
		break
	}
	os.Exit(exitCode)
}
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two
	healthy nodes remain up at all times, according to p2's health checks.
	`

	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())

	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}
		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everywhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)
		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}
		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)
		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}
		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)
		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *updateTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}

			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everywhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everywhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
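// SessionName is referenced above (and in the daemon set farm's main) but is not
// defined in this listing. The sketch below is a minimal illustration of what such a
// helper could look like, offered as an assumption rather than the actual
// implementation: it names the consul session after the farm and the local hostname so
// the lock holder is identifiable. The "rolling-farm" prefix is an assumed value used
// only for illustration.
func SessionName() string {
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown hostname"
	}
	return fmt.Sprintf("rolling-farm:%s", hostname)
}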
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)

	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}

	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)

	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}

	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			// log.Fatalf already exits, so no further os.Exit is needed
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc)
		}
		fmt.Printf("%s", bytes)

	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = &logrus.TextFormatter{}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := rc.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = rc.NewApplicatorScheduler(httpLabeler)
	}

	rctl := RCtl{
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case CMD_CREATE:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels)
	case CMD_DELETE:
		rctl.Delete(*deleteID, *deleteForce)
	case CMD_REPLICAS:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case CMD_LIST:
		rctl.List(*listJSON)
	case CMD_GET:
		rctl.Get(*getID, *getManifest)
	case CMD_ENABLE:
		rctl.Enable(*enableID)
	case CMD_DISABLE:
		rctl.Disable(*disableID)
	case CMD_ROLL:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollDelete)
	case CMD_FARM:
		rctl.Farm()
	case CMD_SCHEDUP:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed, *schedupDelete)
	}
}