Example #1
func NewConsul(client consulutil.ConsulClient, retries int) *consulStore {
	return &consulStore{
		retries:    retries,
		applicator: labels.NewConsulApplicator(client, retries),
		kv:         client.KV(),
	}
}
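For orientation, here is a minimal sketch of exercising a freshly built applicator directly, combining calls that appear in Examples 8 and 17 below (kp.NewConsulClient, SetLabel, GetLabels). The node name and label values are hypothetical, and the import paths assume the square/p2 layout (pkg/kp, pkg/labels):

package main

import (
	"fmt"
	"log"

	"github.com/square/p2/pkg/kp"
	"github.com/square/p2/pkg/labels"
)

func main() {
	client := kp.NewConsulClient(kp.Options{})          // local agent, default options
	applicator := labels.NewConsulApplicator(client, 3) // retry failed label writes up to 3 times

	// Attach a label to a (hypothetical) node, then read it back.
	if err := applicator.SetLabel(labels.NODE, "aws1.example.com", "test", "yes"); err != nil {
		log.Fatalf("Could not set node label: %v", err)
	}
	labeled, err := applicator.GetLabels(labels.NODE, "aws1.example.com")
	if err != nil {
		log.Fatalf("Could not read node labels: %v", err)
	}
	fmt.Printf("%s\n", labeled.Labels.String())
}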
Example #2
func TestInitializeReplicationWithManaged(t *testing.T) {
	replicator, _, f := testReplicatorAndServer(t)
	defer f.Stop()
	setupPreparers(f)

	// Make one node appear to be managed by a replication controller
	err := labels.NewConsulApplicator(f.Client, 1).SetLabel(
		labels.POD,
		path.Join(testNodes[0].String(), testPodId),
		rc.RCIDLabel,
		"controller GUID ignored",
	)
	if err != nil {
		t.Fatal(err)
	}

	// Replication should fail because one node is managed
	replication, _, err := replicator.InitializeReplication(false, false, 0, 0)
	if err == nil {
		t.Errorf("replication did not reject managed node")
		replication.Cancel()
	}

	// Replication should succeed when explicitly ignoring the controllers
	time.Sleep(50 * time.Millisecond) // brief pause before the second initialization attempt
	replication, _, err = replicator.InitializeReplication(false, true, 0, 0)
	if err != nil {
		t.Fatal("replication could not ignore managed node:", err)
	}
	replication.Cancel()
}
Example #3
func main() {
	quitCh := make(chan struct{})

	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(kpStore, dsStore, labeler, labels.NewConsulApplicator(client, 0), sessions, logger, nil, &healthChecker, 1*time.Second, *useCachePodMatches)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
Example #4
// NOTE: The "retries" concept is mimicking what is built in rcstore.
// TODO: explore transactionality of operations and returning errors instead of
// using retries
func NewConsul(client consulutil.ConsulClient, retries int, logger *logging.Logger) Store {
	return &consulStore{
		applicator: labels.NewConsulApplicator(client, retries),
		kv:         client.KV(),
		logger:     *logger,
	}
}
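As a usage note, a sketch of wiring this constructor the way Example 3 does and then listing daemon sets as in Example 11; imports are elided as in the surrounding examples, and the retry count of 3 simply mirrors those call sites:

client := kp.NewConsulClient(kp.Options{})
logger := logging.NewLogger(logrus.Fields{})
dsStore := dsstore.NewConsul(client, 3, &logger) // 3 label-operation retries, as in Example 3

dsList, err := dsStore.List()
if err != nil {
	log.Fatalf("err: %v", err)
}
for _, ds := range dsList {
	fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
}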
Example #5
File: main.go Project: rudle/p2
// NewP2RM is a constructor for the P2RM type. It will generate the necessary
// storage types based on its api.Client argument
func NewP2RM(client consulutil.ConsulClient, podName string, nodeName types.NodeName) *P2RM {
	rm := &P2RM{}
	rm.Client = client
	rm.Store = kp.NewConsulStore(client)
	rm.RCStore = rcstore.NewConsul(client, 5)
	rm.Labeler = labels.NewConsulApplicator(client, 3)
	rm.LabelID = path.Join(nodeName.String(), podName)
	rm.PodName = podName
	rm.NodeName = nodeName

	return rm
}
Example #6
func NewConsul(c consulutil.ConsulClient, logger *logging.Logger) Store {
	if logger == nil {
		logger = &logging.DefaultLogger
	}
	return consulStore{
		kv:      c.KV(),
		rcstore: rcstore.NewConsul(c, 3),
		logger:  *logger,
		labeler: labels.NewConsulApplicator(c, 3),
		store:   kp.NewConsulStore(c),
	}
}
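Callers may pass a nil logger, in which case the guard above substitutes logging.DefaultLogger. Examples 12 and 13 appear to rely on this via rollstore.NewConsul(client, nil); a one-line sketch, assuming this is that rollstore constructor:

rollStore := rollstore.NewConsul(client, nil) // nil logger falls back to logging.DefaultLogger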
Example #7
func watchPodClusters(client consulutil.ConsulClient, applicator labels.ApplicatorWithoutWatches) {
	logger := &logging.DefaultLogger
	logger.Infoln("Beginning pod cluster watch")

	pcStore := pcstore.NewConsul(client, applicator, labels.NewConsulApplicator(client, 0), logger)
	quitCh := make(chan struct{})
	go func() {
		signalCh := make(chan os.Signal, 2)
		signal.Notify(signalCh, syscall.SIGTERM, os.Interrupt)
		received := <-signalCh
		logger.Warnf("Received %v, shutting down", received)
		close(quitCh)
	}()

	if err := pcStore.WatchAndSync(&printSyncer{logger}, quitCh); err != nil {
		log.Fatalf("error watching pod cluster: %v", err)
	}
}
Example #8
func waitForPodLabeledWithRC(selector klabels.Selector, rcID fields.ID) error {
	client := kp.NewConsulClient(kp.Options{})
	applicator := labels.NewConsulApplicator(client, 1)

	// we have to label this hostname as being allowed to run tests
	host, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("Could not get hostname: %s", err)
	}
	err = applicator.SetLabel(labels.NODE, host, "test", "yes")
	if err != nil {
		return fmt.Errorf("Could not set node selector label on %s: %v", host, err)
	}

	quitCh := make(chan struct{})
	defer close(quitCh)
	watchCh := applicator.WatchMatches(selector, labels.POD, quitCh)
	waitTime := time.After(30 * time.Second)
	for {
		select {
		case <-waitTime:
			return fmt.Errorf("Label selector %v wasn't matched before timeout: %s", selector, targetLogs())
		case res, ok := <-watchCh:
			if !ok {
				return fmt.Errorf("Label selector watch unexpectedly terminated")
			}
			if len(res) > 1 {
				return fmt.Errorf("Too many results found, should only have 1: %v", res)
			}
			if len(res) == 1 {
				_, podID, err := labels.NodeAndPodIDFromPodLabel(res[0])
				if err != nil {
					return err
				}
				if podID.String() != "hello" {
					return fmt.Errorf("Should have found the hello pod, instead found %s", podID)
				}
				return nil
			}
		}
	}
}
Example #9
func testReplicatorAndServer(t *testing.T) (Replicator, Store, consulutil.Fixture) {
	active := 1
	store, f := makeStore(t)

	healthChecker := fake_checker.HappyHealthChecker(testNodes)
	threshold := health.Passing
	replicator, err := NewReplicator(
		basicManifest(),
		basicLogger(),
		testNodes,
		active,
		store,
		labels.NewConsulApplicator(f.Client, 1),
		healthChecker,
		threshold,
		testLockMessage,
		NoTimeout,
	)

	if err != nil {
		t.Fatalf("Unable to initialize replicator: %s", err)
	}
	return replicator, store, f
}
Example #10
func ParseWithConsulOptions() (string, kp.Options, labels.ApplicatorWithoutWatches) {
	consulURL := kingpin.Flag("consul", "The hostname and port of a consul agent in the p2 cluster. Defaults to 0.0.0.0:8500.").String()
	httpApplicatorURL := kingpin.Flag("http-applicator-url", "The URL of a labels.httpApplicator target, including the protocol and port. For example, https://consul-server.io:9999").URL()
	token := kingpin.Flag("token", "The consul ACL token to use. Empty by default.").String()
	tokenFile := kingpin.Flag("token-file", "The file containing the Consul ACL token").ExistingFile()
	headers := kingpin.Flag("header", "An HTTP header to add to requests, in KEY=VALUE form. Can be specified multiple times.").StringMap()
	https := kingpin.Flag("https", "Use HTTPS").Bool()
	wait := kingpin.Flag("wait", "Maximum duration for Consul watches, before resetting and starting again.").Default("30s").Duration()
	caFile := kingpin.Flag("tls-ca-file", "File containing the x509 PEM-encoded CA certificate").ExistingFile()
	keyFile := kingpin.Flag("tls-key-file", "File containing the x509 PEM-encoded private key").ExistingFile()
	certFile := kingpin.Flag("tls-cert-file", "File containing the x509 PEM-encoded public key certificate").ExistingFile()

	cmd := kingpin.Parse()

	if *tokenFile != "" {
		tokenBytes, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			log.Fatalln(err)
		}
		*token = string(tokenBytes)
	}
	var transport http.RoundTripper
	if *caFile != "" || *keyFile != "" || *certFile != "" {
		tlsConfig, err := netutil.GetTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			log.Fatalln(err)
		}

		transport = &http.Transport{
			TLSClientConfig: tlsConfig,
			// same dialer as http.DefaultTransport
			Dial: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		}
	} else {
		transport = http.DefaultTransport
	}
	httpClient := netutil.NewHeaderClient(*headers, transport)

	kpOpts := kp.Options{
		Address:  *consulURL,
		Token:    *token,
		Client:   httpClient,
		HTTPS:    *https,
		WaitTime: *wait,
	}

	var applicator labels.ApplicatorWithoutWatches
	var err error
	if *httpApplicatorURL != nil {
		applicator, err = labels.NewHTTPApplicator(httpClient, *httpApplicatorURL)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		applicator = labels.NewConsulApplicator(kp.NewConsulClient(kpOpts), 0)
	}
	return cmd, kpOpts, applicator
}
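The returned labels.ApplicatorWithoutWatches supports the same read and write calls used elsewhere on this page (GetMatches in Example 11, SetLabel in Example 8). A minimal sketch of a caller follows; the selector string is hypothetical and imports are elided as in the surrounding examples:

_, _, applicator := flags.ParseWithConsulOptions()

selector, err := klabels.Parse("test=yes") // hypothetical selector
if err != nil {
	log.Fatalln(err)
}
matches, err := applicator.GetMatches(selector, labels.NODE, false)
if err != nil {
	log.Fatalf("Error getting matching labels: %v", err)
}
fmt.Println(matches)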
Example #11
File: main.go Project: rudle/p2
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}

		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everywhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}
			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everwhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #12
File: main.go Project: drcapulet/p2
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
Example #13
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := scheduler.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHTTPApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	}
	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
Example #14
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)
	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)
	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}
	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc)
		}
		fmt.Printf("%s", bytes)
	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #15
File: main.go Project: rudle/p2
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy
	nodes remain up at all times, according to p2's health checks.
`

	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	labeler := labels.NewConsulApplicator(client, 3)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}

	nodes := make([]types.NodeName, len(*hosts))
	for i, host := range *hosts {
		nodes[i] = types.NodeName(host)
	}

	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		nodes,
		len(*hosts)-*minNodes,
		store,
		labeler,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
		replication.NoTimeout,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(
		*overrideLock,
		*ignoreControllers,
		*concurrentRealityChecks,
		0,
	)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
Example #16
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = &logrus.TextFormatter{}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := rc.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = rc.NewApplicatorScheduler(httpLabeler)
	}
	rctl := RCtl{
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case CMD_CREATE:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels)
	case CMD_DELETE:
		rctl.Delete(*deleteID, *deleteForce)
	case CMD_REPLICAS:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case CMD_LIST:
		rctl.List(*listJSON)
	case CMD_GET:
		rctl.Get(*getID, *getManifest)
	case CMD_ENABLE:
		rctl.Enable(*enableID)
	case CMD_DISABLE:
		rctl.Disable(*disableID)
	case CMD_ROLL:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollDelete)
	case CMD_FARM:
		rctl.Farm()
	case CMD_SCHEDUP:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed, *schedupDelete)
	}
}
Example #17
File: main.go Project: rudle/p2
func main() {
	cmd, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	applicator := labels.NewConsulApplicator(client, 3)
	exitCode := 0

	switch cmd {
	case CmdShow:
		labelType, err := labels.AsType(*showLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while parsing label type. Check the commandline.\n%v\n", err)
			exitCode = 1
			break
		}

		labelsForEntity, err := applicator.GetLabels(labelType, *showID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
			exitCode = 1
			break
		}
		fmt.Printf("%s/%s: %s\n", labelType, *showID, labelsForEntity.Labels.String())
		return
	case CmdApply:
		// exactly one of a selector or an ID must be provided
		if (*applySubjectSelector == "") == (*applySubjectID == "") {
			fmt.Fprintln(os.Stderr, "Must pass either an ID or a selector for objects to apply the given label to")
			exitCode = 1
			break
		}
		autoConfirm = *applyAutoConfirm

		labelType, err := labels.AsType(*applyLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unrecognized type %s. Check the commandline and documentation.\nhttps://godoc.org/github.com/square/p2/pkg/labels#pkg-constants\n", *applyLabelType)
			exitCode = 1
			break
		}

		additiveLabels := *applyAddititiveLabels
		destructiveKeys := *applyDestructiveLabels

		var matches []labels.Labeled
		if *applySubjectSelector != "" {
			subject, err := klabels.Parse(*applySubjectSelector)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while parsing subject label. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}

			cachedMatch := false
			matches, err = applicator.GetMatches(subject, labelType, cachedMatch)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while finding label matches. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}
		} else {
			matches = []labels.Labeled{{ID: *applySubjectID}}
		}

		if len(additiveLabels) > 0 {
			fmt.Printf("labels to be added: %s\n", klabels.Set(additiveLabels))
		}

		if len(destructiveKeys) > 0 {
			fmt.Printf("labels to be removed: %s\n", destructiveKeys)
		}

		var labelsForEntity labels.Labeled
		for _, match := range matches {
			entityID := match.ID

			err := applyLabels(applicator, entityID, labelType, additiveLabels, destructiveKeys)
			if err != nil {
				fmt.Printf("Encountered err during labeling, %v", err)
				exitCode = 1
			}

			labelsForEntity, err = applicator.GetLabels(labelType, entityID)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
				exitCode = 1
				continue
			}
			fmt.Printf("%s/%s: %s\n", labelType, entityID, labelsForEntity.Labels.String())
		}
	}

	os.Exit(exitCode)
}