Example #1
func VerifyConsulUp(timeout string) error {
	timeoutDur, err := time.ParseDuration(timeout)
	if err != nil {
		return err
	}
	if timeoutDur == 0 {
		return nil
	}

	store := kp.NewConsulStore(kp.Options{
		Token: *consulToken, // not actually necessary because this endpoint is unauthenticated
	})
	consulIsUp := make(chan struct{})
	go func() {
		for {
			time.Sleep(200 * time.Millisecond)
			err := store.Ping()
			if err == nil {
				consulIsUp <- struct{}{}
				return
			}
		}
	}()
	select {
	case <-time.After(timeoutDur):
		return util.Errorf("Consul did not start or was not available after %v", timeoutDur)
	case <-consulIsUp:
		return nil
	}
}
Example #2
// MonitorPodHealth is meant to be a long running go routine.
// MonitorPodHealth reads from a consul store to determine which
// services should be running on the host. MonitorPodHealth
// runs a CheckHealth routine to monitor the health of each
// service and kills routines for services that should no
// longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	var store kp.Store

	consul := config.ConsulAddress
	node := config.NodeName
	pods := []PodWatch{}
	authtoken, err := preparer.LoadConsulToken(config.ConsulTokenPath)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("Could not load consul token")
	}

	store = kp.NewConsulStore(kp.Options{
		Address: consul,
		HTTPS:   false,
		Token:   authtoken,
		Client:  net.NewHeaderClient(nil, http.DefaultTransport),
	})
	pods = updateHealthMonitors(store, pods, node, logger)
	for {
		select {
		case <-time.After(POLL_KV_FOR_PODS):
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updateHealthMonitors(store, pods, node, logger)
		case <-shutdownCh:
			return
		}
	}
}
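The doc comment above describes MonitorPodHealth as a long-running goroutine. A minimal caller sketch, assuming a *preparer.PreparerConfig named config has already been loaded (the wiring below is illustrative, not part of the original example):

func runHealthMonitor(config *preparer.PreparerConfig) {
	logger := logging.NewLogger(logrus.Fields{"component": "health-monitor"})
	shutdownCh := make(chan struct{})

	// Run the monitor in the background; it polls the KV store and
	// manages one health-check routine per pod.
	go MonitorPodHealth(config, &logger, shutdownCh)

	// Close shutdownCh on ctrl-C so the monitor and its routines exit.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
	close(shutdownCh)
}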
Example #3
func VerifyReality(waitTime time.Duration, consulID, agentID string) error {
	quit := make(chan struct{})
	defer close(quit)
	store := kp.NewConsulStore(kp.Options{
		Token: *consulToken,
	})
	hostname, _ := os.Hostname()
	waitChan := time.After(waitTime)
	for {
		select {
		case <-waitChan:
			return util.Errorf("Consul and/or Preparer weren't in the reality store within %s", waitTime)
		case <-time.After(100 * time.Millisecond):
			hasConsul := false
			hasPreparer := false
			results, _, err := store.ListPods(kp.RealityPath(hostname))
			if err != nil {
				log.Printf("Error looking for pods: %s\n", err)
				continue
			}
			for _, res := range results {
				if res.Manifest.ID() == consulID {
					hasConsul = true
				} else if res.Manifest.ID() == agentID {
					hasPreparer = true
				}
			}
			if hasConsul && hasPreparer {
				return nil
			}
		}
	}
}
Example #4
func main() {
	quitCh := make(chan struct{})

	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(kpStore, dsStore, labeler, labels.NewConsulApplicator(client, 0), sessions, logger, nil, &healthChecker, 1*time.Second, *useCachePodMatches)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
Example #5
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}
		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookGlobal {
			path = kp.HookPath(manifest.ID())
		}
		duration, err := store.SetPod(path, manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
Example #6
func NewConsulHealthChecker(client consulutil.ConsulClient) ConsulHealthChecker {
	return consulHealthChecker{
		client:      client,
		kv:          client.KV(),
		consulStore: kp.NewConsulStore(client),
	}
}
Example #7
func verifyHealthChecks(config *preparer.PreparerConfig, services []string) error {
	opts := kp.Options{
		Address: config.ConsulAddress,
		HTTPS:   false,
	}
	store := kp.NewConsulStore(opts)

	time.Sleep(5 * time.Second)
	// check consul for health information for each app
	name, err := os.Hostname()
	if err != nil {
		return err
	}
	for _, sv := range services {
		res, err := store.GetHealth(sv, name)
		if err != nil {
			return err
		} else if (res == kp.WatchResult{}) {
			err = fmt.Errorf("No results for %s", sv)
			return err
		} else {
			fmt.Println(res)
		}
	}

	// if it reaches here it means health checks
	// are being written to the KV store properly
	return nil
}
Example #8
File: rm.go Project: petertseng/p2
func (rm *P2RM) configureStorage(client consulutil.ConsulClient, labeler labels.ApplicatorWithoutWatches) {
	rm.Client = client
	rm.Store = kp.NewConsulStore(client)
	rm.RCStore = rcstore.NewConsul(client, labeler, 5)
	rm.Labeler = labeler
	rm.PodStore = podstore.NewConsul(client.KV())
}
Example #9
// GetStore constructs a key-value store from the given configuration.
func (c *PreparerConfig) GetStore() (kp.Store, error) {
	opts, err := c.getOpts()
	if err != nil {
		return nil, err
	}
	client := kp.NewConsulClient(opts)
	return kp.NewConsulStore(client), nil
}
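Since GetStore hides the client construction, a caller sketch is short; config here is assumed to be an already-populated *PreparerConfig and error handling is abbreviated:

store, err := config.GetStore()
if err != nil {
	log.Fatalf("Could not construct store from preparer config: %s", err)
}
// store satisfies kp.Store and can be used as in the other examples,
// e.g. store.SetPod(...) or store.WatchPods(...).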
Example #10
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)

	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, labeler, 3),
		rls:        rollstore.NewConsul(client, labeler, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
Example #11
// MonitorPodHealth is meant to be a long running go routine.
// MonitorPodHealth reads from a consul store to determine which
// services should be running on the host. MonitorPodHealth
// runs a CheckHealth routine to monitor the health of each
// service and kills routines for services that should no
// longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	client, err := config.GetConsulClient()
	if err != nil {
		// A bad config should have already produced a nice, user-friendly error message.
		logger.WithError(err).Fatalln("error creating health monitor KV client")
	}
	store := kp.NewConsulStore(client)
	healthManager := store.NewHealthManager(config.NodeName, *logger)

	node := config.NodeName
	pods := []PodWatch{}

	watchQuitCh := make(chan struct{})
	watchErrCh := make(chan error)
	watchPodCh := make(chan []kp.ManifestResult)
	go store.WatchPods(
		kp.REALITY_TREE,
		node,
		watchQuitCh,
		watchErrCh,
		watchPodCh,
	)

	// if GetClient fails it means the certfile/keyfile/cafile were
	// invalid or did not exist. It makes sense to throw a fatal error
	secureClient, err := config.GetClient(time.Duration(*HEALTHCHECK_TIMEOUT) * time.Second)
	if err != nil {
		logger.WithError(err).Fatalln("failed to get http client for this preparer")
	}

	insecureClient, err := config.GetInsecureClient(time.Duration(*HEALTHCHECK_TIMEOUT) * time.Second)
	if err != nil {
		logger.WithError(err).Fatalln("failed to get http client for this preparer")
	}

	for {
		select {
		case results := <-watchPodCh:
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updatePods(healthManager, secureClient, insecureClient, pods, results, node, logger)
		case err := <-watchErrCh:
			logger.WithError(err).Errorln("there was an error reading reality manifests for health monitor")
		case <-shutdownCh:
			for _, pod := range pods {
				pod.shutdownCh <- true
			}
			close(watchQuitCh)
			healthManager.Close()
			return
		}
	}
}
Example #12
func main() {
	kingpin.Version(version.VERSION)
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	podStore := podstore.NewConsul(client.KV())

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if *manifestPath == "" {
		kingpin.Usage()
		log.Fatalln("No manifest given")
	}

	podManifest, err := manifest.FromPath(*manifestPath)
	if err != nil {
		log.Fatalf("Could not read manifest at %s: %s\n", *manifestPath, err)
	}

	out := schedule.Output{
		PodID: podManifest.ID(),
	}
	if *uuidPod {
		out.PodUniqueKey, err = podStore.Schedule(podManifest, types.NodeName(*nodeName))
		if err != nil {
			log.Fatalf("Could not schedule pod: %s", err)
		}
	} else {

		// Legacy pod
		podPrefix := kp.INTENT_TREE
		if *hookGlobal {
			podPrefix = kp.HOOK_TREE
		}
		_, err := store.SetPod(podPrefix, types.NodeName(*nodeName), podManifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", podManifest.ID(), err)
		}
	}

	outBytes, err := json.Marshal(out)
	if err != nil {
		log.Fatalf("Successfully scheduled manifest but couldn't marshal JSON output")
	}

	fmt.Println(string(outBytes))
}
Example #13
File: main.go Project: rudle/p2
// NewP2RM is a constructor for the P2RM type. It will generate the necessary
// storage types based on its api.Client argument
func NewP2RM(client consulutil.ConsulClient, podName string, nodeName types.NodeName) *P2RM {
	rm := &P2RM{}
	rm.Client = client
	rm.Store = kp.NewConsulStore(client)
	rm.RCStore = rcstore.NewConsul(client, 5)
	rm.Labeler = labels.NewConsulApplicator(client, 3)
	rm.LabelID = path.Join(nodeName.String(), podName)
	rm.PodName = podName
	rm.NodeName = nodeName

	return rm
}
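A construction sketch for NewP2RM, with placeholder pod and node names; opts is assumed to come from flags.ParseWithConsulOptions as in the surrounding examples:

client := kp.NewConsulClient(opts)
rm := NewP2RM(client, "mypod", types.NodeName("node1.example.com"))
// rm.Store, rm.RCStore, rm.Labeler and rm.PodName are now populated.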
Example #14
func NewConsul(c consulutil.ConsulClient, labeler rollLabeler, logger *logging.Logger) Store {
	if logger == nil {
		logger = &logging.DefaultLogger
	}
	return consulStore{
		kv:      c.KV(),
		rcstore: rcstore.NewConsul(c, labeler, 3),
		logger:  *logger,
		labeler: labeler,
		store:   kp.NewConsulStore(c),
	}
}
Example #15
File: setup.go Project: rudle/p2
// GetStore constructs a key-value store from the given configuration.
func (c *PreparerConfig) GetStore() (kp.Store, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	if c.store != nil {
		return c.store, nil
	}
	opts, err := c.getOpts()
	if err != nil {
		return nil, err
	}
	store := kp.NewConsulStore(kp.NewConsulClient(opts))
	c.store = store
	return store, nil
}
Example #16
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}
	if *podClusters {
		watchPodClusters(client)
	} else {
		podPrefix := kp.INTENT_TREE
		if *watchReality {
			podPrefix = kp.REALITY_TREE
		} else if *hooks {
			podPrefix = kp.HOOK_TREE
		}
		log.Printf("Watching manifests at %s/%s/\n", podPrefix, *nodeName)

		quit := make(chan struct{})
		errChan := make(chan error)
		podCh := make(chan []kp.ManifestResult)
		go store.WatchPods(podPrefix, types.NodeName(*nodeName), quit, errChan, podCh)
		for {
			select {
			case results := <-podCh:
				if len(results) == 0 {
					fmt.Println(fmt.Sprintf("No manifests exist for %s under %s (they may have been deleted)", *nodeName, podPrefix))
				} else {
					for _, result := range results {
						if err := result.Manifest.Write(os.Stdout); err != nil {
							log.Fatalf("write error: %v", err)
						}
					}
				}
			case err := <-errChan:
				log.Fatalf("Error occurred while listening to pods: %s", err)
			}
		}
	}
}
Example #17
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	store := kp.NewConsulStore(kp.Options{
		Address: *consulAddress,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	})

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	path := kp.IntentPath(*nodeName)
	if *watchReality {
		path = kp.RealityPath(*nodeName)
	} else if *hookTypeName != "" {
		hookType, err := hooks.AsHookType(*hookTypeName)
		if err != nil {
			log.Fatalln(err)
		}
		path = kp.HookPath(hookType, *nodeName)
	}
	log.Printf("Watching manifests at %s\n", path)

	quit := make(chan struct{})
	errChan := make(chan error)
	podCh := make(chan kp.ManifestResult)
	go store.WatchPods(path, quit, errChan, podCh)
	for {
		select {
		case result := <-podCh:
			fmt.Println("")
			result.Manifest.Write(os.Stdout)
		case err := <-errChan:
			log.Fatalf("Error occurred while listening to pods: %s", err)
		}
	}
}
Example #18
func verifyHealthChecks(config *preparer.PreparerConfig, services []string) error {
	client, err := config.GetConsulClient()
	if err != nil {
		return err
	}
	store := kp.NewConsulStore(client)

	time.Sleep(30 * time.Second)
	// check consul for health information for each app
	name, err := os.Hostname()
	if err != nil {
		return err
	}

	node := types.NodeName(name)
	for _, sv := range services {
		res, err := store.GetHealth(sv, node)
		if err != nil {
			return err
		} else if (res == kp.WatchResult{}) {
			return fmt.Errorf("No results for %s: \n\n %s", sv, targetLogs())
		} else if res.Status != string(health.Passing) {
			return fmt.Errorf("%s did not pass health check: \n\n %s", sv, targetLogs())
		} else {
			fmt.Println(res)
		}
	}

	for _, sv := range services {
		res, err := store.GetServiceHealth(sv)
		getres, _ := store.GetHealth(sv, node)
		if err != nil {
			return err
		}
		val := res[kp.HealthPath(sv, node)]
		if getres.Id != val.Id || getres.Service != val.Service || getres.Status != val.Status {
			return fmt.Errorf("GetServiceHealth failed %+v: \n\n%s", res, targetLogs())
		}
	}

	// if it reaches here it means health checks
	// are being written to the KV store properly
	return nil
}
Example #19
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	store := kp.NewConsulStore(kp.Options{
		Address: *consulAddress,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	})

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}
		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookTypeName != "" {
			hookType, err := hooks.AsHookType(*hookTypeName)
			if err != nil {
				log.Fatalln(err)
			}
			path = kp.HookPath(hookType, manifest.ID())
		}
		duration, err := store.SetPod(path, *manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
Example #20
func scheduleForThisHost(manifest manifest.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.INTENT_TREE, types.NodeName(hostname), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.REALITY_TREE, types.NodeName(hostname), manifest)
		return err
	}
	return nil
}
Example #21
func ScheduleForThisHost(manifest pods.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.IntentPath(hostname, manifest.ID()), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.RealityPath(hostname, manifest.ID()), manifest)
		return err
	}
	return nil
}
Example #22
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	path := kp.IntentPath(*nodeName)
	if *watchReality {
		path = kp.RealityPath(*nodeName)
	} else if *hooks {
		path = kp.HookPath()
	}
	log.Printf("Watching manifests at %s\n", path)

	quit := make(chan struct{})
	errChan := make(chan error)
	podCh := make(chan []kp.ManifestResult)
	go store.WatchPods(path, quit, errChan, podCh)
	for {
		select {
		case results := <-podCh:
			if len(results) == 0 {
				fmt.Println(fmt.Sprintf("No manifest exists at key %s (it may have been deleted)", path))
			} else {
				for _, result := range results {
					fmt.Println("")
					result.Manifest.Write(os.Stdout)
				}
			}
		case err := <-errChan:
			log.Fatalf("Error occurred while listening to pods: %s", err)
		}
	}
}
Example #23
func makeStore(t *testing.T) (kp.Store, *testutil.TestServer) {
	if testing.Short() {
		t.Skip("skipping test dependendent on consul because of short mode")
	}

	// testutil.NewTestServerConfig will skip the test if "consul" isn't in the system path.
	// We'd rather the test fail.
	defer func() {
		if t.Skipped() {
			t.Fatalf("test skipped by testutil package")
		}
	}()

	// Create server
	server := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
		// consul output in test output is noisy
		c.Stdout = ioutil.Discard
		c.Stderr = ioutil.Discard

		// If ports are left to their defaults, this test conflicts
		// with the test consul servers in pkg/kp
		var offset uint64
		idx := int(atomic.AddUint64(&offset, 1))
		c.Ports = &testutil.TestPortConfig{
			DNS:     26000 + idx,
			HTTP:    27000 + idx,
			RPC:     28000 + idx,
			SerfLan: 29000 + idx,
			SerfWan: 30000 + idx,
			Server:  31000 + idx,
		}
	})

	client := kp.NewConsulClient(kp.Options{
		Address: server.HTTPAddr,
	})
	store := kp.NewConsulStore(client)
	return store, server
}
Example #24
File: main.go Project: drcapulet/p2
func main() {
	// CLI takes a hostname, a token and recursively deletes all pods
	// in the reality tree. This is useful if any pods on a host have
	// been manually altered in some way and need to be restored to
	// a known state.
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	pods, _, err := store.ListPods(kp.REALITY_TREE, types.NodeName(*nodeName))
	if err != nil {
		log.Fatalf("Could not list pods for node %v: %v", *nodeName, err)
	}
	for _, pod := range pods {
		log.Printf("Deleting %v from reality\n", pod.Manifest.ID())
		_, err := store.DeletePod(kp.REALITY_TREE, types.NodeName(*nodeName), pod.Manifest.ID())
		if err != nil {
			log.Fatalf("Could not remove %v from pod reality tree: %v", err)
		}
	}
}
Example #25
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	intents, _, err := store.ListPods(kp.INTENT_TREE)
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}
	realities, _, err := store.ListPods(kp.REALITY_TREE)
	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[string]map[string]inspect.NodePodStatus)

	for _, kvp := range intents {
		if err := inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	for _, kvp := range realities {
		if err := inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := checker.NewConsulHealthChecker(client)
	for podId := range statusMap {
		resultMap, err := hchecker.Service(podId)
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podId, err)
		}

		for node, result := range resultMap {
			if *filterNodeName != "" && node != *filterNodeName {
				continue
			}

			old := statusMap[podId][node]
			old.Health = result.Status
			statusMap[podId][node] = old
		}
	}

	// Keep this switch in sync with the enum options for the "format" flag. Rethink this
	// design once there are many different formats.
	switch *format {
	case "tree":
		// Native data format is already a "tree"
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(statusMap)
	case "list":
		// "List" format is a flattened version of "tree"
		output := make([]inspect.NodePodStatus, 0)
		for podId, nodes := range statusMap {
			for node, status := range nodes {
				status.PodId = podId
				status.NodeName = node
				output = append(output, status)
			}
		}
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(output)
	default:
		log.Fatalf("unrecognized format: %s", *format)
	}

}
Example #26
File: main.go Project: tomzhang/p2
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy
	nodes remain up at all times, according to p2's health checks.
`

	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
Example #27
func makeStore(t *testing.T) (Store, consulutil.Fixture) {
	f := consulutil.NewFixture(t)
	store := kp.NewConsulStore(f.Client)
	return store, f
}
Example #28
func main() {
	replicate.Version(version.VERSION)
	replicate.Parse(os.Args[1:])

	opts := kp.Options{
		Address: *consulUrl,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	}
	store := kp.NewConsulStore(opts)
	healthChecker := health.NewConsulHealthChecker(opts)

	// Fetch manifest (could be URI) into temp file
	localMan, err := ioutil.TempFile("", "tempmanifest")
	defer os.Remove(localMan.Name())
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	if err := uri.URICopy(*manifestUri, localMan.Name()); err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}

	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	healthResults, err := healthChecker.Service(manifest.ID())
	if err != nil {
		log.Fatalf("Could not get initial health results: %s", err)
	}
	order := health.SortOrder{
		Nodes:  *hosts,
		Health: healthResults,
	}
	sort.Sort(order)

	repl := replication.Replicator{
		Manifest: *manifest,
		Store:    store,
		Health:   healthChecker,
		Nodes:    *hosts, // sorted by the health.SortOrder
		Active:   len(*hosts) - *minNodes,
		Logger: logging.NewLogger(logrus.Fields{
			"pod": manifest.ID(),
		}),
		Threshold: health.HealthState(*threshold),
	}
	repl.Logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	if err := repl.CheckPreparers(); err != nil {
		log.Fatalf("Preparer check failed: %s", err)
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lock, err := store.NewLock(fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now()))
	if err != nil {
		log.Fatalf("Could not generate lock: %s", err)
	}
	// deferring on main is not particularly useful, since os.Exit will skip
	// the defer, so we have to manually destroy the lock at the right exit
	// paths
	go func() {
		for range time.Tick(10 * time.Second) {
			if err := lock.Renew(); err != nil {
				// if the renewal failed, then either the lock is already dead
				// or the consul agent cannot be reached
				log.Fatalf("Lock could not be renewed: %s", err)
			}
		}
	}()
	if err := repl.LockHosts(lock, *overrideLock); err != nil {
		lock.Destroy()
		log.Fatalf("Could not lock all hosts: %s", err)
	}

	// auto-drain this channel
	errs := make(chan error)
	go func() {
		for range errs {
		}
	}()

	quitch := make(chan struct{})
	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitch)
		lock.Destroy()
		os.Exit(1)
	}()

	repl.Enact(errs, quitch)
	lock.Destroy()
}
Example #29
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)
	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)
	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}
	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
			os.Exit(1)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc)
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #30
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) {
	addHooks(preparerConfig, logger)

	if preparerConfig.ConsulAddress == "" {
		return nil, util.Errorf("No Consul address given to the preparer")
	}
	if preparerConfig.PodRoot == "" {
		return nil, util.Errorf("No pod root given to the preparer")
	}

	if preparerConfig.LogLevel != "" {
		lv, err := logrus.ParseLevel(preparerConfig.LogLevel)
		if err != nil {
			return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel)
		}
		logger.Logger.Level = lv
	}

	authPolicy, err := getDeployerAuth(preparerConfig)
	if err != nil {
		return nil, err
	}

	artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger)
	if err != nil {
		return nil, err
	}

	artifactRegistry, err := getArtifactRegistry(preparerConfig)
	if err != nil {
		return nil, err
	}

	client, err := preparerConfig.GetConsulClient()
	if err != nil {
		return nil, err
	}

	statusStore := statusstore.NewConsul(client)
	podStatusStore := podstatus.NewConsul(statusStore, kp.PreparerPodStatusNamespace)
	podStore := podstore.NewConsul(client.KV())

	store := kp.NewConsulStore(client)

	maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage
	if preparerConfig.MaxLaunchableDiskUsage != "" {
		maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage)
		if err != nil {
			return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err)
		}
	}

	err = os.MkdirAll(preparerConfig.PodRoot, 0755)
	if err != nil {
		return nil, util.Errorf("Could not create preparer pod directory: %s", err)
	}

	var logExec []string
	if len(preparerConfig.LogExec) > 0 {
		logExec = preparerConfig.LogExec
	} else {
		logExec = runit.DefaultLogExec()
	}

	finishExec := pods.NopFinishExec
	var podProcessReporter *podprocess.Reporter
	if preparerConfig.PodProcessReporterConfig.FullyConfigured() {
		podProcessReporterLogger := logger.SubLogger(logrus.Fields{
			"component": "PodProcessReporter",
		})

		podProcessReporter, err = podprocess.New(preparerConfig.PodProcessReporterConfig, podProcessReporterLogger, podStatusStore)
		if err != nil {
			return nil, err
		}

		finishExec = preparerConfig.PodProcessReporterConfig.FinishExec()
	}

	var hooksManifest manifest.Manifest
	var hooksPod *pods.Pod
	if preparerConfig.HooksManifest != NoHooksSentinelValue {
		if preparerConfig.HooksManifest == "" {
			return nil, util.Errorf("Most provide a hooks_manifest or sentinel value %q to indicate that there are no hooks", NoHooksSentinelValue)
		}

		hooksManifest, err = manifest.FromBytes([]byte(preparerConfig.HooksManifest))
		if err != nil {
			return nil, util.Errorf("Could not parse configured hooks manifest: %s", err)
		}
		hooksPodFactory := pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName)
		hooksPod = hooksPodFactory.NewHookPod(hooksManifest.ID())
	}
	return &Preparer{
		node:                   preparerConfig.NodeName,
		store:                  store,
		hooks:                  hooks.Hooks(preparerConfig.HooksDirectory, preparerConfig.PodRoot, &logger),
		podStatusStore:         podStatusStore,
		podStore:               podStore,
		Logger:                 logger,
		podFactory:             pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName),
		authPolicy:             authPolicy,
		maxLaunchableDiskUsage: maxLaunchableDiskUsage,
		finishExec:             finishExec,
		logExec:                logExec,
		logBridgeBlacklist:     preparerConfig.LogBridgeBlacklist,
		artifactVerifier:       artifactVerifier,
		artifactRegistry:       artifactRegistry,
		PodProcessReporter:     podProcessReporter,
		hooksManifest:          hooksManifest,
		hooksPod:               hooksPod,
		hooksExecDir:           preparerConfig.HooksDirectory,
	}, nil
}