Example #1
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if len(*manifests) == 0 {
		kingpin.Usage()
		log.Fatalln("No manifests given")
	}

	for _, manifestPath := range *manifests {
		manifest, err := pods.ManifestFromPath(manifestPath)
		if err != nil {
			log.Fatalf("Could not read manifest at %s: %s\n", manifestPath, err)
		}
		path := kp.IntentPath(*nodeName, manifest.ID())
		if *hookGlobal {
			path = kp.HookPath(manifest.ID())
		}
		duration, err := store.SetPod(path, manifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", manifest.ID(), err)
		}
		log.Printf("Scheduling %s took %s\n", manifest.ID(), duration)
	}
}
Example #2
func main() {
	quitCh := make(chan struct{})

	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(kpStore, dsStore, labeler, labels.NewConsulApplicator(client, 0), sessions, logger, nil, &healthChecker, 1*time.Second, *useCachePodMatches)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
Example #3
func VerifyConsulUp(timeout string) error {
	timeoutDur, err := time.ParseDuration(timeout)
	if err != nil {
		return err
	}
	if timeoutDur == 0 {
		return nil
	}

	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken, // not actually necessary because this endpoint is unauthenticated
	}))
	consulIsUp := make(chan struct{})
	go func() {
		for {
			time.Sleep(200 * time.Millisecond)
			err := store.Ping()
			if err == nil {
				consulIsUp <- struct{}{}
				return
			}
		}
	}()
	select {
	case <-time.After(timeoutDur):
		return util.Errorf("Consul did not start or was not available after %v", timeoutDur)
	case <-consulIsUp:
		return nil
	}
}
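
A caller might gate its setup on this check. A minimal sketch (the timeout value and the surrounding log call are illustrative, not from the source):

	// Hypothetical caller: wait up to ten seconds for the local Consul agent.
	if err := VerifyConsulUp("10s"); err != nil {
		log.Fatalf("Consul is not available: %s", err)
	}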
Example #4
func VerifyReality(waitTime time.Duration, consulID, agentID string) error {
	quit := make(chan struct{})
	defer close(quit)
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return util.Errorf("Could not get hostname: %s", err)
	}
	waitChan := time.After(waitTime)
	hasConsul := false
	hasPreparer := false
	for {
		select {
		case <-waitChan:
			return util.Errorf(
				"Consul and/or Preparer weren't in the reality store within %s (consul=%t, preparer=%t)",
				waitTime, hasConsul, hasPreparer)
		case <-time.After(100 * time.Millisecond):
			results, _, err := store.ListPods(kp.RealityPath(hostname))
			if err != nil {
				log.Printf("Error looking for pods: %s\n", err)
				continue
			}
			for _, res := range results {
				if res.Manifest.ID() == consulID {
					hasConsul = true
				} else if res.Manifest.ID() == agentID {
					hasPreparer = true
				}
			}
			if hasConsul && hasPreparer {
				return nil
			}
		}
	}
}
Example #5
// GetStore constructs a key-value store from the given configuration.
func (c *PreparerConfig) GetStore() (kp.Store, error) {
	opts, err := c.getOpts()
	if err != nil {
		return nil, err
	}
	client := kp.NewConsulClient(opts)
	return kp.NewConsulStore(client), nil
}
Example #6
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)

	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, labeler, 3),
		rls:        rollstore.NewConsul(client, labeler, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
Example #7
func main() {
	kingpin.Version(version.VERSION)
	_, opts, labeler := flags.ParseWithConsulOptions()

	consulClient := kp.NewConsulClient(opts)

	err := handlePodRemoval(consulClient, labeler)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
}
Example #8
func main() {
	kingpin.Version(version.VERSION)
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	podStore := podstore.NewConsul(client.KV())

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if *manifestPath == "" {
		kingpin.Usage()
		log.Fatalln("No manifest given")
	}

	podManifest, err := manifest.FromPath(*manifestPath)
	if err != nil {
		log.Fatalf("Could not read manifest at %s: %s\n", *manifestPath, err)
	}

	out := schedule.Output{
		PodID: podManifest.ID(),
	}
	if *uuidPod {
		out.PodUniqueKey, err = podStore.Schedule(podManifest, types.NodeName(*nodeName))
		if err != nil {
			log.Fatalf("Could not schedule pod: %s", err)
		}
	} else {
		// Legacy pod
		podPrefix := kp.INTENT_TREE
		if *hookGlobal {
			podPrefix = kp.HOOK_TREE
		}
		_, err := store.SetPod(podPrefix, types.NodeName(*nodeName), podManifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", podManifest.ID(), err)
		}
	}

	outBytes, err := json.Marshal(out)
	if err != nil {
		log.Fatalf("Successfully scheduled manifest but couldn't marshal JSON output")
	}

	fmt.Println(string(outBytes))
}
Example #9
func (c *PreparerConfig) GetConsulClient() (consulutil.ConsulClient, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	if c.consulClient != nil {
		return c.consulClient, nil
	}
	opts, err := c.getOpts()
	if err != nil {
		return nil, err
	}
	client := kp.NewConsulClient(opts)
	c.consulClient = client
	return client, nil
}
Example #10
File: setup.go Project: rudle/p2
// GetStore constructs a key-value store from the given configuration.
func (c *PreparerConfig) GetStore() (kp.Store, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	if c.store != nil {
		return c.store, nil
	}
	opts, err := c.getOpts()
	if err != nil {
		return nil, err
	}
	store := kp.NewConsulStore(kp.NewConsulClient(opts))
	c.store = store
	return store, nil
}
Example #11
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			fmt.Fprintf(os.Stderr, "error getting hostname. use --node to specify a node: %v\n", err)
			os.Exit(1)
		}
		*nodeName = hostname
	}

	rm := NewP2RM(kp.NewConsulClient(opts), *podName, types.NodeName(*nodeName))

	podIsManagedByRC, rcID, err := rm.checkForManagingReplicationController()
	if err != nil {
		fmt.Fprintf(os.Stderr, "error checking for a managing replication controller: %v\n", err)
		os.Exit(2)
	}

	if !podIsManagedByRC {
		err = rm.deletePod()
		if err != nil {
			fmt.Fprintf(os.Stderr, "error deleting pod: %v\n", err)
			os.Exit(2)
		}
	}

	if podIsManagedByRC && !*deallocation {
		fmt.Fprintf(
			os.Stderr,
			"error: %s is managed by replication controller: %s\n"+
				"It's possible you meant to deallocate this pod on this node. If so, please confirm your intention with --deallocate\n", *podName, rcID)
		os.Exit(2)
	}

	if podIsManagedByRC && *deallocation {
		err = rm.decrementDesiredCount(rcID)
		if err != nil {
			fmt.Fprintf(os.Stderr,
				"Encountered error deallocating from the RC %s. You may attempt this command again or use `p2-rctl` to cleanup manually.\n%v",
				rcID,
				err)
		}
	}

	fmt.Printf("%s: successfully removed %s\n", rm.NodeName, rm.PodName)
}
Example #12
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}
	if *podClusters {
		watchPodClusters(client)
	} else {
		podPrefix := kp.INTENT_TREE
		if *watchReality {
			podPrefix = kp.REALITY_TREE
		} else if *hooks {
			podPrefix = kp.HOOK_TREE
		}
		log.Printf("Watching manifests at %s/%s/\n", podPrefix, *nodeName)

		quit := make(chan struct{})
		errChan := make(chan error)
		podCh := make(chan []kp.ManifestResult)
		go store.WatchPods(podPrefix, types.NodeName(*nodeName), quit, errChan, podCh)
		for {
			select {
			case results := <-podCh:
				if len(results) == 0 {
					fmt.Println(fmt.Sprintf("No manifests exist for %s under %s (they may have been deleted)", *nodeName, podPrefix))
				} else {
					for _, result := range results {
						if err := result.Manifest.Write(os.Stdout); err != nil {
							log.Fatalf("write error: %v", err)
						}
					}
				}
			case err := <-errChan:
				log.Fatalf("Error occurred while listening to pods: %s", err)
			}
		}
	}
}
Example #13
func scheduleForThisHost(manifest manifest.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.INTENT_TREE, types.NodeName(hostname), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.REALITY_TREE, types.NodeName(hostname), manifest)
		return err
	}
	return nil
}
Example #14
func ScheduleForThisHost(manifest pods.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.IntentPath(hostname, manifest.ID()), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.RealityPath(hostname, manifest.ID()), manifest)
		return err
	}
	return nil
}
Example #15
func TestNewConsul(t *testing.T) {
	store := NewConsul(kp.NewConsulClient(kp.Options{}), nil)
	rollstore := store.(consulStore)
	if rollstore.kv == nil {
		t.Fatal("kv should not be nil for constructed rollstore")
	}

	if rollstore.rcstore == nil {
		t.Fatal("rcstore should not be nil for constructed rollstore")
	}

	if rollstore.labeler == nil {
		t.Fatal("labeler should not be nil for constructed rollstore")
	}

	if rollstore.store == nil {
		t.Fatal("store should not be nil for constructed rollstore")
	}
}
Example #16
func waitForPodLabeledWithRC(selector klabels.Selector, rcID fields.ID) error {
	client := kp.NewConsulClient(kp.Options{})
	applicator := labels.NewConsulApplicator(client, 1)

	// we have to label this hostname as being allowed to run tests
	host, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("Could not get hostname: %s", err)
	}
	err = applicator.SetLabel(labels.NODE, host, "test", "yes")
	if err != nil {
		return fmt.Errorf("Could not set node selector label on %s: %v", host, err)
	}

	quitCh := make(chan struct{})
	defer close(quitCh)
	watchCh := applicator.WatchMatches(selector, labels.POD, quitCh)
	waitTime := time.After(30 * time.Second)
	for {
		select {
		case <-waitTime:
			return fmt.Errorf("Label selector %v wasn't matched before timeout: %s", selector, targetLogs())
		case res, ok := <-watchCh:
			if !ok {
				return fmt.Errorf("Label selector watch unexpectedly terminated")
			}
			if len(res) > 1 {
				return fmt.Errorf("Too many results found, should only have 1: %v", res)
			}
			if len(res) == 1 {
				_, podID, err := labels.NodeAndPodIDFromPodLabel(res[0])
				if err != nil {
					return err
				}
				if podID.String() != "hello" {
					return fmt.Errorf("Should have found the hello pod, instead found %s", podID)
				}
				return nil
			}
		}
	}
}
Example #17
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	path := kp.IntentPath(*nodeName)
	if *watchReality {
		path = kp.RealityPath(*nodeName)
	} else if *hooks {
		path = kp.HookPath()
	}
	log.Printf("Watching manifests at %s\n", path)

	quit := make(chan struct{})
	errChan := make(chan error)
	podCh := make(chan []kp.ManifestResult)
	go store.WatchPods(path, quit, errChan, podCh)
	for {
		select {
		case results := <-podCh:
			if len(results) == 0 {
				fmt.Printf("No manifest exists at key %s (it may have been deleted)\n", path)
			} else {
				for _, result := range results {
					fmt.Println()
					if err := result.Manifest.Write(os.Stdout); err != nil {
						log.Fatalf("write error: %v", err)
					}
				}
			}
		case err := <-errChan:
			log.Fatalf("Error occurred while listening to pods: %s", err)
		}
	}
}
Example #18
func makeStore(t *testing.T) (kp.Store, *testutil.TestServer) {
	if testing.Short() {
		t.Skip("skipping test dependent on consul because of short mode")
	}

	// testutil.NewTestServerConfig will skip the test if "consul" isn't in the system path.
	// We'd rather the test fail.
	defer func() {
		if t.Skipped() {
			t.Fatalf("test skipped by testutil package")
		}
	}()

	// Create server
	server := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
		// consul output in test output is noisy
		c.Stdout = ioutil.Discard
		c.Stderr = ioutil.Discard

		// If ports are left to their defaults, this test conflicts
		// with the test consul servers in pkg/kp.
		// NOTE: for the atomic increment to yield distinct ports across
		// tests, offset must live at package scope; declared locally, as
		// here, idx is always 1.
		var offset uint64
		idx := int(atomic.AddUint64(&offset, 1))
		c.Ports = &testutil.TestPortConfig{
			DNS:     26000 + idx,
			HTTP:    27000 + idx,
			RPC:     28000 + idx,
			SerfLan: 29000 + idx,
			SerfWan: 30000 + idx,
			Server:  31000 + idx,
		}
	})

	client := kp.NewConsulClient(kp.Options{
		Address: server.HTTPAddr,
	})
	store := kp.NewConsulStore(client)
	return store, server
}
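
A test built on makeStore might look like the following sketch; it assumes testutil.TestServer's Stop method and the store.Ping probe seen in Example #3, and the test body itself is hypothetical:

	func TestStorePing(t *testing.T) {
		store, server := makeStore(t)
		// Shut the test consul process down when the test finishes.
		defer server.Stop()

		if err := store.Ping(); err != nil {
			t.Fatalf("could not ping the test consul server: %s", err)
		}
	}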
Example #19
func main() {
	// Parse custom flags + standard Consul routing options
	_, opts, _ := flags.ParseWithConsulOptions()

	client := kp.NewConsulClient(opts)
	podStore := kp_podstore.NewConsul(client.KV())
	podStatusStore := podstatus.NewConsul(statusstore.NewConsul(client), kp.PreparerPodStatusNamespace)

	logger := log.New(os.Stderr, "", 0)
	port := getPort(logger)

	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		logger.Fatalf("failed to listen: %v", err)
	}

	s := grpc.NewServer()
	podstore_protos.RegisterP2PodStoreServer(s, podstore.NewServer(podStore, podStatusStore))
	if err := s.Serve(lis); err != nil {
		logger.Fatalf("failed to serve: %v", err)
	}
}
Example #20
File: main.go Project: drcapulet/p2
func main() {
	// CLI takes a hostname, a token and recursively deletes all pods
	// in the reality tree. This is useful if any pods on a host have
	// been manually altered in some way and need to be restored to
	// a known state.
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	pods, _, err := store.ListPods(kp.REALITY_TREE, types.NodeName(*nodeName))
	if err != nil {
		log.Fatalf("Could not list pods for node %v: %v", *nodeName, err)
	}
	for _, pod := range pods {
		log.Printf("Deleting %v from reality\n", pod.Manifest.ID())
		_, err := store.DeletePod(kp.REALITY_TREE, types.NodeName(*nodeName), pod.Manifest.ID())
		if err != nil {
			log.Fatalf("Could not remove %v from pod reality tree: %v", err)
		}
	}
}
Example #21
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	intents, _, err := store.ListPods(kp.INTENT_TREE)
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}
	realities, _, err := store.ListPods(kp.REALITY_TREE)
	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[string]map[string]inspect.NodePodStatus)

	for _, kvp := range intents {
		if err := inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	for _, kvp := range realities {
		if err := inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, *filterNodeName, *filterPodId, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := checker.NewConsulHealthChecker(client)
	for podId := range statusMap {
		resultMap, err := hchecker.Service(podId)
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podId, err)
		}

		for node, result := range resultMap {
			if *filterNodeName != "" && node != *filterNodeName {
				continue
			}

			old := statusMap[podId][node]
			old.Health = result.Status
			statusMap[podId][node] = old
		}
	}

	// Keep this switch in sync with the enum options for the "format" flag. Rethink this
	// design once there are many different formats.
	switch *format {
	case "tree":
		// Native data format is already a "tree"
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(statusMap)
	case "list":
		// "List" format is a flattened version of "tree"
		output := make([]inspect.NodePodStatus, 0)
		for podId, nodes := range statusMap {
			for node, status := range nodes {
				status.PodId = podId
				status.NodeName = node
				output = append(output, status)
			}
		}
		enc := json.NewEncoder(os.Stdout)
		enc.Encode(output)
	default:
		log.Fatalf("unrecognized format: %s", *format)
	}
}
Example #22
File: main.go Project: tomzhang/p2
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy
	nodes remain up at all times, according to p2's health checks.
`

	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
Example #23
File: main.go Project: rudle/p2
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}

		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}
			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everwhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #24
File: main.go Project: drcapulet/p2
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
Example #25
func main() {
	// 1. Generate pod for preparer in this code version (`rake artifact:prepare`)
	// 2. Locate manifests for preparer pod, premade consul pod
	// 3. Execute bootstrap with premade consul pod and preparer pod
	// 4. Delete all pods from the pod store (uuid pods). This allows the same vagrant VM to be used
	// between tests
	// 5. Deploy p2-rctl-server pod with p2-schedule
	// 6. Schedule a hello pod manifest with a replication controller
	// 7. Schedule a hello pod as a "uuid pod"
	// 8. Verify that p2-rctl-server is running by checking health.
	// 9. Verify that the RC-deployed hello is running by checking health.
	// Monitor using written pod label queries.
	// 10. Verify that the uuid hello pod is running by curling its HTTP port.
	// Health is not checked for uuid pods so checking health cannot be used.

	// list of services running on integration test host
	services := []string{"p2-preparer", "hello"}
	tempdir, err := ioutil.TempDir("", "single-node-check")
	if err != nil {
		log.Fatalln("Could not create temp directory, bailing")
	}
	log.Printf("Putting test manifests in %s\n", tempdir)

	userHookManifest, err := userCreationHookManifest(tempdir)
	if err != nil {
		log.Fatalf("Couldn't schedule the user creation hook: %s", err)
	}

	preparerManifest, err := generatePreparerPod(tempdir, userHookManifest)
	if err != nil {
		log.Fatalf("Could not generate preparer pod: %s\n", err)
	}
	config, err := preparer.LoadConfig(preparerManifest)
	if err != nil {
		log.Fatalf("could not unmarshal config: %s\n", err)
	}

	consulManifest, err := getConsulManifest(tempdir)
	if err != nil {
		log.Fatalf("Could not generate consul pod: %s\n", err)
	}
	signedPreparerManifest, err := signManifest(preparerManifest, tempdir)
	if err != nil {
		log.Fatalf("Could not sign preparer manifest: %s\n", err)
	}
	signedConsulManifest, err := signManifest(consulManifest, tempdir)
	if err != nil {
		log.Fatalf("Could not sign consul manifest: %s\n", err)
	}

	fmt.Println("Executing bootstrap")
	err = executeBootstrap(signedPreparerManifest, signedConsulManifest)
	if err != nil {
		log.Fatalf("Could not execute bootstrap: %s\n%s", err, targetLogs())
	}

	// Wait a bit for preparer's http server to be ready
	err = waitForStatus(preparerStatusPort, "preparer", 10*time.Second)
	if err != nil {
		log.Fatalf("Couldn't check preparer status: %s", err)
	}

	consulClient := kp.NewConsulClient(kp.Options{})
	// Get all the pod unique keys so we can unschedule them all
	keys, _, err := consulClient.KV().Keys(podstore.PodTree+"/", "", nil)
	if err != nil {
		log.Fatalf("Could not fetch pod keys to remove from store at beginning of test: %s", err)
	}

	podStore := podstore.NewConsul(consulClient.KV())
	for _, key := range keys {
		keyParts := strings.Split(key, "/")
		err = podStore.Unschedule(types.PodUniqueKey(keyParts[len(keyParts)-1]))
		if err != nil {
			log.Fatalf("Could not unschedule pod %s from consul: %s", keyParts[len(keyParts)-1], err)
		}
	}

	err = scheduleRCTLServer(tempdir)
	if err != nil {
		log.Fatalf("Could not schedule RCTL server: %s", err)
	}

	// Now we're going to test some conditions that each take non-negligible amount of time to verify.
	// We'll spin up a goroutine for each "test" which either closes the error channel, or passes an error.
	type testCase struct {
		testName string
		errCh    chan error
		logger   logging.Logger
	}

	var testCases []testCase

	// Test that a "legacy" pod installed by an RC comes up correctly and has health reported
	legacyTest := make(chan error)
	verifyLegacyPodLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyLegacyPod",
	})
	go verifyLegacyPod(legacyTest, tempdir, config, services, verifyLegacyPodLogger)
	testCases = append(testCases, testCase{
		testName: "verifyLegacyPod",
		errCh:    legacyTest,
		logger:   verifyLegacyPodLogger,
	})

	// Test that a "uuid" pod installed by p2-schedule comes up correctly
	uuidTest := make(chan error)
	verifyUUIDPodLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyUUIDPod",
	})
	testCases = append(testCases, testCase{
		testName: "verifyUUIDPod",
		errCh:    uuidTest,
		logger:   verifyUUIDPodLogger,
	})
	go verifyUUIDPod(uuidTest, tempdir, verifyUUIDPodLogger)

	// Test that exit information for a process started by a pod is properly recorded in consul.
	processExitTest := make(chan error)
	verifyProcessExitLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"test_case": "verifyProcessExit",
	})
	go verifyProcessExit(processExitTest, tempdir, verifyProcessExitLogger)
	testCases = append(testCases, testCase{
		testName: "verifyProcessResult",
		errCh:    processExitTest,
		logger:   verifyProcessExitLogger,
	})

	for _, t := range testCases {
		select {
		case err, ok := <-t.errCh:
			if err != nil {
				t.logger.Fatal(err)
			}
			if ok {
				t.logger.Fatal("Error channel not closed")
			}
		case <-time.After(1 * time.Minute):
			t.logger.Fatal("Timed out waiting for a result")
		}

		t.logger.Infoln("Success!")
	}
}
Example #26
File: main.go Project: rudle/p2
func main() {
	cmd, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	applicator := labels.NewConsulApplicator(client, 3)
	exitCode := 0

	switch cmd {
	case CmdShow:
		labelType, err := labels.AsType(*showLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while parsing label type. Check the commandline.\n%v\n", err)
			exitCode = 1
			break
		}

		labelsForEntity, err := applicator.GetLabels(labelType, *showID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
			exitCode = 1
			break
		}
		fmt.Printf("%s/%s: %s\n", labelType, *showID, labelsForEntity.Labels.String())
		return
	case CmdApply:
		// if xnor(selector, id)
		if (*applySubjectSelector == "") == (*applySubjectID == "") {
			fmt.Fprint(os.Stderr, "Must pass either an ID or a selector for objects to apply the given label to")
			exitCode = 1
			break
		}
		autoConfirm = *applyAutoConfirm

		labelType, err := labels.AsType(*applyLabelType)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unrecognized type %s. Check the commandline and documentation.\nhttps://godoc.org/github.com/square/p2/pkg/labels#pkg-constants\n", *applyLabelType)
			exitCode = 1
			break
		}

		additiveLabels := *applyAddititiveLabels
		destructiveKeys := *applyDestructiveLabels

		var matches []labels.Labeled
		if *applySubjectSelector != "" {
			subject, err := klabels.Parse(*applySubjectSelector)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while parsing subject label. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}

			cachedMatch := false
			matches, err = applicator.GetMatches(subject, labelType, cachedMatch)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error while finding label matches. Check the syntax.\n%v\n", err)
				exitCode = 1
				break
			}
		} else {
			matches = []labels.Labeled{{ID: *applySubjectID}}
		}

		if len(additiveLabels) > 0 {
			fmt.Printf("labels to be added: %s\n", klabels.Set(additiveLabels))
		}

		if len(destructiveKeys) > 0 {
			fmt.Printf("labels to be removed: %s\n", destructiveKeys)
		}

		var labelsForEntity labels.Labeled
		for _, match := range matches {
			entityID := match.ID

			err := applyLabels(applicator, entityID, labelType, additiveLabels, destructiveKeys)
			if err != nil {
				fmt.Printf("Encountered err during labeling, %v", err)
				exitCode = 1
			}

			labelsForEntity, err = applicator.GetLabels(labelType, entityID)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Got error while querying labels. %v\n", err)
				exitCode = 1
				continue
			}
			fmt.Printf("%s/%s: %s\n", labelType, entityID, labelsForEntity.Labels.String())
		}
		break
	}

	os.Exit(exitCode)
}
Example #27
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)
	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)
	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}
	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error while displaying PC: %v\n%+v", err, pc)
		}
		fmt.Printf("%s", bytes)
	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #28
File: main.go Project: tomzhang/p2
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = &logrus.TextFormatter{}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := rc.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = rc.NewApplicatorScheduler(httpLabeler)
	}
	rctl := RCtl{
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case CMD_CREATE:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels)
	case CMD_DELETE:
		rctl.Delete(*deleteID, *deleteForce)
	case CMD_REPLICAS:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case CMD_LIST:
		rctl.List(*listJSON)
	case CMD_GET:
		rctl.Get(*getID, *getManifest)
	case CMD_ENABLE:
		rctl.Enable(*enableID)
	case CMD_DISABLE:
		rctl.Disable(*disableID)
	case CMD_ROLL:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollDelete)
	case CMD_FARM:
		rctl.Farm()
	case CMD_SCHEDUP:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed, *schedupDelete)
	}
}
Example #29
func verifyProcessExit(errCh chan error, tempDir string, logger logging.Logger) {
	defer close(errCh)

	// Schedule a uuid pod
	podUniqueKey, err := createHelloUUIDPod(tempDir, 43772, logger)
	if err != nil {
		errCh <- fmt.Errorf("Could not schedule UUID hello pod: %s", err)
		return
	}

	logger = logger.SubLogger(logrus.Fields{
		"pod_unique_key": podUniqueKey,
	})
	logger.Infoln("Scheduled hello instance on port 43772")

	err = verifyHelloRunning(podUniqueKey, logger)
	if err != nil {
		errCh <- fmt.Errorf("Couldn't get hello running as a uuid pod: %s", err)
		return
	}
	logger.Infoln("Hello instance launched")

	time.Sleep(3 * time.Second)

	logger.Infoln("Waiting for hello instance to listen on 43772")
	// now wait for the hello server to start running
	timeout := time.After(30 * time.Second)
	for {
		resp, err := http.Get("http://localhost:43772/")
		if err == nil {
			resp.Body.Close()
			break
		}

		select {
		case <-timeout:
			errCh <- fmt.Errorf("Hello didn't come up listening on 43772: %s", err)
			return
		default:
		}

		time.Sleep(1 * time.Second)
	}

	exitCode := rand.Intn(100) + 1
	logger.Infof("Causing hello on 43772 to exit with status %d", exitCode)
	// Make an http request to hello to make it exit with exitCode. We expect the http request to fail due
	// to the server exiting, so don't check for http errors.
	_, err = http.Get(fmt.Sprintf("http://localhost:43772/exit/%d", exitCode))
	if err == nil {
		// This is bad, it means the hello server didn't die and kill our request
		// in the middle
		errCh <- util.Errorf("Couldn't kill hello server with http request")
		return
	}

	urlError, ok := err.(*url.Error)
	if ok && urlError.Err == io.EOF {
		// This is good, it means the server died
	} else {
		errCh <- fmt.Errorf("Couldn't tell hello to die over http: %s", err)
		return
	}

	logger.Infoln("Checking for exit code in SQL database")
	finishService, err := podprocess.NewSQLiteFinishService(sqliteFinishDatabasePath, logging.DefaultLogger)
	if err != nil {
		errCh <- err
		return
	}

	var finishResult podprocess.FinishOutput
	timeout = time.After(30 * time.Second)
	for {
		finishResult, err = finishService.LastFinishForPodUniqueKey(podUniqueKey)
		if err == nil {
			break
		}

		select {
		case <-timeout:
			// Try to manually run the finish script in order to make debugging the test failure easier
			output, err := exec.Command("sudo", fmt.Sprintf("/var/service/hello-%s__hello__launch/finish", podUniqueKey), "1", "2").CombinedOutput()
			if err != nil {
				logger.WithError(err).Infoln("DEBUG: Debug attempt to run finish script failed")
			}

			logger.Infof("DEBUG: Output of direct execution of finish script: %s", string(output))

			errCh <- fmt.Errorf("Did not find a finish row by the deadline: %s", err)
			return
		default:
		}

		// Avoid a busy-wait while polling for the finish row.
		time.Sleep(1 * time.Second)
	}

	if finishResult.PodUniqueKey != podUniqueKey {
		errCh <- fmt.Errorf("Expected finish result for '%s' but it was for '%s'", podUniqueKey, finishResult.PodUniqueKey)
		return
	}

	if finishResult.ExitCode != exitCode {
		errCh <- fmt.Errorf("Exit code for '%s' in the sqlite database was expected to be %d but was %d", podUniqueKey, exitCode, finishResult.ExitCode)
		return
	}

	logger.Infoln("Checking for exit code in consul")
	timeout = time.After(30 * time.Second)
	podStatusStore := podstatus.NewConsul(statusstore.NewConsul(kp.NewConsulClient(kp.Options{})), kp.PreparerPodStatusNamespace)
	for {
		podStatus, _, err := podStatusStore.Get(podUniqueKey)
		if err != nil {
			errCh <- err
			return
		}

		found := false
		for _, processStatus := range podStatus.ProcessStatuses {
			if processStatus.LaunchableID == "hello" && processStatus.EntryPoint == "launch" {
				found = true
				if processStatus.LastExit == nil {
					errCh <- fmt.Errorf("Found no last exit in consul pod status for %s", podUniqueKey)
					return
				}

				if processStatus.LastExit.ExitCode != exitCode {
					errCh <- fmt.Errorf("Exit code for '%s' in consul was expected to be %d but was %d", podUniqueKey, exitCode, finishResult.ExitCode)
					return
				}
			}
		}

		if found {
			logger.Infoln("Successful!")
			break
		}

		select {
		case <-timeout:
			errCh <- fmt.Errorf("There was no pod process for hello/launch for %s in consul", podUniqueKey)
			return
		default:
		}

		// Poll consul at a gentle interval rather than busy-waiting.
		time.Sleep(1 * time.Second)
	}
}
Example #30
func ParseWithConsulOptions() (string, kp.Options, labels.ApplicatorWithoutWatches) {
	consulURL := kingpin.Flag("consul", "The hostname and port of a consul agent in the p2 cluster. Defaults to 0.0.0.0:8500.").String()
	httpApplicatorURL := kingpin.Flag("http-applicator-url", "The URL of a labels.httpApplicator target, including the protocol and port. For example, https://consul-server.io:9999").URL()
	token := kingpin.Flag("token", "The consul ACL token to use. Empty by default.").String()
	tokenFile := kingpin.Flag("token-file", "The file containing the Consul ACL token").ExistingFile()
	headers := kingpin.Flag("header", "An HTTP header to add to requests, in KEY=VALUE form. Can be specified multiple times.").StringMap()
	https := kingpin.Flag("https", "Use HTTPS").Bool()
	wait := kingpin.Flag("wait", "Maximum duration for Consul watches, before resetting and starting again.").Default("30s").Duration()
	caFile := kingpin.Flag("tls-ca-file", "File containing the x509 PEM-encoded CA certificate").ExistingFile()
	keyFile := kingpin.Flag("tls-key-file", "File containing the x509 PEM-encoded private key").ExistingFile()
	certFile := kingpin.Flag("tls-cert-file", "File containing the x509 PEM-encoded public key certificate").ExistingFile()

	cmd := kingpin.Parse()

	if *tokenFile != "" {
		tokenBytes, err := ioutil.ReadFile(*tokenFile)
		if err != nil {
			log.Fatalln(err)
		}
		*token = string(tokenBytes)
	}
	var transport http.RoundTripper
	if *caFile != "" || *keyFile != "" || *certFile != "" {
		tlsConfig, err := netutil.GetTLSConfig(*certFile, *keyFile, *caFile)
		if err != nil {
			log.Fatalln(err)
		}

		transport = &http.Transport{
			TLSClientConfig: tlsConfig,
			// same dialer as http.DefaultTransport
			// (http.DefaultClient.Timeout is zero, so spell out the
			// DefaultTransport values explicitly)
			Dial: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		}
	} else {
		transport = http.DefaultTransport
	}
	httpClient := netutil.NewHeaderClient(*headers, transport)

	kpOpts := kp.Options{
		Address:  *consulURL,
		Token:    *token,
		Client:   httpClient,
		HTTPS:    *https,
		WaitTime: *wait,
	}

	var applicator labels.ApplicatorWithoutWatches
	var err error
	if *httpApplicatorURL != nil {
		applicator, err = labels.NewHTTPApplicator(httpClient, *httpApplicatorURL)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		applicator = labels.NewConsulApplicator(kp.NewConsulClient(kpOpts), 0)
	}
	return cmd, kpOpts, applicator
}
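
Taken together, most of the examples above reduce to the same bootstrap pattern: parse the shared Consul flags, build a client with kp.NewConsulClient, and wrap it in a store. A minimal sketch (the import paths are assumptions based on the square/p2 layout, and the Ping check is only a placeholder for real work):

	package main

	import (
		"log"

		"github.com/square/p2/pkg/kp"
		"github.com/square/p2/pkg/kp/flags"
	)

	func main() {
		// Parse the shared Consul flags, then build the client and store.
		_, opts, _ := flags.ParseWithConsulOptions()
		client := kp.NewConsulClient(opts)
		store := kp.NewConsulStore(client)

		if err := store.Ping(); err != nil {
			log.Fatalf("could not reach consul: %s", err)
		}
	}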