Example #1
func basicLogger() logging.Logger {
	return logging.NewLogger(
		logrus.Fields{
			"pod": "testpod",
		},
	)
}
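A minimal usage sketch (not part of the original example): the Logger returned by basicLogger carries the "pod" field on every entry, and additional fields can be merged in per call. The function name, the "node" field, and the messages below are illustrative.

func basicLoggerUsage() {
	lgr := basicLogger()
	// Every entry carries pod=testpod; per-call fields are merged in.
	lgr.WithFields(logrus.Fields{"node": "node1.example.com"}).Infoln("starting watch")
	lgr.NoFields().Infoln("watch started")
}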
Example #2
// UpdatePods looks at the pods currently being monitored and
// compares that to what the reality store indicates should be
// running. UpdatePods then shuts down the monitors for dead
// pods and creates PodWatch structs for new pods.
func TestUpdatePods(t *testing.T) {
	var current []PodWatch
	var reality []kp.ManifestResult
	// ids for current: 0, 1, 2, 3
	for i := 0; i < 4; i++ {
		current = append(current, *newWatch(types.PodID(strconv.Itoa(i))))
	}
	// ids for reality: 1, 2, test
	for i := 1; i < 3; i++ {
		// Health checking is not supported for uuid pods, so ensure that even
		// if /reality contains a uuid pod we don't actually watch its health
		uuidKeyResult := newManifestResult("some_uuid_pod")
		uuidKeyResult.PodUniqueKey = types.NewPodUUID()
		reality = append(reality, uuidKeyResult)
		reality = append(reality, newManifestResult(current[i].manifest.ID()))
	}
	reality = append(reality, newManifestResult("test"))

	// ids for pods: 1, 2, test
	// 0, 3 should have values in their shutdownCh
	logger := logging.NewLogger(logrus.Fields{})
	pods := updatePods(&MockHealthManager{}, nil, nil, current, reality, "", &logger)
	Assert(t).AreEqual(true, <-current[0].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(true, <-current[3].shutdownCh, "this PodWatch should have been shutdown")

	Assert(t).AreEqual(current[1].manifest.ID(), pods[0].manifest.ID(), "pod with id:1 should have been returned")
	Assert(t).AreEqual(current[2].manifest.ID(), pods[1].manifest.ID(), "pod with id:2 should have been returned")
	Assert(t).AreEqual("test", string(pods[2].manifest.ID()), "should have added pod with id:test to list")
}
Example #3
func main() {
	quitCh := make(chan struct{})

	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)

	dsf := ds_farm.NewFarm(kpStore, dsStore, labeler, labels.NewConsulApplicator(client, 0), sessions, logger, nil, &healthChecker, 1*time.Second, *useCachePodMatches)

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()

	dsf.Start(quitCh)
}
Example #4
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)

	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs:        rcstore.NewConsul(client, labeler, 3),
		rls:        rollstore.NewConsul(client, labeler, nil),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
Example #5
func main() {
	logger := logging.NewLogger(logrus.Fields{})
	configPath := os.Getenv("CONFIG_PATH")
	if configPath == "" {
		logger.NoFields().Fatalln("No CONFIG_PATH variable was given")
	}
	preparerConfig, err := preparer.LoadPreparerConfig(configPath)
	if err != nil {
		logger.WithField("inner_err", err).Fatalln("could not load preparer config")
	}

	prep, err := preparer.New(preparerConfig, logger)
	if err != nil {
		logger.WithField("inner_err", err).Fatalln("Could not initialize preparer")
	}
	defer prep.Close()

	logger.WithFields(logrus.Fields{
		"starting":    true,
		"node_name":   preparerConfig.NodeName,
		"consul":      preparerConfig.ConsulAddress,
		"hooks_dir":   preparerConfig.HooksDirectory,
		"status_port": preparerConfig.StatusPort,
		"auth_type":   preparerConfig.Auth["type"],
		"keyring":     preparerConfig.Auth["keyring"],
		"version":     version.VERSION,
	}).Infoln("Preparer started successfully")

	quitMainUpdate := make(chan struct{})
	quitHookUpdate := make(chan struct{})
	quitMonitorPodHealth := make(chan struct{})

	go prep.WatchForPodManifestsForNode(quitMainUpdate)
	go prep.WatchForHooks(quitHookUpdate)

	if preparerConfig.StatusPort != 0 {
		http.HandleFunc("/_status",
			func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprintf(w, "p2-preparer OK")
			})
		go http.ListenAndServe(fmt.Sprintf(":%d", preparerConfig.StatusPort), nil)
	}

	// Launch health checking watch. This watch tracks health of
	// all pods on this host and writes the information to consul
	go watch.MonitorPodHealth(preparerConfig, &logger, quitMonitorPodHealth)

	waitForTermination(logger, quitMainUpdate, quitHookUpdate, quitMonitorPodHealth)

	logger.NoFields().Infoln("Terminating")
}
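waitForTermination is not shown in this example. Below is a hedged sketch of what it might look like, reusing the signal-handling pattern from the other examples; the variadic signature, the os/signal and syscall imports, and the log message are assumptions.

func waitForTermination(logger logging.Logger, quitChans ...chan struct{}) {
	// Block until SIGTERM or interrupt, then close every quit channel so
	// the watch goroutines launched above can exit.
	signalCh := make(chan os.Signal, 2)
	signal.Notify(signalCh, syscall.SIGTERM, os.Interrupt)
	received := <-signalCh
	logger.WithField("signal", received.String()).Infoln("Received termination signal, shutting down")
	for _, ch := range quitChans {
		close(ch)
	}
}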
Example #6
// UpdatePods looks at the pods currently being monitored and
// compares that to what the reality store indicates should be
// running. UpdatePods then shuts down the monitors for dead
// pods and creates PodWatch structs for new pods.
func TestUpdatePods(t *testing.T) {
	var current []PodWatch
	var reality []kp.ManifestResult
	// ids for current: 0, 1, 2, 3
	for i := 0; i < 4; i++ {
		current = append(current, *newWatch(strconv.Itoa(i)))
	}
	// ids for reality: 1, 2, test
	for i := 1; i < 3; i++ {
		reality = append(reality, newManifestResult(current[i].manifest.ID()))
	}
	reality = append(reality, newManifestResult("test"))

	// ids for pods: 1, 2, test
	// 0, 3 should have values in their shutdownCh
	logger := logging.NewLogger(logrus.Fields{})
	pods := updatePods(&MockHealthManager{}, nil, current, reality, "", &logger)
	Assert(t).AreEqual(true, <-current[0].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(true, <-current[3].shutdownCh, "this PodWatch should have been shutdown")

	Assert(t).AreEqual(current[1].manifest.ID(), pods[0].manifest.ID(), "pod with id:1 should have been returned")
	Assert(t).AreEqual(current[2].manifest.ID(), pods[1].manifest.ID(), "pod with id:2 should have been returned")
	Assert(t).AreEqual("test", pods[2].manifest.ID(), "should have added pod with id:test to list")
}
Example #7
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.

	Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com

	This will take the pod whose manifest is located at helloworld.yaml and
	deploy it to the three nodes aws1.example.com, aws2.example.com, and
	aws3.example.com

	Because of --min-nodes 2, the replicator will ensure that at least two healthy
	nodes remain up at all times, according to p2's health checks.
`

	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)

	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}

	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}

	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}

	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()

	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()

	replication.Enact()
}
Example #8
File: main.go Project: rudle/p2
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}

		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}
			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everwhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #9
func main() {
	// Other packages define flags, and they need parsing here.
	kingpin.Parse()

	logger := logging.NewLogger(logrus.Fields{})
	configPath := os.Getenv("CONFIG_PATH")
	if configPath == "" {
		logger.NoFields().Fatalln("No CONFIG_PATH variable was given")
	}
	preparerConfig, err := preparer.LoadConfig(configPath)
	if err != nil {
		logger.WithError(err).Fatalln("could not load preparer config")
	}
	err = param.Parse(preparerConfig.Params)
	if err != nil {
		logger.WithError(err).Fatalln("invalid parameter")
	}

	statusServer, err := preparer.NewStatusServer(preparerConfig.StatusPort, preparerConfig.StatusSocket, &logger)
	if err == preparer.NoServerConfigured {
		logger.NoFields().Warningln("No status port or socket provided, no status server configured")
	} else if err != nil {
		logger.WithError(err).Fatalln("Could not start status server")
	} else {
		go statusServer.Serve()
		defer statusServer.Close()
	}

	prep, err := preparer.New(preparerConfig, logger)
	if err != nil {
		logger.WithError(err).Fatalln("Could not initialize preparer")
	}
	defer prep.Close()

	logger.WithFields(logrus.Fields{
		"starting":    true,
		"node_name":   preparerConfig.NodeName,
		"consul":      preparerConfig.ConsulAddress,
		"hooks_dir":   preparerConfig.HooksDirectory,
		"status_port": preparerConfig.StatusPort,
		"auth_type":   preparerConfig.Auth["type"],
		"keyring":     preparerConfig.Auth["keyring"],
		"version":     version.VERSION,
	}).Infoln("Preparer started successfully")

	quitMainUpdate := make(chan struct{})
	var quitChans []chan struct{}

	// Install the latest hooks before any pods are processed
	err = prep.InstallHooks()
	if err != nil {
		logger.WithError(err).Fatalln("Could not do initial hook installation")
	}
	go prep.WatchForPodManifestsForNode(quitMainUpdate)

	if prep.PodProcessReporter != nil {
		quitPodProcessReporter := make(chan struct{})
		quitChans = append(quitChans, quitPodProcessReporter)
		go prep.PodProcessReporter.Run(quitPodProcessReporter)
	}

	// Launch health checking watch. This watch tracks health of
	// all pods on this host and writes the information to consul
	quitMonitorPodHealth := make(chan struct{})
	var wgHealth sync.WaitGroup
	wgHealth.Add(1)
	go func() {
		defer wgHealth.Done()
		watch.MonitorPodHealth(preparerConfig, &logger, quitMonitorPodHealth)
	}()

	waitForTermination(logger, quitMainUpdate, quitChans)

	// The preparer should continue to report app health during a shutdown, so terminate
	// the health monitor last.
	close(quitMonitorPodHealth)
	wgHealth.Wait()

	logger.NoFields().Infoln("Terminating")
}
Example #10
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)
	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)
	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}
	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
			os.Exit(1)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc)
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
Example #11
func init() {
	Log = logging.NewLogger(logrus.Fields{})
}
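A short, hypothetical use of the package-level Log initialized above; the function, field, and message are illustrative.

func reportStartup(nodeName string) {
	// Fields are merged into the empty base field set from init().
	Log.WithFields(logrus.Fields{"node_name": nodeName}).Infoln("component initialized")
}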
Example #12
func main() {
	replicate.Version(version.VERSION)
	replicate.Parse(os.Args[1:])

	opts := kp.Options{
		Address: *consulUrl,
		Token:   *consulToken,
		Client:  net.NewHeaderClient(*headers, http.DefaultTransport),
		HTTPS:   *https,
	}
	store := kp.NewConsulStore(opts)
	healthChecker := health.NewConsulHealthChecker(opts)

	// Fetch manifest (could be URI) into temp file
	localMan, err := ioutil.TempFile("", "tempmanifest")
	if err != nil {
		log.Fatalln("Couldn't create tempfile")
	}
	defer os.Remove(localMan.Name())
	if err := uri.URICopy(*manifestUri, localMan.Name()); err != nil {
		log.Fatalf("Could not fetch manifest: %s", err)
	}

	manifest, err := pods.ManifestFromPath(localMan.Name())
	if err != nil {
		log.Fatalf("Invalid manifest: %s", err)
	}

	healthResults, err := healthChecker.Service(manifest.ID())
	if err != nil {
		log.Fatalf("Could not get initial health results: %s", err)
	}
	order := health.SortOrder{
		Nodes:  *hosts,
		Health: healthResults,
	}
	sort.Sort(order)

	repl := replication.Replicator{
		Manifest: *manifest,
		Store:    store,
		Health:   healthChecker,
		Nodes:    *hosts, // sorted by the health.SortOrder
		Active:   len(*hosts) - *minNodes,
		Logger: logging.NewLogger(logrus.Fields{
			"pod": manifest.ID(),
		}),
		Threshold: health.HealthState(*threshold),
	}
	repl.Logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}

	if err := repl.CheckPreparers(); err != nil {
		log.Fatalf("Preparer check failed: %s", err)
	}

	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lock, err := store.NewLock(fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now()))
	if err != nil {
		log.Fatalf("Could not generate lock: %s", err)
	}
	// deferring on main is not particularly useful, since os.Exit will skip
	// the defer, so we have to manually destroy the lock at the right exit
	// paths
	go func() {
		for range time.Tick(10 * time.Second) {
			if err := lock.Renew(); err != nil {
				// if the renewal failed, then either the lock is already dead
				// or the consul agent cannot be reached
				log.Fatalf("Lock could not be renewed: %s", err)
			}
		}
	}()
	if err := repl.LockHosts(lock, *overrideLock); err != nil {
		lock.Destroy()
		log.Fatalf("Could not lock all hosts: %s", err)
	}

	// auto-drain this channel
	errs := make(chan error)
	go func() {
		for range errs {
		}
	}()

	quitch := make(chan struct{})
	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitch)
		lock.Destroy()
		os.Exit(1)
	}()

	repl.Enact(errs, quitch)
	lock.Destroy()
}
Example #13
func authorize(manifest manifest.Manifest) error {
	var policy auth.Policy
	var err error
	switch *authType {
	case auth.Null:
		if *keyring != "" {
			return util.Errorf("--keyring may not be specified if --auth-type is '%s'", *authType)
		}
		if *deployPolicy != "" {
			return util.Errorf("--deploy-policy may not be specified if --auth-type is '%s'", *authType)
		}
		if len(*allowedUsers) != 0 {
			return util.Errorf("--allowed-users may not be specified if --auth-type is '%s'", *authType)
		}

		return nil
	case auth.Keyring:
		if *keyring == "" {
			return util.Errorf("Must specify --keyring if --auth-type is '%s'", *authType)
		}
		if len(*allowedUsers) == 0 {
			return util.Errorf("Must specify at least one allowed user if using a keyring auth type")
		}

		policy, err = auth.NewFileKeyringPolicy(
			*keyring,
			map[types.PodID][]string{
				constants.PreparerPodID: *allowedUsers,
			},
		)
		if err != nil {
			return err
		}
	case auth.User:
		if *keyring == "" {
			return util.Errorf("Must specify --keyring if --auth-type is '%s'", *authType)
		}
		if *deployPolicy == "" {
			return util.Errorf("Must specify --deploy-policy if --auth-type is '%s'", *authType)
		}

		policy, err = auth.NewUserPolicy(
			*keyring,
			*deployPolicy,
			constants.PreparerPodID,
			constants.PreparerPodID.String(),
		)
		if err != nil {
			return err
		}
	default:
		return util.Errorf("Unknown --auth-type: %s", *authType)
	}

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)

	err = policy.AuthorizeApp(manifest, logger)
	if err != nil {
		if err, ok := err.(auth.Error); ok {
			logger.WithFields(err.Fields).Errorln(err)
		} else {
			logger.NoFields().Errorln(err)
		}
		return err
	}

	return nil
}
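A hedged call-site sketch for authorize (not part of the original example): manifest.FromPath is used the same way in Example #8, while the helper name, the path, and the fatal-error handling here are illustrative.

func authorizeFromPath(path string) {
	m, err := manifest.FromPath(path) // path is illustrative
	if err != nil {
		log.Fatalf("could not read manifest: %v", err)
	}
	if err := authorize(m); err != nil {
		log.Fatalf("manifest failed authorization: %v", err)
	}
}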
Example #14
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = &logrus.TextFormatter{}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	client := kp.NewConsulClient(opts)
	labeler := labels.NewConsulApplicator(client, 3)
	sched := rc.NewApplicatorScheduler(labeler)
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logging.DefaultLogger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logging.DefaultLogger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = rc.NewApplicatorScheduler(httpLabeler)
	}
	rctl := RCtl{
		baseClient: client,
		rcs:        rcstore.NewConsul(client, 3),
		rls:        rollstore.NewConsul(client),
		kps:        kp.NewConsulStore(client),
		labeler:    labeler,
		sched:      sched,
		hcheck:     checker.NewConsulHealthChecker(client),
		logger:     logger,
	}

	switch cmd {
	case CMD_CREATE:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels)
	case CMD_DELETE:
		rctl.Delete(*deleteID, *deleteForce)
	case CMD_REPLICAS:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case CMD_LIST:
		rctl.List(*listJSON)
	case CMD_GET:
		rctl.Get(*getID, *getManifest)
	case CMD_ENABLE:
		rctl.Enable(*enableID)
	case CMD_DISABLE:
		rctl.Disable(*disableID)
	case CMD_ROLL:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollDelete)
	case CMD_FARM:
		rctl.Farm()
	case CMD_SCHEDUP:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed, *schedupDelete)
	}
}
Example #15
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
Example #16
File: main.go Project: rudle/p2
func main() {
	// Other packages define flags, and they need parsing here.
	kingpin.Parse()

	logger := logging.NewLogger(logrus.Fields{})
	configPath := os.Getenv("CONFIG_PATH")
	if configPath == "" {
		logger.NoFields().Fatalln("No CONFIG_PATH variable was given")
	}
	preparerConfig, err := preparer.LoadConfig(configPath)
	if err != nil {
		logger.WithError(err).Fatalln("could not load preparer config")
	}
	err = param.Parse(preparerConfig.Params)
	if err != nil {
		logger.WithError(err).Fatalln("invalid parameter")
	}

	statusServer, err := preparer.NewStatusServer(preparerConfig.StatusPort, preparerConfig.StatusSocket, &logger)
	if err == preparer.NoServerConfigured {
		logger.NoFields().Warningln("No status port or socket provided, no status server configured")
	} else if err != nil {
		logger.WithError(err).Fatalln("Could not start status server")
	} else {
		go statusServer.Serve()
		defer statusServer.Close()
	}

	prep, err := preparer.New(preparerConfig, logger)
	if err != nil {
		logger.WithError(err).Fatalln("Could not initialize preparer")
	}
	defer prep.Close()

	logger.WithFields(logrus.Fields{
		"starting":    true,
		"node_name":   preparerConfig.NodeName,
		"consul":      preparerConfig.ConsulAddress,
		"hooks_dir":   preparerConfig.HooksDirectory,
		"status_port": preparerConfig.StatusPort,
		"auth_type":   preparerConfig.Auth["type"],
		"keyring":     preparerConfig.Auth["keyring"],
		"version":     version.VERSION,
	}).Infoln("Preparer started successfully")

	quitMainUpdate := make(chan struct{})
	quitHookUpdate := make(chan struct{})
	quitChans := []chan struct{}{quitHookUpdate}

	// Guarantee that hooks are synced before any other pods are processed
	err = prep.SyncHooksOnce()
	if err != nil {
		logger.WithError(err).Fatalln("Could not do initial sync of hooks")
	}
	go prep.WatchForPodManifestsForNode(quitMainUpdate)
	go prep.WatchForHooks(quitHookUpdate)

	// Launch health checking watch. This watch tracks health of
	// all pods on this host and writes the information to consul
	quitMonitorPodHealth := make(chan struct{})
	go watch.MonitorPodHealth(preparerConfig, &logger, quitMonitorPodHealth)
	quitChans = append(quitChans, quitMonitorPodHealth)

	waitForTermination(logger, quitMainUpdate, quitChans)

	logger.NoFields().Infoln("Terminating")
}