Example #1
func (r *replication) shouldScheduleForNode(node types.NodeName, logger logging.Logger) bool {
	nodeReality, err := r.queryReality(node)
	if err == pods.NoCurrentManifest {
		logger.Infoln("Nothing installed on this node yet.")
		return true
	}
	if err != nil {
		logger.WithError(err).Errorln("Could not read Reality for this node. Will proceed to schedule onto it.")
		return true
	}

	if nodeReality != nil {
		nodeRealitySHA, err := nodeReality.SHA()
		if err != nil {
			logger.WithError(err).Errorln("Unable to compute manifest SHA for this node. Attempting to schedule anyway")
			return true
		}
		replicationRealitySHA, err := r.manifest.SHA()
		if err != nil {
			logger.WithError(err).Errorln("Unable to compute manifest SHA for this daemon set. Attempting to schedule anyway")
			return true
		}

		if nodeRealitySHA == replicationRealitySHA {
			logger.Info("Reality for this node matches this DS. No action required.")
			return false
		}
	}

	return true
}
Example #2
func NewReplicator(
	manifest manifest.Manifest,
	logger logging.Logger,
	nodes []types.NodeName,
	active int,
	store kp.Store,
	labeler Labeler,
	health checker.ConsulHealthChecker,
	threshold health.HealthState,
	lockMessage string,
	timeout time.Duration,
) (Replicator, error) {
	if active < 1 {
		return replicator{}, util.Errorf("Active must be >= 1, was %d", active)
	}
	if active > 50 {
		logger.Infof("Number of concurrent updates (%v) is greater than 50, reducing to 50", active)
		active = 50
	}
	return replicator{
		manifest:    manifest,
		logger:      logger,
		nodes:       nodes,
		active:      active,
		store:       store,
		labeler:     labeler,
		health:      health,
		threshold:   threshold,
		lockMessage: lockMessage,
		timeout:     timeout,
	}, nil
}
Example #3
func createHelloUUIDPod(dir string, port int, logger logging.Logger) (types.PodUniqueKey, error) {
	signedManifestPath, err := writeHelloManifest(dir, fmt.Sprintf("hello-uuid-%d.yaml", port), port)
	if err != nil {
		return "", err
	}

	logger.Infoln("Scheduling uuid pod")
	cmd := exec.Command("p2-schedule", "--uuid-pod", signedManifestPath)
	stdout := bytes.Buffer{}
	stderr := bytes.Buffer{}
	cmd.Stdout, cmd.Stderr = &stdout, &stderr
	err = cmd.Run()
	if err != nil {
		fmt.Println(stderr.String())
		return "", err
	}

	var out schedule.Output
	err = json.Unmarshal(stdout.Bytes(), &out)
	if err != nil {
		return "", util.Errorf("Scheduled uuid pod but couldn't parse uuid from p2-schedule output: %s", err)
	}

	logger.Infof("Scheduled uuid pod %s", out.PodUniqueKey)
	return out.PodUniqueKey, nil
}
Example #4
func watchStatus(client client.Client, logger logging.Logger) {
	key, err := types.ToPodUniqueKey(*podUniqueKey)
	if err != nil {
		logger.Fatalf("Could not parse passed pod unique key %q as uuid: %s", *podUniqueKey, err)
	}

	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	outCh, err := client.WatchStatus(ctx, key, 1) // 1 so we wait for the key to exist
	if err != nil {
		logger.Fatal(err)
	}

	for i := 0; i < *numIterations; i++ {
		val, ok := <-outCh
		if !ok {
			logger.Fatal("Channel closed unexpectedly")
		}

		if val.Error != nil {
			logger.Fatal(val.Error)
		}

		bytes, err := json.Marshal(val)
		if err != nil {
			logger.Fatal(err)
		}

		fmt.Println(string(bytes))
	}
}
Example #5
func (p *Preparer) tryRunHooks(hookType hooks.HookType, pod hooks.Pod, manifest manifest.Manifest, logger logging.Logger) {
	err := p.hooks.RunHookType(hookType, pod, manifest)
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"hooks": hookType}).Warnln("Could not run hooks")
	}
}
Example #6
// MonitorPodHealth is meant to be a long running go routine.
// MonitorPodHealth reads from a consul store to determine which
// services should be running on the host. MonitorPodHealth
// runs a CheckHealth routine to monitor the health of each
// service and kills routines for services that should no
// longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	var store kp.Store

	consul := config.ConsulAddress
	node := config.NodeName
	pods := []PodWatch{}
	authtoken, err := preparer.LoadConsulToken(config.ConsulTokenPath)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("Could not load consul token")
	}

	store = kp.NewConsulStore(kp.Options{
		Address: consul,
		HTTPS:   false,
		Token:   authtoken,
		Client:  net.NewHeaderClient(nil, http.DefaultTransport),
	})
	pods = updateHealthMonitors(store, pods, node, logger)
	for {
		select {
		case <-time.After(POLL_KV_FOR_PODS):
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updateHealthMonitors(store, pods, node, logger)
		case <-shutdownCh:
			return
		}
	}
}
Example #7
// NewUpdate creates a new Update. The kp.Store, rcstore.Store, rc.Labeler, and
// scheduler.Scheduler arguments should be the same as those of the RCs themselves. The
// session must be valid for the lifetime of the Update; maintaining this is the
// responsibility of the caller.
func NewUpdate(
	f fields.Update,
	kps kp.Store,
	rcs rcstore.Store,
	hcheck checker.ConsulHealthChecker,
	labeler rc.Labeler,
	sched scheduler.Scheduler,
	logger logging.Logger,
	session kp.Session,
	alerter alerting.Alerter,
) Update {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	logger = logger.SubLogger(logrus.Fields{
		"desired_replicas": f.DesiredReplicas,
		"minimum_replicas": f.MinimumReplicas,
	})
	return &update{
		Update:  f,
		kps:     kps,
		rcs:     rcs,
		hcheck:  hcheck,
		labeler: labeler,
		sched:   sched,
		logger:  logger,
		session: session,
		alerter: alerter,
	}
}
Example #8
func verifyLegacyPod(errCh chan error, tempDir string, config *preparer.PreparerConfig, services []string, logger logging.Logger) {
	defer close(errCh)
	// Schedule a "legacy" hello pod using a replication controller
	rcID, err := createHelloReplicationController(tempDir)
	if err != nil {
		errCh <- fmt.Errorf("Could not create hello pod / rc: %s", err)
		return
	}
	logger.Infof("Created RC #%s for hello\n", rcID)

	err = waitForPodLabeledWithRC(klabels.Everything().Add(rc.RCIDLabel, klabels.EqualsOperator, []string{rcID.String()}), rcID)
	if err != nil {
		errCh <- fmt.Errorf("Failed waiting for pods labeled with the given RC: %v", err)
		return
	}
	err = verifyHelloRunning("", logger)
	if err != nil {
		errCh <- fmt.Errorf("Couldn't get hello running: %s", err)
		return
	}
	err = verifyHealthChecks(config, services)
	if err != nil {
		errCh <- fmt.Errorf("Could not get health check info from consul: %s", err)
		return
	}
}
Example #9
func (p FixedKeyringPolicy) AuthorizeApp(manifest Manifest, logger logging.Logger) error {
	plaintext, signature := manifest.SignatureData()
	if signature == nil {
		return Error{util.Errorf("received unsigned manifest (expected signature)"), nil}
	}
	signer, err := checkDetachedSignature(p.Keyring, plaintext, signature)
	if err != nil {
		return err
	}

	signerId := fmt.Sprintf("%X", signer.PrimaryKey.Fingerprint)
	logger.WithField("signer_key", signerId).Debugln("resolved manifest signature")

	// Check authorization for this package to be deployed by this
	// key, if configured.
	if len(p.AuthorizedDeployers[manifest.ID()]) > 0 {
		found := false
		for _, deployerId := range p.AuthorizedDeployers[manifest.ID()] {
			if deployerId == signerId {
				found = true
				break
			}
		}
		if !found {
			return Error{
				util.Errorf("manifest signer not authorized to deploy " + manifest.ID()),
				map[string]interface{}{"signer_key": signerId},
			}
		}
	}

	return nil
}
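The allow-list check above is plain membership testing over signer key fingerprints. A small self-contained sketch of the same idea follows; the app ID and fingerprints are made up for illustration and are not from the p2 source.

package main

import "fmt"

// authorizedDeployers maps an app ID to the key fingerprints allowed to deploy it.
// IDs and fingerprints here are illustrative only.
var authorizedDeployers = map[string][]string{
	"hello": {"A1B2C3D4E5F60718"},
}

// signerAuthorized mirrors the shape of AuthorizeApp's allow-list check:
// an empty list means any validly signed manifest is accepted.
func signerAuthorized(appID, signerID string) bool {
	allowed := authorizedDeployers[appID]
	if len(allowed) == 0 {
		return true
	}
	for _, id := range allowed {
		if id == signerID {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(signerAuthorized("hello", "A1B2C3D4E5F60718")) // true
	fmt.Println(signerAuthorized("hello", "DEADBEEF00000000")) // false
}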
Example #10
File: farm.go Project: drcapulet/p2
func (dsf *Farm) handleSessionExpiry(dsFields ds_fields.DaemonSet, dsLogger logging.Logger, err error) {
	dsLogger.WithFields(logrus.Fields{
		"ID":           dsFields.ID,
		"Name":         dsFields.Name,
		"NodeSelector": dsFields.NodeSelector.String(),
		"PodID":        dsFields.PodID,
	}).WithError(err).Errorln("Got error while locking daemon set in ds farm - session may be expired")
}
Example #11
func updateHealthMonitors(store kp.Store, watchedPods []PodWatch, node string, logger *logging.Logger) []PodWatch {
	path := kp.RealityPath(node)
	reality, _, err := store.ListPods(path)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("failed to get pods from reality store")
	}

	return updatePods(watchedPods, reality, logger, store, node)
}
Example #12
func (p *Preparer) tryRunHooks(hookType hooks.HookType, pod hooks.Pod, manifest *pods.Manifest, logger logging.Logger) {
	err := p.hooks.RunHookType(hookType, pod, manifest)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"err":   err,
			"hooks": hookType,
		}).Warnln("Could not run hooks")
	}
}
Example #13
File: setup.go Project: tomzhang/p2
func addHooks(preparerConfig *PreparerConfig, logger logging.Logger) {
	for _, dest := range preparerConfig.ExtraLogDestinations {
		logger.WithFields(logrus.Fields{
			"type": dest.Type,
			"path": dest.Path,
		}).Infoln("Adding log destination")
		logger.AddHook(dest.Type, dest.Path)
	}
}
Example #14
func waitForTermination(logger logging.Logger, quitMainUpdate, quitHookUpdate chan struct{}, quitMonitorPodHealth chan struct{}) {
	signalCh := make(chan os.Signal, 2)
	signal.Notify(signalCh, syscall.SIGTERM, os.Interrupt)
	received := <-signalCh
	logger.WithField("signal", received.String()).Infoln("Stopping work")
	quitHookUpdate <- struct{}{}
	quitMainUpdate <- struct{}{}
	quitMonitorPodHealth <- struct{}{}
	<-quitMainUpdate // acknowledgement
}
Example #15
// handlePods has no return value and no output channels. It should do everything
// it needs to do without outside intervention (other than being signalled to quit).
func (p *Preparer) handlePods(podChan <-chan ManifestPair, quit <-chan struct{}) {
	// install new launchables
	var nextLaunch ManifestPair

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case nextLaunch = <-podChan:
			var sha string
			if nextLaunch.Intent != nil {
				sha, _ = nextLaunch.Intent.SHA()
			} else {
				sha, _ = nextLaunch.Reality.SHA()
			}
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod": nextLaunch.ID,
				"sha": sha,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			if nextLaunch.Intent == nil {
				// if intent=nil then reality!=nil and we need to delete the pod
				// therefore we must set working=true here
				working = true
			} else {
				// non-nil intent manifests need to be authorized first
				working = p.authorize(nextLaunch.Intent, manifestLogger)
				if !working {
					p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID)), nextLaunch.Intent, manifestLogger)
				}
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID))

				// TODO better solution: force the preparer to have a 0s default timeout, prevent KILLs
				if pod.Id == POD_ID {
					pod.DefaultTimeout = time.Duration(0)
				}

				ok := p.resolvePair(nextLaunch, pod, manifestLogger)
				if ok {
					nextLaunch = ManifestPair{}
					working = false
				}
			}
		}
	}
}
Example #16
// RetryOrQuit retries the given function until it returns a nil error or the quit
// channel is closed. It returns true if it exited in the former case, false in the
// latter. Errors are sent to the given logger with the given string as the message.
func RetryOrQuit(f func() error, quit <-chan struct{}, logger logging.Logger, errtext string) bool {
	for err := f(); err != nil; err = f() {
		logger.WithError(err).Errorln(errtext)
		select {
		case <-quit:
			return false
		case <-time.After(1 * time.Second):
			// unblock the select and loop again
		}
	}
	return true
}
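A hedged usage sketch for the helper above: retry a flaky operation until it succeeds or shutdown is requested. The caller name and the acquireLock closure are illustrative assumptions; only the RetryOrQuit signature comes from the example.

// Hypothetical caller, not from the p2 source.
func acquireWithRetry(quit <-chan struct{}, logger logging.Logger) bool {
	attempts := 0
	acquireLock := func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("lock contended (attempt %d)", attempts)
		}
		return nil
	}
	// Returns true once acquireLock succeeds, or false if quit is closed first.
	return RetryOrQuit(acquireLock, quit, logger, "could not acquire lock")
}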
Example #17
// authorize checks whether a manifest satisfies the authorization requirement of this preparer.
func (p *Preparer) authorize(manifest manifest.Manifest, logger logging.Logger) bool {
	err := p.authPolicy.AuthorizeApp(manifest, logger)
	if err != nil {
		if err, ok := err.(auth.Error); ok {
			logger.WithFields(err.Fields).Errorln(err)
		} else {
			logger.NoFields().Errorln(err)
		}
		return false
	}
	return true
}
Example #18
func (p *Preparer) resolvePair(pair ManifestPair, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger
	var oldSHA, newSHA string
	if pair.Reality != nil {
		oldSHA, _ = pair.Reality.SHA()
	}
	if pair.Intent != nil {
		newSHA, _ = pair.Intent.SHA()
	}

	if oldSHA == "" {
		logger.NoFields().Infoln("manifest is new, will update")
		return p.installAndLaunchPod(pair, pod, logger)
	}

	if newSHA == "" {
		logger.NoFields().Infoln("manifest was deleted from intent, will remove")
		return p.stopAndUninstallPod(pair, pod, logger)
	}

	if oldSHA == newSHA {
		logger.NoFields().Debugln("manifest is unchanged, no action required")
		return true
	}

	logger.WithField("old_sha", oldSHA).Infoln("manifest SHA has changed, will update")
	return p.installAndLaunchPod(pair, pod, logger)

}
Example #19
// Helper to processHealthUpdater()
func sendHealthUpdate(
	logger logging.Logger,
	w chan<- writeResult,
	health *WatchResult,
	doThrottle bool,
	sender func() error,
) {
	if err := sender(); err != nil {
		logger.WithError(err).Error("error writing health")
		// Try not to overwhelm Consul
		time.Sleep(time.Duration(*HealthRetryTimeSec) * time.Second)
		w <- writeResult{nil, false, doThrottle}
	} else {
		w <- writeResult{health, true, doThrottle}
	}
}
Example #20
func verifyUUIDPod(errCh chan error, tempDir string, logger logging.Logger) {
	defer close(errCh)

	// Schedule a "uuid" hello pod on a different port
	podUniqueKey, err := createHelloUUIDPod(tempDir, 43771, logger)
	if err != nil {
		errCh <- fmt.Errorf("Could not schedule UUID hello pod: %s", err)
		return
	}
	logger.Infoln("p2-schedule'd another hello instance as a uuid pod running on port 43771")

	err = verifyHelloRunning(podUniqueKey, logger)
	if err != nil {
		errCh <- fmt.Errorf("Couldn't get hello running as a uuid pod: %s", err)
		return
	}
}
Example #21
// MonitorPodHealth is meant to be a long running go routine.
// MonitorPodHealth reads from a consul store to determine which
// services should be running on the host. MonitorPodHealth
// runs a CheckHealth routine to monitor the health of each
// service and kills routines for services that should no
// longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	client, err := config.GetConsulClient()
	if err != nil {
		// A bad config should have already produced a nice, user-friendly error message.
		logger.WithError(err).Fatalln("error creating health monitor KV client")
	}
	store := kp.NewConsulStore(client)
	healthManager := store.NewHealthManager(config.NodeName, *logger)

	node := config.NodeName
	pods := []PodWatch{}

	watchQuitCh := make(chan struct{})
	watchErrCh := make(chan error)
	watchPodCh := make(chan []kp.ManifestResult)
	go store.WatchPods(
		kp.REALITY_TREE,
		node,
		watchQuitCh,
		watchErrCh,
		watchPodCh,
	)

	// If GetClient fails, it means the certfile/keyfile/cafile were
	// invalid or did not exist, so a fatal error is appropriate.
	secureClient, err := config.GetClient(time.Duration(*HEALTHCHECK_TIMEOUT) * time.Second)
	if err != nil {
		logger.WithError(err).Fatalln("failed to get http client for this preparer")
	}

	insecureClient, err := config.GetInsecureClient(time.Duration(*HEALTHCHECK_TIMEOUT) * time.Second)
	if err != nil {
		logger.WithError(err).Fatalln("failed to get http client for this preparer")
	}

	for {
		select {
		case results := <-watchPodCh:
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updatePods(healthManager, secureClient, insecureClient, pods, results, node, logger)
		case err := <-watchErrCh:
			logger.WithError(err).Errorln("there was an error reading reality manifests for health monitor")
		case <-shutdownCh:
			for _, pod := range pods {
				pod.shutdownCh <- true
			}
			close(watchQuitCh)
			healthManager.Close()
			return
		}
	}
}
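A hedged sketch of launching the monitor as the doc comment describes and stopping it on shutdown. The wrapper function is a hypothetical name; only the MonitorPodHealth signature and its shutdownCh handling come from the example.

// Hypothetical wrapper, not from the p2 source: start the monitor in its own
// goroutine and hand back the channel used to stop it.
func startHealthMonitor(config *preparer.PreparerConfig, logger *logging.Logger) chan struct{} {
	shutdownCh := make(chan struct{})
	go MonitorPodHealth(config, logger, shutdownCh)
	return shutdownCh
}

// To stop monitoring, close the returned channel (or send struct{}{} on it);
// MonitorPodHealth then shuts down each PodWatch, closes its Consul watch, and
// closes the health manager before returning.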
Example #22
File: farm.go Project: rudle/p2
// Validates that the rolling update is capable of being processed. If not, an
// error is returned.
// The following conditions make an RU invalid:
// 1) New RC does not exist
// 2) Old RC does not exist
func (rlf *Farm) validateRoll(update roll_fields.Update, logger logging.Logger) error {
	_, err := rlf.rcs.Get(update.NewRC)
	if err == rcstore.NoReplicationController {
		return fmt.Errorf("RU '%s' is invalid, new RC '%s' did not exist", update.ID(), update.NewRC)
	} else if err != nil {
		// There was a potentially transient consul error, we don't necessarily want to delete the RU
		logger.WithError(err).Errorln("Could not fetch new RC to validate RU, assuming it's valid")
	}

	_, err = rlf.rcs.Get(update.OldRC)
	if err == rcstore.NoReplicationController {
		return fmt.Errorf("RU '%s' is invalid, old RC '%s' did not exist", update.ID(), update.OldRC)
	} else if err != nil {
		// There was a potentially transient consul error, we don't necessarily want to delete the RU
		logger.WithError(err).Errorln("Could not fetch old RC in order to validate RU, assuming it's valid")
	}

	return nil
}
Example #23
func (p *Preparer) resolvePair(pair ManifestPair, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger
	var oldSHA, newSHA string
	if pair.Reality != nil {
		oldSHA, _ = pair.Reality.SHA()
	}
	if pair.Intent != nil {
		newSHA, _ = pair.Intent.SHA()
	}

	if oldSHA == "" && newSHA != "" {
		logger.NoFields().Infoln("manifest is new, will update")
		authorized := p.authorize(pair.Intent, logger)
		if !authorized {
			p.tryRunHooks(
				hooks.AFTER_AUTH_FAIL,
				pod,
				pair.Intent,
				logger,
			)
			// prevent future unnecessary loops, we don't need to check again.
			return true
		}
		return p.installAndLaunchPod(pair, pod, logger)
	}

	if newSHA == "" {
		logger.NoFields().Infoln("manifest was deleted from intent, will remove")
		return p.stopAndUninstallPod(pair, pod, logger)
	}

	if oldSHA == newSHA {
		logger.NoFields().Debugln("manifest is unchanged, no action required")
		return true
	}

	authorized := p.authorize(pair.Intent, logger)
	if !authorized {
		p.tryRunHooks(
			hooks.AFTER_AUTH_FAIL,
			pod,
			pair.Intent,
			logger,
		)
		// prevent future unnecessary loops, we don't need to check again.
		return true
	}

	logger.WithField("old_sha", oldSHA).Infoln("manifest SHA has changed, will update")
	return p.installAndLaunchPod(pair, pod, logger)

}
Example #24
func (r *replication) ensureHealthy(
	node types.NodeName,
	timeoutCh <-chan struct{},
	nodeLogger logging.Logger,
	aggregateHealth *podHealth,
) error {
	for {
		select {
		case <-r.quitCh:
			r.logger.Infoln("Caught quit signal during ensureHealthy")
			return errQuit
		case <-timeoutCh:
			r.logger.Infoln("Caught node timeout signal during ensureHealthy")
			return errTimeout
		case <-r.replicationCancelledCh:
			r.logger.Infoln("Caught cancellation signal during ensureHealthy")
			return errCancelled
		case <-time.After(time.Duration(*ensureHealthyPeriodMillis) * time.Millisecond):
			res, ok := aggregateHealth.GetHealth(node)
			if !ok {
				nodeLogger.WithFields(logrus.Fields{
					"node": node,
				}).Errorln("Could not get health, retrying")
				// Zero res should be treated like "critical"
			}
			id := res.ID
			status := res.Status
			// treat an empty threshold as "passing"
			threshold := health.Passing
			if r.threshold != "" {
				threshold = r.threshold
			}
			// is this status less than the threshold?
			if health.Compare(status, threshold) < 0 {
				nodeLogger.WithFields(logrus.Fields{"check": id, "health": status}).Infoln("Node is not healthy")
			} else {
				r.logger.WithField("node", node).Infoln("Node is current and healthy")
				return nil
			}
		}
	}
}
Example #25
func (r *replication) ensureInReality(
	node types.NodeName,
	timeoutCh <-chan struct{},
	nodeLogger logging.Logger,
	targetSHA string,
) error {
	for {
		select {
		case <-r.quitCh:
			return errQuit
		case <-timeoutCh:
			return errTimeout
		case <-r.replicationCancelledCh:
			return errCancelled
		case <-time.After(5 * time.Second):
			man, err := r.queryReality(node)
			if err == pods.NoCurrentManifest {
				// if the pod key doesn't exist yet, that's okay just wait longer
			} else if err != nil {
				nodeLogger.WithErrorAndFields(err, logrus.Fields{
					"node": node,
				}).Errorln("Could not read reality for pod manifest")
			} else {
				receivedSHA, _ := man.SHA()
				if receivedSHA == targetSHA {
					nodeLogger.NoFields().Infoln("Node is current")
					return nil
				} else {
					nodeLogger.WithFields(logrus.Fields{"current": receivedSHA, "target": targetSHA}).Infoln("Waiting for current")
				}
			}
		}
	}
}
Example #26
func schedule(client client.Client, logger logging.Logger) {
	m, err := manifest.FromPath(*manifestFile)
	if err != nil {
		logger.Fatalf("Could not read manifest: %s", err)
	}

	podUniqueKey, err := client.Schedule(m, types.NodeName(*node))
	if err != nil {
		logger.Fatalf("Could not schedule: %s", err)
	}

	output := struct {
		PodID        types.PodID        `json:"pod_id"`
		PodUniqueKey types.PodUniqueKey `json:"pod_unique_key"`
	}{
		PodID:        m.ID(),
		PodUniqueKey: podUniqueKey,
	}

	outBytes, err := json.Marshal(output)
	if err != nil {
		logger.Infof("Scheduled pod with key: %s", podUniqueKey)
		return
	}

	fmt.Println(string(outBytes))
}
Example #27
File: hooks.go Project: rudle/p2
func (h *HookDir) runHooks(dirpath string, hType HookType, pod Pod, podManifest manifest.Manifest, logger logging.Logger) error {
	configFileName, err := podManifest.ConfigFileName()
	if err != nil {
		return err
	}

	// Write manifest to a file so hooks can read it.
	tmpManifestFile, err := ioutil.TempFile("", fmt.Sprintf("%s-manifest.yaml", podManifest.ID()))
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to open manifest file for hooks")
		return err
	}
	defer os.Remove(tmpManifestFile.Name())

	err = podManifest.Write(tmpManifestFile)
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to write manifest file for hooks")
		return err
	}

	hookEnvironment := []string{
		fmt.Sprintf("%s=%s", HOOK_ENV_VAR, path.Base(dirpath)),
		fmt.Sprintf("%s=%s", HOOK_EVENT_ENV_VAR, hType.String()),
		fmt.Sprintf("%s=%s", HOOKED_NODE_ENV_VAR, pod.Node()),
		fmt.Sprintf("%s=%s", HOOKED_POD_ID_ENV_VAR, podManifest.ID()),
		fmt.Sprintf("%s=%s", HOOKED_POD_HOME_ENV_VAR, pod.Home()),
		fmt.Sprintf("%s=%s", HOOKED_POD_MANIFEST_ENV_VAR, tmpManifestFile.Name()),
		fmt.Sprintf("%s=%s", HOOKED_CONFIG_PATH_ENV_VAR, path.Join(pod.ConfigDir(), configFileName)),
		fmt.Sprintf("%s=%s", HOOKED_ENV_PATH_ENV_VAR, pod.EnvDir()),
		fmt.Sprintf("%s=%s", HOOKED_CONFIG_DIR_PATH_ENV_VAR, pod.ConfigDir()),
		fmt.Sprintf("%s=%s", HOOKED_SYSTEM_POD_ROOT_ENV_VAR, h.podRoot),
	}

	return runDirectory(dirpath, hookEnvironment, logger)
}
Example #28
// SessionManager continually creates and maintains Consul sessions. It is intended to be
// run in its own goroutine. If one session expires, a new one will be created. As
// sessions come and go, the session ID (or "" for an expired session) will be sent on the
// output channel.
//
// Parameters:
//   config:  Configuration passed to Consul when creating a new session.
//   client:  The Consul client to use.
//   output:  The channel used for exposing Consul session IDs. This method takes
//            ownership of this channel and will close it once no new IDs will be created.
//   done:    Close this channel to close the current session (if any) and stop creating
//            new sessions.
//   logger:  Errors will be logged to this logger.
func SessionManager(
	config api.SessionEntry,
	client ConsulClient,
	output chan<- string,
	done chan struct{},
	logger logging.Logger,
) {
	logger.NoFields().Info("session manager: starting up")
	for {
		// Check for exit signal
		select {
		case <-done:
			logger.NoFields().Info("session manager: shutting down")
			close(output)
			return
		default:
		}
		// Establish a new session
		id, _, err := client.Session().CreateNoChecks(&config, nil)
		if err != nil {
			logger.WithError(err).Error("session manager: error creating Consul session")
			time.Sleep(time.Duration(*SessionRetrySeconds) * time.Second)
			continue
		}
		sessionLogger := logger.SubLogger(logrus.Fields{
			"session": id,
		})
		sessionLogger.NoFields().Info("session manager: new Consul session")
		select {
		case output <- id:
			// Maintain the session
			err = client.Session().RenewPeriodic(config.TTL, id, nil, done)
			if err != nil {
				sessionLogger.WithError(err).Error("session manager: lost session")
			} else {
				sessionLogger.NoFields().Info("session manager: released session")
			}
			select {
			case output <- "":
			case <-done:
			}
		case <-done:
			// Don't bother reporting the new session if exiting
			_, _ = client.Session().Destroy(id, nil)
			sessionLogger.NoFields().Info("session manager: released session")
		}
	}
}
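A hedged sketch of a consumer following the contract in the comment above: read session IDs (or "" for an expired session) from the output channel, and close done elsewhere to release the current session and stop. The consumer name and loop body are illustrative assumptions.

// Hypothetical consumer, not from the p2 source.
func watchSessions(config api.SessionEntry, client ConsulClient, done chan struct{}, logger logging.Logger) {
	output := make(chan string)
	go SessionManager(config, client, output, done, logger)

	for id := range output { // SessionManager closes output once done is closed
		if id == "" {
			logger.NoFields().Warnln("session expired; waiting for a new one")
			continue
		}
		logger.WithField("session", id).Infoln("holding session")
		// acquire or refresh locks under this session ID here
	}
}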
Example #29
// handlePods has no return value and no output channels. It should do everything
// it needs to do without outside intervention (other than being signalled to quit).
func (p *Preparer) handlePods(podChan <-chan pods.Manifest, quit <-chan struct{}) {
	// install new launchables
	var manifestToLaunch pods.Manifest

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case manifestToLaunch = <-podChan:
			sha, err := manifestToLaunch.SHA()
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":     manifestToLaunch.ID(),
				"sha":     sha,
				"sha_err": err,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = p.authorize(manifestToLaunch, manifestLogger)
			if !working {
				p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID())), &manifestToLaunch, manifestLogger)
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID()))

				ok := p.installAndLaunchPod(&manifestToLaunch, pod, manifestLogger)
				if ok {
					manifestToLaunch = pods.Manifest{}
					working = false
				}
			}
		}
	}
}
Example #30
func (p *Preparer) stopAndUninstallPod(pair ManifestPair, pod Pod, logger logging.Logger) bool {
	success, err := pod.Halt(pair.Reality)
	if err != nil {
		logger.WithError(err).Errorln("Pod halt failed")
	} else if !success {
		logger.NoFields().Warnln("One or more launchables did not halt successfully")
	}

	p.tryRunHooks(hooks.BEFORE_UNINSTALL, pod, pair.Reality, logger)

	err = pod.Uninstall()
	if err != nil {
		logger.WithError(err).Errorln("Uninstall failed")
		return false
	}
	logger.NoFields().Infoln("Successfully uninstalled")

	dur, err := p.store.DeletePod(kp.REALITY_TREE, p.node, pair.ID)
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{"duration": dur}).
			Errorln("Could not delete pod from reality store")
	}
	return true
}