// AuthorizeApp verifies the manifest's detached signature against the policy's
// keyring and, when an allowlist is configured for this pod, checks that the
// signing key is one of its authorized deployers.
func (p FixedKeyringPolicy) AuthorizeApp(manifest Manifest, logger logging.Logger) error {
	plaintext, signature := manifest.SignatureData()
	if signature == nil {
		return Error{util.Errorf("received unsigned manifest (expected signature)"), nil}
	}
	signer, err := checkDetachedSignature(p.Keyring, plaintext, signature)
	if err != nil {
		return err
	}

	signerId := fmt.Sprintf("%X", signer.PrimaryKey.Fingerprint)
	logger.WithField("signer_key", signerId).Debugln("resolved manifest signature")

	// Check authorization for this package to be deployed by this
	// key, if configured.
	if len(p.AuthorizedDeployers[manifest.ID()]) > 0 {
		found := false
		for _, deployerId := range p.AuthorizedDeployers[manifest.ID()] {
			if deployerId == signerId {
				found = true
				break
			}
		}
		if !found {
			return Error{
				util.Errorf("manifest signer not authorized to deploy " + manifest.ID()),
				map[string]interface{}{"signer_key": signerId},
			}
		}
	}

	return nil
}
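// The sketch below is hypothetical and not part of the original source: it shows one
// way FixedKeyringPolicy could be constructed and used, assuming the Keyring and
// AuthorizedDeployers fields seen above are exported, that manifest IDs and key
// fingerprints are plain strings, and that the keyring is an openpgp entity list
// loaded elsewhere. The function name, pod ID, and fingerprint are made up.
func exampleFixedKeyringAuthorization(keyring openpgp.EntityList, manifest Manifest, logger logging.Logger) error {
	policy := FixedKeyringPolicy{
		Keyring: keyring, // assumed type; whatever checkDetachedSignature accepts
		AuthorizedDeployers: map[string][]string{
			// assumed shape: manifest ID -> hex fingerprints of keys allowed to deploy it
			"some-pod": {"0123456789ABCDEF0123456789ABCDEF01234567"},
		},
	}
	return policy.AuthorizeApp(manifest, logger)
}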
func (p *Preparer) resolvePair(pair ManifestPair, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger
	var oldSHA, newSHA string
	if pair.Reality != nil {
		oldSHA, _ = pair.Reality.SHA()
	}
	if pair.Intent != nil {
		newSHA, _ = pair.Intent.SHA()
	}

	if oldSHA == "" {
		logger.NoFields().Infoln("manifest is new, will update")
		return p.installAndLaunchPod(pair, pod, logger)
	}

	if newSHA == "" {
		logger.NoFields().Infoln("manifest was deleted from intent, will remove")
		return p.stopAndUninstallPod(pair, pod, logger)
	}

	if oldSHA == newSHA {
		logger.NoFields().Debugln("manifest is unchanged, no action required")
		return true
	}

	logger.WithField("old_sha", oldSHA).Infoln("manifest SHA has changed, will update")
	return p.installAndLaunchPod(pair, pod, logger)
}
// MonitorPodHealth is meant to be a long-running goroutine. It reads from a consul
// store to determine which services should be running on the host, runs a
// CheckHealth routine to monitor the health of each service, and kills routines
// for services that should no longer be running.
func MonitorPodHealth(config *preparer.PreparerConfig, logger *logging.Logger, shutdownCh chan struct{}) {
	var store kp.Store

	consul := config.ConsulAddress
	node := config.NodeName
	pods := []PodWatch{}
	authtoken, err := preparer.LoadConsulToken(config.ConsulTokenPath)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("Could not load consul token")
	}

	store = kp.NewConsulStore(kp.Options{
		Address: consul,
		HTTPS:   false,
		Token:   authtoken,
		Client:  net.NewHeaderClient(nil, http.DefaultTransport),
	})
	pods = updateHealthMonitors(store, pods, node, logger)
	for {
		select {
		case <-time.After(POLL_KV_FOR_PODS):
			// check if pods have been added or removed
			// starts monitor routine for new pods
			// kills monitor routine for removed pods
			pods = updateHealthMonitors(store, pods, node, logger)
		case <-shutdownCh:
			return
		}
	}
}
// updateHealthMonitors refreshes the set of watched pods from the node's reality
// store and reconciles the monitor goroutines against it.
func updateHealthMonitors(store kp.Store, watchedPods []PodWatch, node string, logger *logging.Logger) []PodWatch {
	path := kp.RealityPath(node)
	reality, _, err := store.ListPods(path)
	if err != nil {
		logger.WithField("inner_err", err).Warningln("failed to get pods from reality store")
	}

	return updatePods(watchedPods, reality, logger, store, node)
}
// waitForTermination blocks until SIGTERM or an interrupt is received, then asks
// each worker goroutine to quit and waits for the main update loop to acknowledge.
func waitForTermination(logger logging.Logger, quitMainUpdate, quitHookUpdate chan struct{}, quitMonitorPodHealth chan struct{}) {
	signalCh := make(chan os.Signal, 2)
	signal.Notify(signalCh, syscall.SIGTERM, os.Interrupt)
	received := <-signalCh
	logger.WithField("signal", received.String()).Infoln("Stopping work")
	quitHookUpdate <- struct{}{}
	quitMainUpdate <- struct{}{}
	quitMonitorPodHealth <- struct{}{}
	<-quitMainUpdate // acknowledgement
}
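// The sketch below is hypothetical and not part of the original source: it shows how
// MonitorPodHealth and waitForTermination might be wired together in main, using only
// the signatures shown above. The function name and the existence of separate main
// update and hook update loops that consume the quit channels are assumptions.
func exampleWireShutdown(preparerConfig *preparer.PreparerConfig, logger logging.Logger) {
	quitMainUpdate := make(chan struct{})
	quitHookUpdate := make(chan struct{})
	quitMonitorPodHealth := make(chan struct{})

	// ... start the main update and hook update loops here (not shown); they are
	// expected to receive from their quit channels, and the main update loop to
	// send an acknowledgement back on quitMainUpdate.

	go MonitorPodHealth(preparerConfig, &logger, quitMonitorPodHealth)

	waitForTermination(logger, quitMainUpdate, quitHookUpdate, quitMonitorPodHealth)
}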
func (p *Preparer) resolvePair(pair ManifestPair, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger
	var oldSHA, newSHA string
	if pair.Reality != nil {
		oldSHA, _ = pair.Reality.SHA()
	}
	if pair.Intent != nil {
		newSHA, _ = pair.Intent.SHA()
	}

	if oldSHA == "" && newSHA != "" {
		logger.NoFields().Infoln("manifest is new, will update")
		authorized := p.authorize(pair.Intent, logger)
		if !authorized {
			p.tryRunHooks(
				hooks.AFTER_AUTH_FAIL,
				pod,
				pair.Intent,
				logger,
			)
			// prevent future unnecessary loops, we don't need to check again.
			return true
		}
		return p.installAndLaunchPod(pair, pod, logger)
	}

	if newSHA == "" {
		logger.NoFields().Infoln("manifest was deleted from intent, will remove")
		return p.stopAndUninstallPod(pair, pod, logger)
	}

	if oldSHA == newSHA {
		logger.NoFields().Debugln("manifest is unchanged, no action required")
		return true
	}

	authorized := p.authorize(pair.Intent, logger)
	if !authorized {
		p.tryRunHooks(
			hooks.AFTER_AUTH_FAIL,
			pod,
			pair.Intent,
			logger,
		)
		// prevent future unnecessary loops, we don't need to check again.
		return true
	}

	logger.WithField("old_sha", oldSHA).Infoln("manifest SHA has changed, will update")
	return p.installAndLaunchPod(pair, pod, logger)
}
// runDirectory executes all executable files in a given directory path.
func runDirectory(dirpath string, environment []string, logger logging.Logger) error {
	entries, err := ioutil.ReadDir(dirpath)
	if os.IsNotExist(err) {
		logger.WithField("dir", dirpath).Debugln("Hooks not set up")
		return nil
	}
	if err != nil {
		return err
	}

	for _, f := range entries {
		fullpath := path.Join(dirpath, f.Name())
		executable := (f.Mode() & 0111) != 0
		if !executable {
			logger.WithField("path", fullpath).Warnln("Hook is not executable")
			continue
		}
		if f.IsDir() {
			continue
		}

		h := Hook{fullpath, f.Name(), DefaultTimeout, environment, logger}

		err := h.RunWithTimeout()
		if htErr, ok := err.(HookTimeoutError); ok {
			logger.WithErrorAndFields(htErr, logrus.Fields{
				"path":      h.Path,
				"hook_name": h.Name,
				"timeout":   h.Timeout,
			}).Warnln(htErr.Error())
			// we intentionally swallow timeout HookTimeoutErrors
		} else if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"path":      h.Path,
				"hook_name": h.Name,
			}).Warningf("Unknown error in hook %s: %s", h.Name, err)
		}
	}

	return nil
}
func runDirectory(dirpath string, environment []string, logger logging.Logger) error {
	entries, err := ioutil.ReadDir(dirpath)
	if os.IsNotExist(err) {
		logger.WithField("dir", dirpath).Debugln("Hooks not set up")
		return nil
	}
	if err != nil {
		return err
	}

	for _, f := range entries {
		fullpath := path.Join(dirpath, f.Name())
		executable := (f.Mode() & 0111) != 0
		if !executable {
			logger.WithField("path", fullpath).Warnln("Hook is not executable")
			continue
		}
		if f.IsDir() {
			continue
		}

		cmd := exec.Command(fullpath)
		hookOut := &bytes.Buffer{}
		cmd.Stdout = hookOut
		cmd.Stderr = hookOut
		cmd.Env = environment
		err := cmd.Run()
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"path":   fullpath,
				"output": hookOut.String(),
			}).Warnf("Could not execute hook %s", f.Name())
		} else {
			logger.WithFields(logrus.Fields{
				"path":   fullpath,
				"output": hookOut.String(),
			}).Debugln("Executed hook")
		}
	}

	return nil
}
func (p *Preparer) installAndLaunchPod(newManifest *pods.Manifest, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger

	// get currently running pod to compare with the new pod
	realityPath := kp.RealityPath(p.node, newManifest.ID())
	currentManifest, _, err := p.store.Pod(realityPath)
	currentSHA := ""
	if currentManifest != nil {
		currentSHA, _ = currentManifest.SHA()
	}
	newSHA, _ := newManifest.SHA()

	// if new or the manifest is different, launch
	newOrDifferent := (err == pods.NoCurrentManifest) || (currentSHA != newSHA)
	if newOrDifferent {
		logger.WithFields(logrus.Fields{
			"old_sha": currentSHA,
			"sha":     newSHA,
			"pod":     newManifest.ID(),
		}).Infoln("SHA is new or different from old, will update")
	}

	// if the old manifest is corrupted somehow, re-launch since we don't know if this is an update.
	problemReadingCurrentManifest := (err != nil && err != pods.NoCurrentManifest)
	if problemReadingCurrentManifest {
		logger.WithFields(logrus.Fields{
			"sha":       newSHA,
			"inner_err": err,
		}).Errorln("Current manifest not readable, will relaunch")
	}

	if newOrDifferent || problemReadingCurrentManifest {
		p.tryRunHooks(hooks.BEFORE_INSTALL, pod, newManifest, logger)

		err = pod.Install(newManifest)
		if err != nil {
			// install failed, abort and retry
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Install failed")
			return false
		}

		err = pod.Verify(newManifest, p.authPolicy)
		if err != nil {
			logger.WithField("err", err).Errorln("Pod digest verification failed")
			p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pod, newManifest, logger)
			return false
		}

		p.tryRunHooks(hooks.AFTER_INSTALL, pod, newManifest, logger)

		err = p.store.RegisterService(*newManifest, p.caPath)
		if err != nil {
			logger.WithField("err", err).Errorln("Service registration failed")
			return false
		}

		if currentManifest != nil {
			success, err := pod.Halt(currentManifest)
			if err != nil {
				logger.WithField("err", err).Errorln("Pod halt failed")
			} else if !success {
				logger.NoFields().Warnln("One or more launchables did not halt successfully")
			}
		}

		ok, err := pod.Launch(newManifest)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Launch failed")
		} else {
			duration, err := p.store.SetPod(realityPath, *newManifest)
			if err != nil {
				logger.WithFields(logrus.Fields{
					"err":      err,
					"duration": duration,
				}).Errorln("Could not set pod in reality store")
			}
			p.tryRunHooks(hooks.AFTER_LAUNCH, pod, newManifest, logger)
		}
		return err == nil && ok
	}

	// TODO: shut down removed launchables between pod versions.
	return true
}