// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan pods.Manifest, quit <-chan struct{}) {
	// install new launchables
	var manifestToLaunch pods.Manifest

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger
	for {
		select {
		case <-quit:
			return
		case manifestToLaunch = <-podChan:
			sha, err := manifestToLaunch.SHA()
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":     manifestToLaunch.ID(),
				"sha":     sha,
				"sha_err": err,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = p.authorize(manifestToLaunch, manifestLogger)
			if !working {
				p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID())), &manifestToLaunch, manifestLogger)
			}
		case <-time.After(1 * time.Second):
			if working {
				pod := pods.NewPod(manifestToLaunch.ID(), pods.PodPath(p.podRoot, manifestToLaunch.ID()))

				ok := p.installAndLaunchPod(&manifestToLaunch, pod, manifestLogger)
				if ok {
					manifestToLaunch = pods.Manifest{}
					working = false
				}
			}
		}
	}
}
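// Note on the Pod parameter used below: Pod is an interface defined elsewhere
// in this package (it is what pods.NewPod returns a value satisfying), so its
// definition is not part of this section. Reconstructed from the call sites in
// installAndLaunchPod, it must provide at least the methods sketched here; the
// second parameter of Verify is whatever type p.authPolicy has, which is an
// assumption on our part:
//
//	Install(*pods.Manifest) error
//	Verify(*pods.Manifest, authPolicy) error
//	Halt(*pods.Manifest) (bool, error)
//	Launch(*pods.Manifest) (bool, error)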
func (p *Preparer) installAndLaunchPod(newManifest *pods.Manifest, pod Pod, logger logging.Logger) bool {
	// do not remove the logger argument, it's not the same as p.Logger

	// get currently running pod to compare with the new pod
	realityPath := kp.RealityPath(p.node, newManifest.ID())
	currentManifest, _, err := p.store.Pod(realityPath)
	currentSHA := ""
	if currentManifest != nil {
		currentSHA, _ = currentManifest.SHA()
	}
	newSHA, _ := newManifest.SHA()

	// if new or the manifest is different, launch
	newOrDifferent := (err == pods.NoCurrentManifest) || (currentSHA != newSHA)
	if newOrDifferent {
		logger.WithFields(logrus.Fields{
			"old_sha": currentSHA,
			"sha":     newSHA,
			"pod":     newManifest.ID(),
		}).Infoln("SHA is new or different from old, will update")
	}

	// if the old manifest is corrupted somehow, re-launch since we don't know if this is an update.
	problemReadingCurrentManifest := (err != nil && err != pods.NoCurrentManifest)
	if problemReadingCurrentManifest {
		logger.WithFields(logrus.Fields{
			"sha":       newSHA,
			"inner_err": err,
		}).Errorln("Current manifest not readable, will relaunch")
	}

	if newOrDifferent || problemReadingCurrentManifest {
		p.tryRunHooks(hooks.BEFORE_INSTALL, pod, newManifest, logger)

		err = pod.Install(newManifest)
		if err != nil {
			// install failed, abort and retry
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Install failed")
			return false
		}

		err = pod.Verify(newManifest, p.authPolicy)
		if err != nil {
			logger.WithField("err", err).Errorln("Pod digest verification failed")
			p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pod, newManifest, logger)
			return false
		}

		p.tryRunHooks(hooks.AFTER_INSTALL, pod, newManifest, logger)

		err = p.store.RegisterService(*newManifest, p.caPath)
		if err != nil {
			logger.WithField("err", err).Errorln("Service registration failed")
			return false
		}

		if currentManifest != nil {
			success, err := pod.Halt(currentManifest)
			if err != nil {
				logger.WithField("err", err).Errorln("Pod halt failed")
			} else if !success {
				logger.NoFields().Warnln("One or more launchables did not halt successfully")
			}
		}

		ok, err := pod.Launch(newManifest)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"err": err,
			}).Errorln("Launch failed")
		} else {
			duration, err := p.store.SetPod(realityPath, *newManifest)
			if err != nil {
				logger.WithFields(logrus.Fields{
					"err":      err,
					"duration": duration,
				}).Errorln("Could not set pod in reality store")
			}
			p.tryRunHooks(hooks.AFTER_LAUNCH, pod, newManifest, logger)
		}
		return err == nil && ok
	}

	// TODO: shut down removed launchables between pod versions.
	return true
}
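// runLoop is a minimal wiring sketch, not part of the original file: it shows
// one way handlePods is intended to be driven. The manifests channel stands in
// for whatever watch on the intent store actually produces new manifests; the
// name runLoop and this setup are illustrative assumptions.
func (p *Preparer) runLoop(manifests <-chan pods.Manifest) {
	quit := make(chan struct{})
	podChan := make(chan pods.Manifest)

	// handlePods owns all pod work; it needs no output channels and stops
	// when quit is closed.
	go p.handlePods(podChan, quit)

	// forward each new manifest to the worker loop as it arrives
	for m := range manifests {
		podChan <- m
	}

	// manifest source exhausted; signal the worker to exit
	close(quit)
}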