Example #1
File: fields.go Project: petertseng/p2
// UnmarshalJSON implements the json.Unmarshaler interface for deserializing the JSON
// representation of a DaemonSet. It lets the encoding/json package know how to
// unmarshal a DaemonSet, so it can be invoked via json.Unmarshal.
func (ds *DaemonSet) UnmarshalJSON(b []byte) error {
	var rawDS RawDaemonSet
	if err := json.Unmarshal(b, &rawDS); err != nil {
		return err
	}

	var podManifest manifest.Manifest
	if rawDS.Manifest != "" {
		var err error
		podManifest, err = manifest.FromBytes([]byte(rawDS.Manifest))
		if err != nil {
			return err
		}
	}

	nodeSelector, err := labels.Parse(rawDS.NodeSelector)
	if err != nil {
		return err
	}

	*ds = DaemonSet{
		ID:           rawDS.ID,
		Disabled:     rawDS.Disabled,
		Manifest:     podManifest,
		MinHealth:    rawDS.MinHealth,
		Name:         rawDS.Name,
		NodeSelector: nodeSelector,
		PodID:        rawDS.PodID,
		Timeout:      rawDS.Timeout,
	}
	return nil
}
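For context, a minimal usage sketch of the above (the JSON payload shape here is hypothetical, not taken from the project):

	var ds DaemonSet
	raw := []byte(`{"id": "my-daemon-set", "disabled": false}`)
	// json.Unmarshal dispatches to the (*DaemonSet).UnmarshalJSON method defined above.
	if err := json.Unmarshal(raw, &ds); err != nil {
		// malformed JSON, an unparseable manifest, or a bad node selector ends up here
	}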
Example #2
File: fields.go Project: petertseng/p2
// UnmarshalJSON implements the json.Unmarshaler interface for deserializing the JSON
// representation of an RC.
func (rc *RC) UnmarshalJSON(b []byte) error {
	var rawRC RawRC
	if err := json.Unmarshal(b, &rawRC); err != nil {
		return err
	}

	var m manifest.Manifest
	if rawRC.Manifest != "" {
		var err error
		m, err = manifest.FromBytes([]byte(rawRC.Manifest))
		if err != nil {
			return err
		}
	}

	nodeSel, err := labels.Parse(rawRC.NodeSelector)
	if err != nil {
		return err
	}

	*rc = RC{
		ID:              rawRC.ID,
		Manifest:        m,
		NodeSelector:    nodeSel,
		PodLabels:       rawRC.PodLabels,
		ReplicasDesired: rawRC.ReplicasDesired,
		Disabled:        rawRC.Disabled,
	}
	return nil
}
Example #3
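// testHookListener builds a clearsign-signed pod manifest, seeds a fake intent
// store with it, and returns a HookListener configured to consume it, along with
// the hook destination directory and the fake store's quit channel.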
func testHookListener(t *testing.T) (HookListener, string, <-chan struct{}) {
	hookPrefix := kp.HOOK_TREE
	destDir, _ := ioutil.TempDir("", "pods")
	defer os.RemoveAll(destDir)
	execDir, err := ioutil.TempDir("", "exec")
	defer os.RemoveAll(execDir)
	Assert(t).IsNil(err, "should not have erred creating a tempdir")

	current, err := user.Current()
	Assert(t).IsNil(err, "test setup: could not get the current user")
	builder := manifest.NewBuilder()
	builder.SetID("users")
	builder.SetRunAsUser(current.Username)
	builder.SetLaunchables(map[launch.LaunchableID]launch.LaunchableStanza{
		"create": {
			Location:       util.From(runtime.Caller(0)).ExpandPath("hoisted-hello_def456.tar.gz"),
			LaunchableType: "hoist",
			LaunchableId:   "create",
		},
	})
	podManifest := builder.GetManifest()
	manifestBytes, err := podManifest.Marshal()
	Assert(t).IsNil(err, "manifest bytes error should have been nil")

	fakeSigner, err := openpgp.NewEntity("p2", "p2-test", "*****@*****.**", nil)
	Assert(t).IsNil(err, "NewEntity error should have been nil")

	var buf bytes.Buffer
	sigWriter, err := clearsign.Encode(&buf, fakeSigner.PrivateKey, nil)
	Assert(t).IsNil(err, "clearsign encode error should have been nil")

	sigWriter.Write(manifestBytes)
	sigWriter.Close()

	podManifest, err = manifest.FromBytes(buf.Bytes())
	Assert(t).IsNil(err, "should have generated manifest from signed bytes")

	fakeIntent := fakeStoreWithManifests(kp.ManifestResult{
		Manifest: podManifest,
	})

	hookFactory := pods.NewHookFactory(destDir, "testNode")

	listener := HookListener{
		Intent:           fakeIntent,
		HookPrefix:       hookPrefix,
		ExecDir:          execDir,
		HookFactory:      hookFactory,
		Logger:           logging.DefaultLogger,
		authPolicy:       auth.FixedKeyringPolicy{openpgp.EntityList{fakeSigner}, nil},
		artifactVerifier: auth.NopVerifier(),
		artifactRegistry: artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector),
	}

	return listener, destDir, fakeIntent.quit
}
Example #4
File: main.go Project: petertseng/p2
// SumBytes parses the given contents of a manifest file and returns its canonical hash.
func SumBytes(data []byte) HashErr {
	m, err := manifest.FromBytes(data)
	if err != nil {
		return HashErr{"", err}
	}
	sha, err := m.SHA()
	if err != nil {
		return HashErr{"", err}
	}
	return HashErr{sha, nil}
}
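A short usage sketch, assuming HashErr simply carries the resulting hash and any parse or hashing error (its field names are not shown above):

	data, err := ioutil.ReadFile("pod.yaml") // hypothetical manifest path
	if err != nil {
		log.Fatalln(err)
	}
	result := SumBytes(data)
	fmt.Printf("%+v\n", result) // canonical SHA on success, the wrapped error otherwise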
Example #5
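// UnmarshalJSON implements the json.Unmarshaler interface for Pod, parsing the
// manifest string embedded in the raw JSON into a manifest.Manifest.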
func (p *Pod) UnmarshalJSON(b []byte) error {
	var rawPod RawPod
	if err := json.Unmarshal(b, &rawPod); err != nil {
		return err
	}

	m, err := manifest.FromBytes([]byte(rawPod.Manifest))
	if err != nil {
		return err
	}

	p.Manifest = m
	p.Node = rawPod.Node
	return nil
}
Example #6
File: kv.go Project: drcapulet/p2
// Pod reads a pod manifest from the key-value store. If the given key does not
// exist, a nil manifest.Manifest will be returned, along with a pods.NoCurrentManifest
// error.
func (c consulStore) Pod(podPrefix PodPrefix, nodename types.NodeName, podId types.PodID) (manifest.Manifest, time.Duration, error) {
	key, err := podPath(podPrefix, nodename, podId)
	if err != nil {
		return nil, 0, err
	}

	kvPair, writeMeta, err := c.client.KV().Get(key, nil)
	if err != nil {
		return nil, 0, consulutil.NewKVError("get", key, err)
	}
	if kvPair == nil {
		return nil, writeMeta.RequestTime, pods.NoCurrentManifest
	}
	manifest, err := manifest.FromBytes(kvPair.Value)
	return manifest, writeMeta.RequestTime, err
}
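A hedged usage sketch for callers of Pod (the store value, node name, and pod ID are hypothetical stand-ins):

	m, _, err := store.Pod(kp.REALITY_TREE, types.NodeName("node1.example.com"), types.PodID("mypod"))
	switch {
	case err == pods.NoCurrentManifest:
		// nothing is currently recorded for this pod on this node
	case err != nil:
		// a real KV error; propagate it
	default:
		fmt.Println(m.ID()) // manifest was found and parsed
	}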
Example #7
File: kv.go Project: drcapulet/p2
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) {
	podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key)
	if err != nil {
		return ManifestResult{}, err
	}

	var podManifest manifest.Manifest
	var node types.NodeName
	if podUniqueKey != nil {
		var podIndex podstore.PodIndex
		err := json.Unmarshal(pair.Value, &podIndex)
		if err != nil {
			return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key)
		}

		// TODO: add caching to pod store, since we're going to be doing a
		// query per index now. Or wait til consul 0.7 and use batch fetch
		pod, err := c.podStore.ReadPodFromIndex(podIndex)
		if err != nil {
			return ManifestResult{}, err
		}

		podManifest = pod.Manifest
		node = pod.Node
	} else {
		podManifest, err = manifest.FromBytes(pair.Value)
		if err != nil {
			return ManifestResult{}, err
		}

		node, err = extractNodeFromKey(pair.Key)
		if err != nil {
			return ManifestResult{}, err
		}
	}

	return ManifestResult{
		Manifest: podManifest,
		PodLocation: types.PodLocation{
			Node:  node,
			PodID: podManifest.ID(),
		},
		PodUniqueKey: podUniqueKey,
	}, nil
}
Example #8
File: kv.go Project: petertseng/p2
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) {
	// As we transition from legacy pods to uuid pods, the /intent and
	// /reality trees will contain both manifests (as they always have) and
	// uuids which refer to consul objects elsewhere in the KV tree. Therefore
	// we have to be able to tell which one it is based on the key path.
	podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key)
	if err != nil {
		return ManifestResult{}, err
	}

	var podManifest manifest.Manifest
	var node types.NodeName
	if podUniqueKey != "" {
		var podIndex podstore.PodIndex
		err := json.Unmarshal(pair.Value, &podIndex)
		if err != nil {
			return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key)
		}

		podManifest, node, err = c.manifestAndNodeFromIndex(pair)
		if err != nil {
			return ManifestResult{}, err
		}
	} else {
		podManifest, err = manifest.FromBytes(pair.Value)
		if err != nil {
			return ManifestResult{}, err
		}

		node, err = extractNodeFromKey(pair.Key)
		if err != nil {
			return ManifestResult{}, err
		}
	}

	return ManifestResult{
		Manifest: podManifest,
		PodLocation: types.PodLocation{
			Node:  node,
			PodID: podManifest.ID(),
		},
		PodUniqueKey: podUniqueKey,
	}, nil
}
Example #9
File: kv.go Project: petertseng/p2
// Now both pod manifests and indexes may be present in the /intent and
// /reality trees. When an index is present in the /intent tree, the pod store
// must be consulted with the PodUniqueKey to get the manifest the pod was
// scheduled with. In the /reality tree, the pod status store is instead
// consulted. This function wraps the logic to fetch from the correct place
// based on the key namespace.
func (c consulStore) manifestAndNodeFromIndex(pair *api.KVPair) (manifest.Manifest, types.NodeName, error) {
	var podIndex podstore.PodIndex
	err := json.Unmarshal(pair.Value, &podIndex)
	if err != nil {
		return nil, "", util.Errorf("Could not parse '%s' as pod index", pair.Key)
	}

	switch {
	case strings.HasPrefix(pair.Key, INTENT_TREE.String()):
		// fetch from pod store
		// TODO: add caching to pod store, since we're going to be doing a
		// query per index now. Or wait til consul 0.7 and use batch fetch
		pod, err := c.podStore.ReadPodFromIndex(podIndex)
		if err != nil {
			return nil, "", err
		}

		return pod.Manifest, pod.Node, nil
	case strings.HasPrefix(pair.Key, REALITY_TREE.String()):
		status, _, err := c.podStatusStore.GetStatusFromIndex(podIndex)
		if err != nil {
			return nil, "", err
		}
		manifest, err := manifest.FromBytes([]byte(status.Manifest))
		if err != nil {
			return nil, "", err
		}

		// We don't write the node into the pod status object, but we can
		// infer it from the key anyway
		node, err := extractNodeFromKey(pair.Key)
		if err != nil {
			return nil, "", err
		}
		return manifest, node, nil
	default:
		return nil, "", util.Errorf("Cannot determine key prefix for %s, expected %s or %s", pair.Key, INTENT_TREE, REALITY_TREE)
	}
}
Example #10
File: main.go Project: petertseng/p2
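// main reads a pod manifest either from stdin (when the filename flag is empty
// or "-") or from the named file, re-serializes it through a manifest builder,
// and writes the canonical form to stdout, exiting on any error.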
func main() {
	kingpin.MustParse(app.Parse(os.Args[1:]))
	logger := log.New(os.Stderr, progName+": ", 0)

	var data []byte
	var err error
	if *filename == "" || *filename == "-" {
		data, err = ioutil.ReadAll(os.Stdin)
	} else {
		data, err = ioutil.ReadFile(*filename)
	}
	if err != nil {
		logger.Fatalln(err)
	}

	m, err := manifest.FromBytes(data)
	if err != nil {
		logger.Fatalln(err)
	}
	err = m.GetBuilder().GetManifest().Write(os.Stdout)
	if err != nil {
		logger.Fatalln(err)
	}
}
Example #11
File: podstore.go Project: petertseng/p2
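// SchedulePod validates the incoming request, parses the provided manifest, and
// schedules it on the requested node, returning the new pod's unique key.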
func (s store) SchedulePod(_ context.Context, req *podstore_protos.SchedulePodRequest) (*podstore_protos.SchedulePodResponse, error) {
	if req.NodeName == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, "node_name must be provided")
	}

	if req.Manifest == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, "manifest must be provided")
	}

	manifest, err := manifest.FromBytes([]byte(req.Manifest))
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "could not parse passed manifest: %s", err)
	}

	podUniqueKey, err := s.scheduler.Schedule(manifest, types.NodeName(req.NodeName))
	if err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "could not schedule pod: %s", err)
	}

	resp := &podstore_protos.SchedulePodResponse{
		PodUniqueKey: podUniqueKey.String(),
	}
	return resp, nil
}
Example #12
File: setup.go Project: petertseng/p2
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) {
	addHooks(preparerConfig, logger)

	if preparerConfig.ConsulAddress == "" {
		return nil, util.Errorf("No Consul address given to the preparer")
	}
	if preparerConfig.PodRoot == "" {
		return nil, util.Errorf("No pod root given to the preparer")
	}

	if preparerConfig.LogLevel != "" {
		lv, err := logrus.ParseLevel(preparerConfig.LogLevel)
		if err != nil {
			return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel)
		}
		logger.Logger.Level = lv
	}

	authPolicy, err := getDeployerAuth(preparerConfig)
	if err != nil {
		return nil, err
	}

	artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger)
	if err != nil {
		return nil, err
	}

	artifactRegistry, err := getArtifactRegistry(preparerConfig)
	if err != nil {
		return nil, err
	}

	client, err := preparerConfig.GetConsulClient()
	if err != nil {
		return nil, err
	}

	statusStore := statusstore.NewConsul(client)
	podStatusStore := podstatus.NewConsul(statusStore, kp.PreparerPodStatusNamespace)
	podStore := podstore.NewConsul(client.KV())

	store := kp.NewConsulStore(client)

	maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage
	if preparerConfig.MaxLaunchableDiskUsage != "" {
		maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage)
		if err != nil {
			return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err)
		}
	}

	err = os.MkdirAll(preparerConfig.PodRoot, 0755)
	if err != nil {
		return nil, util.Errorf("Could not create preparer pod directory: %s", err)
	}

	var logExec []string
	if len(preparerConfig.LogExec) > 0 {
		logExec = preparerConfig.LogExec
	} else {
		logExec = runit.DefaultLogExec()
	}

	finishExec := pods.NopFinishExec
	var podProcessReporter *podprocess.Reporter
	if preparerConfig.PodProcessReporterConfig.FullyConfigured() {
		podProcessReporterLogger := logger.SubLogger(logrus.Fields{
			"component": "PodProcessReporter",
		})

		podProcessReporter, err = podprocess.New(preparerConfig.PodProcessReporterConfig, podProcessReporterLogger, podStatusStore)
		if err != nil {
			return nil, err
		}

		finishExec = preparerConfig.PodProcessReporterConfig.FinishExec()
	}

	var hooksManifest manifest.Manifest
	var hooksPod *pods.Pod
	if preparerConfig.HooksManifest != NoHooksSentinelValue {
		if preparerConfig.HooksManifest == "" {
			return nil, util.Errorf("Most provide a hooks_manifest or sentinel value %q to indicate that there are no hooks", NoHooksSentinelValue)
		}

		hooksManifest, err = manifest.FromBytes([]byte(preparerConfig.HooksManifest))
		if err != nil {
			return nil, util.Errorf("Could not parse configured hooks manifest: %s", err)
		}
		hooksPodFactory := pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName)
		hooksPod = hooksPodFactory.NewHookPod(hooksManifest.ID())
	}
	return &Preparer{
		node:                   preparerConfig.NodeName,
		store:                  store,
		hooks:                  hooks.Hooks(preparerConfig.HooksDirectory, preparerConfig.PodRoot, &logger),
		podStatusStore:         podStatusStore,
		podStore:               podStore,
		Logger:                 logger,
		podFactory:             pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName),
		authPolicy:             authPolicy,
		maxLaunchableDiskUsage: maxLaunchableDiskUsage,
		finishExec:             finishExec,
		logExec:                logExec,
		logBridgeBlacklist:     preparerConfig.LogBridgeBlacklist,
		artifactVerifier:       artifactVerifier,
		artifactRegistry:       artifactRegistry,
		PodProcessReporter:     podProcessReporter,
		hooksManifest:          hooksManifest,
		hooksPod:               hooksPod,
		hooksExecDir:           preparerConfig.HooksDirectory,
	}, nil
}
Example #13
File: pod_test.go Project: rudle/p2
func TestPodSetupConfigWritesFiles(t *testing.T) {
	manifestStr := `id: thepod
launchables:
  my-app:
    launchable_type: hoist
    launchable_id: web
    location: https://localhost:4444/foo/bar/baz_3c021aff048ca8117593f9c71e03b87cf72fd440.tar.gz
    cgroup:
      cpus: 4
      memory: 4G
    env:
      ENABLED_BLAMS: 5
config:
  ENVIRONMENT: staging
`
	currUser, err := user.Current()
	Assert(t).IsNil(err, "Could not get the current user")
	manifestStr += fmt.Sprintf("run_as: %s", currUser.Username)
	manifest, err := manifest.FromBytes(bytes.NewBufferString(manifestStr).Bytes())
	Assert(t).IsNil(err, "should not have erred reading the manifest")

	podTemp, _ := ioutil.TempDir("", "pod")

	podFactory := NewFactory(podTemp, "testNode")
	pod := podFactory.NewPod(manifest.ID())

	launchables := make([]launch.Launchable, 0)
	for _, stanza := range manifest.GetLaunchableStanzas() {
		launchable, err := pod.getLaunchable(stanza, manifest.RunAsUser(), manifest.GetRestartPolicy())
		Assert(t).IsNil(err, "There shouldn't have been an error getting launchable")
		launchables = append(launchables, launchable)
	}
	Assert(t).IsTrue(len(launchables) > 0, "Test setup error: no launchables from launchable stanzas")

	err = pod.setupConfig(manifest, launchables)
	Assert(t).IsNil(err, "There shouldn't have been an error setting up config")

	configFileName, err := manifest.ConfigFileName()
	Assert(t).IsNil(err, "Couldn't generate config filename")
	configPath := filepath.Join(pod.ConfigDir(), configFileName)
	config, err := ioutil.ReadFile(configPath)
	Assert(t).IsNil(err, "should not have erred reading the config")
	Assert(t).AreEqual("ENVIRONMENT: staging\n", string(config), "the config didn't match")

	env, err := ioutil.ReadFile(filepath.Join(pod.EnvDir(), "CONFIG_PATH"))
	Assert(t).IsNil(err, "should not have erred reading the env file")
	Assert(t).AreEqual(configPath, string(env), "The env path to config didn't match")

	platformConfigFileName, err := manifest.PlatformConfigFileName()
	Assert(t).IsNil(err, "Couldn't generate platform config filename")
	platformConfigPath := filepath.Join(pod.ConfigDir(), platformConfigFileName)
	platConfig, err := ioutil.ReadFile(platformConfigPath)
	Assert(t).IsNil(err, "should not have erred reading the platform config")

	expectedPlatConfig := `web:
  cgroup:
    cpus: 4
    memory: 4294967296
`
	Assert(t).AreEqual(expectedPlatConfig, string(platConfig), "the platform config didn't match")

	platEnv, err := ioutil.ReadFile(filepath.Join(pod.EnvDir(), "PLATFORM_CONFIG_PATH"))
	Assert(t).IsNil(err, "should not have erred reading the platform config env file")
	Assert(t).AreEqual(platformConfigPath, string(platEnv), "The env path to platform config didn't match")

	for _, launchable := range launchables {
		launchableIdEnv, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "LAUNCHABLE_ID"))
		Assert(t).IsNil(err, "should not have erred reading the launchable ID env file")

		if launchable.ID().String() != string(launchableIdEnv) {
			t.Errorf("Launchable Id did not have expected value: wanted '%s' was '%s'", launchable.ID().String(), launchableIdEnv)
		}

		launchableRootEnv, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "LAUNCHABLE_ROOT"))
		Assert(t).IsNil(err, "should not have erred reading the launchable root env file")
		Assert(t).AreEqual(launchable.InstallDir(), string(launchableRootEnv), "The launchable root path did not match expected")

		enableBlamSetting, err := ioutil.ReadFile(filepath.Join(launchable.EnvDir(), "ENABLED_BLAMS"))
		Assert(t).IsNil(err, "should not have erred reading custom env var")
		Assert(t).AreEqual("5", string(enableBlamSetting), "The user-supplied custom env var was wrong")
	}
}
Example #14
// handlePods has no return value and no output channels; it should do everything
// it needs to do without outside intervention (other than being signalled to quit).
func (p *Preparer) handlePods(podChan <-chan ManifestPair, quit <-chan struct{}) {
	// install new launchables
	var nextLaunch ManifestPair

	// used to track if we have work to do (i.e. pod manifest came through channel
	// and we have yet to operate on it)
	working := false
	var manifestLogger logging.Logger

	// The design of p2-preparer is to continuously retry installation
	// failures, such as a failed download of a launchable. An exponential
	// backoff is important to avoid putting undue load on, for example, the
	// artifact server.
	backoffTime := minimumBackoffTime
	for {
		select {
		case <-quit:
			return
		case nextLaunch = <-podChan:
			backoffTime = minimumBackoffTime
			var sha string

			// TODO: handle errors appropriately from SHA().
			if nextLaunch.Intent != nil {
				sha, _ = nextLaunch.Intent.SHA()
			} else {
				sha, _ = nextLaunch.Reality.SHA()
			}
			manifestLogger = p.Logger.SubLogger(logrus.Fields{
				"pod":            nextLaunch.ID,
				"sha":            sha,
				"pod_unique_key": nextLaunch.PodUniqueKey,
			})
			manifestLogger.NoFields().Debugln("New manifest received")

			working = true
		case <-time.After(backoffTime):
			if working {
				var pod *pods.Pod
				var err error
				if nextLaunch.PodUniqueKey == "" {
					pod = p.podFactory.NewLegacyPod(nextLaunch.ID)
				} else {
					pod, err = p.podFactory.NewUUIDPod(nextLaunch.ID, nextLaunch.PodUniqueKey)
					if err != nil {
						manifestLogger.WithError(err).Errorln("Could not initialize pod")
						break
					}
				}

				// TODO better solution: force the preparer to have a 0s default timeout, prevent KILLs
				if pod.Id == constants.PreparerPodID {
					pod.DefaultTimeout = time.Duration(0)
				}

				effectiveLogBridgeExec := p.logExec
				// pods that are in the blacklist for this preparer shall not use the
				// preparer's log exec. Instead, they will use the default svlogd logexec.
				for _, podID := range p.logBridgeBlacklist {
					if pod.Id.String() == podID {
						effectiveLogBridgeExec = svlogdExec
						break
					}
				}
				pod.SetLogBridgeExec(effectiveLogBridgeExec)

				pod.SetFinishExec(p.finishExec)

				// podChan is being fed values gathered from a kp.Watch() in
				// WatchForPodManifestsForNode(). If the watch returns a new pair of
				// intent/reality values before the previous change has finished
				// processing in resolvePair(), the reality value will be stale. This
				// leads to a bug where the preparer will appear to update a package
				// and when that is finished, "update" it again.
				//
				// Example ordering of bad events:
				// 1) update to /intent for pod A comes in, /reality is read and
				// resolvePair() handles it
				// 2) before resolvePair() finishes, another /intent update comes in,
				// and /reality is read but hasn't been changed. This update cannot
				// be processed until the previous resolvePair() call finishes, and
				// updates /reality. Now the reality value used here is stale. We
				// want to refresh our /reality read so we don't restart the pod if
				// intent didn't change between updates.
				//
				// The correct solution probably involves watching reality and intent
				// and feeding updated pairs to a control loop.
				//
				// This is a quick fix to ensure that the reality value being used is
				// up-to-date. The de-bouncing logic in this method should ensure that the
				// intent value is fresh (to the extent that Consul is timely). Fetching
				// the reality value again ensures its freshness too.
				if nextLaunch.PodUniqueKey == "" {
					// legacy pod, get reality manifest from reality tree
					reality, _, err := p.store.Pod(kp.REALITY_TREE, p.node, nextLaunch.ID)
					if err == pods.NoCurrentManifest {
						nextLaunch.Reality = nil
					} else if err != nil {
						manifestLogger.WithError(err).Errorln("Error getting reality manifest")
						break
					} else {
						nextLaunch.Reality = reality
					}
				} else {
					// uuid pod, get reality manifest from pod status
					status, _, err := p.podStatusStore.Get(nextLaunch.PodUniqueKey)
					switch {
					case err != nil && !statusstore.IsNoStatus(err):
						manifestLogger.WithError(err).Errorln("Error getting reality manifest from pod status")
						break
					case statusstore.IsNoStatus(err):
						nextLaunch.Reality = nil
					default:
						manifest, err := manifest.FromBytes([]byte(status.Manifest))
						if err != nil {
							manifestLogger.WithError(err).Errorln("Error parsing reality manifest from pod status")
							break
						}
						nextLaunch.Reality = manifest
					}
				}

				ok := p.resolvePair(nextLaunch, pod, manifestLogger)
				if ok {
					nextLaunch = ManifestPair{}
					working = false

					// Reset the backoff time
					backoffTime = minimumBackoffTime
				} else {
					// Double the backoff time with a maximum of 1 minute
					backoffTime = backoffTime * 2
					if backoffTime > 1*time.Minute {
						backoffTime = 1 * time.Minute
					}
				}
			}
		}
	}
}
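The capped exponential backoff used above can be summarized in isolation; this sketch assumes minimumBackoffTime is the small duration constant defined elsewhere in the package, and doWork is a hypothetical stand-in for one pass of the select-loop body:

	backoffTime := minimumBackoffTime
	for {
		if doWork() {
			backoffTime = minimumBackoffTime // reset after a successful resolve
			continue
		}
		// Double the wait after each failure, but never wait more than a minute.
		backoffTime = backoffTime * 2
		if backoffTime > 1*time.Minute {
			backoffTime = 1 * time.Minute
		}
		time.Sleep(backoffTime) // the real loop waits via time.After and also watches quit
	}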