func installBaseAgent(podFactory pods.Factory, agentManifest manifest.Manifest, registryURL *url.URL) error { agentPod := podFactory.NewPod(agentManifest.ID()) err := agentPod.Install(agentManifest, auth.NopVerifier(), artifact.NewRegistry(registryURL, uri.DefaultFetcher, osversion.DefaultDetector)) if err != nil { return err } _, err = agentPod.Launch(agentManifest) return err }
func checkManifestPodID(dsPodID types.PodID, manifest manifest.Manifest) error { if dsPodID == "" { return util.Errorf("Daemon set must have a pod id") } if manifest.ID() == "" { return util.Errorf("Daemon set manifest must have a pod id") } if dsPodID != manifest.ID() { return util.Errorf("Daemon set pod id must match manifest pod id. Wanted '%s', got '%s'", dsPodID, manifest.ID()) } return nil }
// SetPod writes a pod manifest into the consul key-value store. func (c consulStore) SetPod(podPrefix PodPrefix, nodename types.NodeName, manifest manifest.Manifest) (time.Duration, error) { buf := bytes.Buffer{} err := manifest.Write(&buf) if err != nil { return 0, err } key, err := podPath(podPrefix, nodename, manifest.ID()) if err != nil { return 0, err } keyPair := &api.KVPair{ Key: key, Value: buf.Bytes(), } writeMeta, err := c.client.KV().Put(keyPair, nil) var retDur time.Duration if writeMeta != nil { retDur = writeMeta.RequestTime } if err != nil { return retDur, consulutil.NewKVError("put", key, err) } return retDur, nil }
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) { podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key) if err != nil { return ManifestResult{}, err } var podManifest manifest.Manifest var node types.NodeName if podUniqueKey != nil { var podIndex podstore.PodIndex err := json.Unmarshal(pair.Value, &podIndex) if err != nil { return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key) } // TODO: add caching to pod store, since we're going to be doing a // query per index now. Or wait til consul 0.7 and use batch fetch pod, err := c.podStore.ReadPodFromIndex(podIndex) if err != nil { return ManifestResult{}, err } podManifest = pod.Manifest node = pod.Node } else { podManifest, err = manifest.FromBytes(pair.Value) if err != nil { return ManifestResult{}, err } node, err = extractNodeFromKey(pair.Key) if err != nil { return ManifestResult{}, err } } return ManifestResult{ Manifest: podManifest, PodLocation: types.PodLocation{ Node: node, PodID: podManifest.ID(), }, PodUniqueKey: podUniqueKey, }, nil }
func (h *HookDir) RunHookType(hookType HookType, pod Pod, manifest manifest.Manifest) error { logger := h.logger.SubLogger(logrus.Fields{ "pod": manifest.ID(), "pod_path": pod.Home(), "event": hookType.String(), }) logger.NoFields().Infof("Running %s hooks", hookType.String()) return h.runHooks(h.dirpath, hookType, pod, manifest, logger) }
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) { // As we transition from legacy pods to uuid pods, the /intent and // /reality trees will contain both manifests (as they always have) and // uuids which refer to consul objects elsewhere in KV tree. Therefore // we have to be able to tell which it is based on the key path podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key) if err != nil { return ManifestResult{}, err } var podManifest manifest.Manifest var node types.NodeName if podUniqueKey != "" { var podIndex podstore.PodIndex err := json.Unmarshal(pair.Value, &podIndex) if err != nil { return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key) } podManifest, node, err = c.manifestAndNodeFromIndex(pair) if err != nil { return ManifestResult{}, err } } else { podManifest, err = manifest.FromBytes(pair.Value) if err != nil { return ManifestResult{}, err } node, err = extractNodeFromKey(pair.Key) if err != nil { return ManifestResult{}, err } } return ManifestResult{ Manifest: podManifest, PodLocation: types.PodLocation{ Node: node, PodID: podManifest.ID(), }, PodUniqueKey: podUniqueKey, }, nil }
func (h *HookDir) runHooks(dirpath string, hType HookType, pod Pod, podManifest manifest.Manifest, logger logging.Logger) error { configFileName, err := podManifest.ConfigFileName() if err != nil { return err } // Write manifest to a file so hooks can read it. tmpManifestFile, err := ioutil.TempFile("", fmt.Sprintf("%s-manifest.yaml", podManifest.ID())) if err != nil { logger.WithErrorAndFields(err, logrus.Fields{ "dir": dirpath, }).Warnln("Unable to open manifest file for hooks") return err } defer os.Remove(tmpManifestFile.Name()) err = podManifest.Write(tmpManifestFile) if err != nil { logger.WithErrorAndFields(err, logrus.Fields{ "dir": dirpath, }).Warnln("Unable to write manifest file for hooks") return err } hookEnvironment := []string{ fmt.Sprintf("%s=%s", HOOK_ENV_VAR, path.Base(dirpath)), fmt.Sprintf("%s=%s", HOOK_EVENT_ENV_VAR, hType.String()), fmt.Sprintf("%s=%s", HOOKED_NODE_ENV_VAR, pod.Node()), fmt.Sprintf("%s=%s", HOOKED_POD_ID_ENV_VAR, podManifest.ID()), fmt.Sprintf("%s=%s", HOOKED_POD_HOME_ENV_VAR, pod.Home()), fmt.Sprintf("%s=%s", HOOKED_POD_MANIFEST_ENV_VAR, tmpManifestFile.Name()), fmt.Sprintf("%s=%s", HOOKED_CONFIG_PATH_ENV_VAR, path.Join(pod.ConfigDir(), configFileName)), fmt.Sprintf("%s=%s", HOOKED_ENV_PATH_ENV_VAR, pod.EnvDir()), fmt.Sprintf("%s=%s", HOOKED_CONFIG_DIR_PATH_ENV_VAR, pod.ConfigDir()), fmt.Sprintf("%s=%s", HOOKED_SYSTEM_POD_ROOT_ENV_VAR, h.podRoot), } return runDirectory(dirpath, hookEnvironment, logger) }
func createRC( rcs rcstore.Store, applicator labels.Applicator, manifest manifest.Manifest, desired int, nodes map[types.NodeName]bool, ) (rc_fields.RC, error) { created, err := rcs.Create(manifest, nil, nil) if err != nil { return rc_fields.RC{}, fmt.Errorf("Error creating RC: %s", err) } podID := string(manifest.ID()) for node := range nodes { if err = applicator.SetLabel(labels.POD, node.String()+"/"+podID, rc.RCIDLabel, string(created.ID)); err != nil { return rc_fields.RC{}, fmt.Errorf("Error applying RC ID label: %s", err) } } return created, rcs.SetDesiredReplicas(created.ID, desired) }
// Transfers the named node from the old RC to the new RC func transferNode(node types.NodeName, manifest manifest.Manifest, upd update) error { if _, err := upd.kps.SetPod(kp.REALITY_TREE, node, manifest); err != nil { return err } return upd.labeler.SetLabel(labels.POD, labels.MakePodLabelKey(node, manifest.ID()), rc.RCIDLabel, string(upd.NewRC)) }
func main() { kingpin.Version(version.VERSION) kingpin.Parse() log.Println("Starting bootstrap") hostname, err := os.Hostname() if err != nil { log.Fatalf("error getting node name: %v", err) } nodeName := types.NodeName(hostname) agentManifest, err := manifest.FromPath(*agentManifestPath) if err != nil { log.Fatalln("Could not get agent manifest: %s", err) } log.Println("Installing and launching consul") podFactory := pods.NewFactory(*podRoot, nodeName) var consulPod *pods.Pod var consulManifest manifest.Manifest if *existingConsul == "" { consulManifest, err = manifest.FromPath(*consulManifestPath) if err != nil { log.Fatalf("Could not get consul manifest: %s", err) } // Consul will never have a uuid (for now) consulPod = podFactory.NewLegacyPod(consulManifest.ID()) err = installConsul(consulPod, consulManifest, *registryURL) if err != nil { log.Fatalf("Could not install consul: %s", err) } } else { log.Printf("Using existing Consul at %s\n", *existingConsul) consulPod, err = pods.PodFromPodHome(nodeName, *existingConsul) if err != nil { log.Fatalf("The existing consul pod is invalid: %s", err) } consulManifest, err = consulPod.CurrentManifest() if err != nil { log.Fatalf("Cannot get the current consul manifest: %s", err) } } if err = verifyConsulUp(*timeout); err != nil { log.Fatalln(err) } time.Sleep(500 * time.Millisecond) // schedule consul in the reality store as well, to ensure the preparers do // not all restart their consul agents simultaneously after bootstrapping err = scheduleForThisHost(consulManifest, true) if err != nil { log.Fatalf("Could not register consul in the intent store: %s", err) } log.Println("Registering base agent in consul") err = scheduleForThisHost(agentManifest, false) if err != nil { log.Fatalf("Could not register base agent with consul: %s", err) } log.Println("Installing and launching base agent") err = installBaseAgent(podFactory, agentManifest, *registryURL) if err != nil { log.Fatalf("Could not install base agent: %s", err) } 
if err := verifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil { log.Fatalln(err) } log.Println("Bootstrapping complete") }
func (f *FakePodStore) SetPod(podPrefix kp.PodPrefix, hostname types.NodeName, manifest manifest.Manifest) (time.Duration, error) { f.podLock.Lock() defer f.podLock.Unlock() f.podResults[FakePodStoreKeyFor(podPrefix, hostname, manifest.ID())] = manifest return 0, nil }
func (s *fakeKpStore) SetPod(podPrefix kp.PodPrefix, nodeName types.NodeName, manifest manifest.Manifest) (time.Duration, error) { key := path.Join(string(podPrefix), nodeName.String(), string(manifest.ID())) s.manifests[key] = manifest return 0, nil }
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) { addHooks(preparerConfig, logger) if preparerConfig.ConsulAddress == "" { return nil, util.Errorf("No Consul address given to the preparer") } if preparerConfig.PodRoot == "" { return nil, util.Errorf("No pod root given to the preparer") } if preparerConfig.LogLevel != "" { lv, err := logrus.ParseLevel(preparerConfig.LogLevel) if err != nil { return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel) } logger.Logger.Level = lv } authPolicy, err := getDeployerAuth(preparerConfig) if err != nil { return nil, err } artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger) if err != nil { return nil, err } artifactRegistry, err := getArtifactRegistry(preparerConfig) if err != nil { return nil, err } client, err := preparerConfig.GetConsulClient() if err != nil { return nil, err } statusStore := statusstore.NewConsul(client) podStatusStore := podstatus.NewConsul(statusStore, kp.PreparerPodStatusNamespace) podStore := podstore.NewConsul(client.KV()) store := kp.NewConsulStore(client) maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage if preparerConfig.MaxLaunchableDiskUsage != "" { maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage) if err != nil { return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err) } } err = os.MkdirAll(preparerConfig.PodRoot, 0755) if err != nil { return nil, util.Errorf("Could not create preparer pod directory: %s", err) } var logExec []string if len(preparerConfig.LogExec) > 0 { logExec = preparerConfig.LogExec } else { logExec = runit.DefaultLogExec() } finishExec := pods.NopFinishExec var podProcessReporter *podprocess.Reporter if preparerConfig.PodProcessReporterConfig.FullyConfigured() { podProcessReporterLogger := logger.SubLogger(logrus.Fields{ "component": "PodProcessReporter", }) podProcessReporter, err = 
podprocess.New(preparerConfig.PodProcessReporterConfig, podProcessReporterLogger, podStatusStore) if err != nil { return nil, err } finishExec = preparerConfig.PodProcessReporterConfig.FinishExec() } var hooksManifest manifest.Manifest var hooksPod *pods.Pod if preparerConfig.HooksManifest != NoHooksSentinelValue { if preparerConfig.HooksManifest == "" { return nil, util.Errorf("Most provide a hooks_manifest or sentinel value %q to indicate that there are no hooks", NoHooksSentinelValue) } hooksManifest, err = manifest.FromBytes([]byte(preparerConfig.HooksManifest)) if err != nil { return nil, util.Errorf("Could not parse configured hooks manifest: %s", err) } hooksPodFactory := pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName) hooksPod = hooksPodFactory.NewHookPod(hooksManifest.ID()) } return &Preparer{ node: preparerConfig.NodeName, store: store, hooks: hooks.Hooks(preparerConfig.HooksDirectory, preparerConfig.PodRoot, &logger), podStatusStore: podStatusStore, podStore: podStore, Logger: logger, podFactory: pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName), authPolicy: authPolicy, maxLaunchableDiskUsage: maxLaunchableDiskUsage, finishExec: finishExec, logExec: logExec, logBridgeBlacklist: preparerConfig.LogBridgeBlacklist, artifactVerifier: artifactVerifier, artifactRegistry: artifactRegistry, PodProcessReporter: podProcessReporter, hooksManifest: hooksManifest, hooksPod: hooksPod, hooksExecDir: preparerConfig.HooksDirectory, }, nil }
// setupConfig does the following:
//
// 1) creates a directory in the pod's home directory called "config" which
// contains YAML configuration files (named with pod's ID and the SHA of its
// manifest's content) the path to which will be exported to a pods launchables
// via the CONFIG_PATH environment variable
//
// 2) writes an "env" directory in the pod's home directory called "env" which
// contains environment variables written as files that will be exported to all
// processes started by all launchables (as described in
// http://smarden.org/runit/chpst.8.html, with the -e option), including
// CONFIG_PATH
//
// 3) writes an "env" directory for each launchable. The "env" directory
// contains environment files specific to a launchable (such as
// LAUNCHABLE_ROOT)
//
// We may wish to provide a "config" directory per launchable at some point as
// well, so that launchables can have different config namespaces
func (pod *Pod) setupConfig(manifest manifest.Manifest, launchables []launch.Launchable) error {
	// All directories and files below are chowned to the user the manifest
	// declares the pod should run as.
	uid, gid, err := user.IDs(manifest.RunAsUser())
	if err != nil {
		return util.Errorf("Could not determine pod UID/GID: %s", err)
	}

	// Render the pod config and platform config into in-memory buffers first.
	var configData bytes.Buffer
	err = manifest.WriteConfig(&configData)
	if err != nil {
		return err
	}
	var platConfigData bytes.Buffer
	err = manifest.WritePlatformConfig(&platConfigData)
	if err != nil {
		return err
	}

	err = util.MkdirChownAll(pod.ConfigDir(), uid, gid, 0755)
	if err != nil {
		return util.Errorf("Could not create config directory for pod %s: %s", manifest.ID(), err)
	}

	// Write the pod config file into the config dir.
	configFileName, err := manifest.ConfigFileName()
	if err != nil {
		return err
	}
	configPath := filepath.Join(pod.ConfigDir(), configFileName)
	err = writeFileChown(configPath, configData.Bytes(), uid, gid)
	if err != nil {
		return util.Errorf("Error writing config file for pod %s: %s", manifest.ID(), err)
	}

	// Write the platform config file into the same config dir.
	platConfigFileName, err := manifest.PlatformConfigFileName()
	if err != nil {
		return err
	}
	platConfigPath := filepath.Join(pod.ConfigDir(), platConfigFileName)
	err = writeFileChown(platConfigPath, platConfigData.Bytes(), uid, gid)
	if err != nil {
		return util.Errorf("Error writing platform config file for pod %s: %s", manifest.ID(), err)
	}

	// Pod-wide env dir: one file per environment variable, exported to all
	// launchables (runit chpst -e style).
	err = util.MkdirChownAll(pod.EnvDir(), uid, gid, 0755)
	if err != nil {
		return util.Errorf("Could not create the environment dir for pod %s: %s", manifest.ID(), err)
	}
	err = writeEnvFile(pod.EnvDir(), ConfigPathEnvVar, configPath, uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PlatformConfigPathEnvVar, platConfigPath, uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodHomeEnvVar, pod.Home(), uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodIDEnvVar, pod.Id.String(), uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodUniqueKeyEnvVar, pod.uniqueKey.String(), uid, gid)
	if err != nil {
		return err
	}

	// Per-launchable env dirs.
	for _, launchable := range launchables {
		// we need to remove any unset env vars from a previous pod
		err = os.RemoveAll(launchable.EnvDir())
		if err != nil {
			return err
		}

		err = util.MkdirChownAll(launchable.EnvDir(), uid, gid, 0755)
		if err != nil {
			return util.Errorf("Could not create the environment dir for pod %s launchable %s: %s", manifest.ID(), launchable.ServiceID(), err)
		}
		err = writeEnvFile(launchable.EnvDir(), LaunchableIDEnvVar, launchable.ID().String(), uid, gid)
		if err != nil {
			return err
		}
		err = writeEnvFile(launchable.EnvDir(), "LAUNCHABLE_ROOT", launchable.InstallDir(), uid, gid)
		if err != nil {
			return err
		}
		// last, write the user-supplied env variables to ensure priority of user-supplied values
		for envName, value := range launchable.EnvVars() {
			err = writeEnvFile(launchable.EnvDir(), envName, fmt.Sprint(value), uid, gid)
			if err != nil {
				return err
			}
		}
	}

	return nil
}