Example #1
// SetPod writes a pod manifest into the consul key-value store.
func (c consulStore) SetPod(podPrefix PodPrefix, nodename types.NodeName, manifest manifest.Manifest) (time.Duration, error) {
	buf := bytes.Buffer{}
	err := manifest.Write(&buf)
	if err != nil {
		return 0, err
	}

	key, err := podPath(podPrefix, nodename, manifest.ID())
	if err != nil {
		return 0, err
	}
	keyPair := &api.KVPair{
		Key:   key,
		Value: buf.Bytes(),
	}

	writeMeta, err := c.client.KV().Put(keyPair, nil)
	var retDur time.Duration
	if writeMeta != nil {
		retDur = writeMeta.RequestTime
	}
	if err != nil {
		return retDur, consulutil.NewKVError("put", key, err)
	}
	return retDur, nil
}
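For context, a hedged caller-side sketch (not from the source): any store exposing this SetPod signature — the consul-backed store here or the fakes in Example #22 — can record a manifest under a tree prefix such as the kp.REALITY_TREE used in Example #16.

// Sketch only: the inline interface mirrors the SetPod signature above so the
// helper works with either the real consul store or a test fake.
func recordPodManifest(store interface {
	SetPod(kp.PodPrefix, types.NodeName, manifest.Manifest) (time.Duration, error)
}, node types.NodeName, m manifest.Manifest) error {
	// The returned duration is the consul request time; it is ignored here.
	_, err := store.SetPod(kp.REALITY_TREE, node, m)
	return err
}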
Example #2
// Write the servicebuilder *.yaml file and run servicebuilder, which will register runit services for this
// pod.
func (pod *Pod) buildRunitServices(launchables []launch.Launchable, newManifest manifest.Manifest) error {
	// if the service is new, building the runit services also starts them
	sbTemplate := make(map[string]runit.ServiceTemplate)
	for _, launchable := range launchables {
		executables, err := launchable.Executables(pod.ServiceBuilder)
		if err != nil {
			pod.logLaunchableError(launchable.ServiceID(), err, "Unable to list executables")
			continue
		}
		for _, executable := range executables {
			if _, ok := sbTemplate[executable.Service.Name]; ok {
				return util.Errorf("Duplicate executable %q for launchable %q", executable.Service.Name, launchable.ServiceID())
			}
			sbTemplate[executable.Service.Name] = runit.ServiceTemplate{
				Log:    pod.LogExec,
				Run:    executable.Exec,
				Finish: pod.FinishExecForLaunchable(launchable),
			}
		}
	}
	err := pod.ServiceBuilder.Activate(string(pod.Id), sbTemplate, newManifest.GetRestartPolicy())
	if err != nil {
		return err
	}

	// as with the original servicebuilder, prune after creating
	// new services
	return pod.ServiceBuilder.Prune()
}
Example #3
func installBaseAgent(podFactory pods.Factory, agentManifest manifest.Manifest, registryURL *url.URL) error {
	agentPod := podFactory.NewPod(agentManifest.ID())
	err := agentPod.Install(agentManifest, auth.NopVerifier(), artifact.NewRegistry(registryURL, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		return err
	}
	_, err = agentPod.Launch(agentManifest)
	return err
}
Example #4
func (pod *Pod) WriteCurrentManifest(manifest manifest.Manifest) (string, error) {
	// write the old manifest to a temporary location in case a launch fails.
	tmpDir, err := ioutil.TempDir("", "manifests")
	if err != nil {
		return "", util.Errorf("could not create a tempdir to write old manifest: %s", err)
	}
	lastManifest := filepath.Join(tmpDir, "last_manifest.yaml")

	if _, err := os.Stat(pod.currentPodManifestPath()); err == nil {
		podManifestURL, err := url.Parse(pod.currentPodManifestPath())
		if err != nil {
			return "", util.Errorf("Couldn't parse manifest path '%s' as URL: %s", pod.currentPodManifestPath(), err)
		}

		err = uri.URICopy(podManifestURL, lastManifest)
		if err != nil && !os.IsNotExist(err) {
			return "", err
		}
	}

	f, err := os.OpenFile(pod.currentPodManifestPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		pod.logError(err, "Unable to open current manifest file")
		err = pod.revertCurrentManifest(lastManifest)
		if err != nil {
			pod.logError(err, "Couldn't replace old manifest as current")
		}
		return "", err
	}
	defer f.Close()

	err = manifest.Write(f)
	if err != nil {
		pod.logError(err, "Unable to write current manifest file")
		err = pod.revertCurrentManifest(lastManifest)
		if err != nil {
			pod.logError(err, "Couldn't replace old manifest as current")
		}
		return "", err
	}

	uid, gid, err := user.IDs(manifest.RunAsUser())
	if err != nil {
		pod.logError(err, "Unable to find pod UID/GID")
		// the write was still successful so we are not going to revert
		return "", err
	}
	err = f.Chown(uid, gid)
	if err != nil {
		pod.logError(err, "Unable to chown current manifest")
		return "", err
	}

	return lastManifest, nil
}
Example #5
func checkManifestPodID(dsPodID types.PodID, manifest manifest.Manifest) error {
	if dsPodID == "" {
		return util.Errorf("Daemon set must have a pod id")
	}
	if manifest.ID() == "" {
		return util.Errorf("Daemon set manifest must have a pod id")
	}
	if dsPodID != manifest.ID() {
		return util.Errorf("Daemon set pod id must match manifest pod id. Wanted '%s', got '%s'", dsPodID, manifest.ID())
	}
	return nil
}
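A rough sketch of a typical call site (not from the source): parse raw bytes with manifest.FromBytes, as the other examples do, before running the check; the helper name is illustrative.

// Illustrative helper: parse stored manifest bytes, then verify that the
// resulting pod id matches the daemon set's pod id.
func validateDaemonSetManifest(dsPodID types.PodID, raw []byte) error {
	m, err := manifest.FromBytes(raw)
	if err != nil {
		return err
	}
	return checkManifestPodID(dsPodID, m)
}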
Example #6
func (pod *Pod) Launchables(manifest manifest.Manifest) ([]launch.Launchable, error) {
	launchableStanzas := manifest.GetLaunchableStanzas()
	launchables := make([]launch.Launchable, 0, len(launchableStanzas))

	for launchableID, launchableStanza := range launchableStanzas {
		launchable, err := pod.getLaunchable(launchableID, launchableStanza, manifest.RunAsUser())
		if err != nil {
			return nil, err
		}
		launchables = append(launchables, launchable)
	}

	return launchables, nil
}
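Illustrative only: Install and buildRunitServices (Examples #21 and #2) iterate the returned slice; a minimal hedged sketch that just logs each launchable, using the ServiceID and InstallDir accessors seen elsewhere in these examples.

// Sketch, not part of the source: list the launchables for a manifest and log
// where each one installs.
func logLaunchables(pod *Pod, m manifest.Manifest) error {
	launchables, err := pod.Launchables(m)
	if err != nil {
		return err
	}
	for _, l := range launchables {
		log.Printf("launchable %s installs to %s", l.ServiceID(), l.InstallDir())
	}
	return nil
}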
Example #7
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) {
	podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key)
	if err != nil {
		return ManifestResult{}, err
	}

	var podManifest manifest.Manifest
	var node types.NodeName
	if podUniqueKey != nil {
		var podIndex podstore.PodIndex
		err := json.Unmarshal(pair.Value, &podIndex)
		if err != nil {
			return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key)
		}

		// TODO: add caching to pod store, since we're going to be doing a
		// query per index now. Or wait til consul 0.7 and use batch fetch
		pod, err := c.podStore.ReadPodFromIndex(podIndex)
		if err != nil {
			return ManifestResult{}, err
		}

		podManifest = pod.Manifest
		node = pod.Node
	} else {
		podManifest, err = manifest.FromBytes(pair.Value)
		if err != nil {
			return ManifestResult{}, err
		}

		node, err = extractNodeFromKey(pair.Key)
		if err != nil {
			return ManifestResult{}, err
		}
	}

	return ManifestResult{
		Manifest: podManifest,
		PodLocation: types.PodLocation{
			Node:  node,
			PodID: podManifest.ID(),
		},
		PodUniqueKey: podUniqueKey,
	}, nil
}
Example #8
func (h *HookDir) RunHookType(hookType HookType, pod Pod, manifest manifest.Manifest) error {
	logger := h.logger.SubLogger(logrus.Fields{
		"pod":      manifest.ID(),
		"pod_path": pod.Home(),
		"event":    hookType.String(),
	})
	logger.NoFields().Infof("Running %s hooks", hookType.String())
	return h.runHooks(h.dirpath, hookType, pod, manifest, logger)
}
Example #9
func (c consulStore) manifestResultFromPair(pair *api.KVPair) (ManifestResult, error) {
	// As we transition from legacy pods to uuid pods, the /intent and
	// /reality trees will contain both manifests (as they always have) and
	// uuids which refer to consul objects elsewhere in the KV tree. Therefore
	// we have to be able to tell which it is based on the key path
	podUniqueKey, err := PodUniqueKeyFromConsulPath(pair.Key)
	if err != nil {
		return ManifestResult{}, err
	}

	var podManifest manifest.Manifest
	var node types.NodeName
	if podUniqueKey != "" {
		var podIndex podstore.PodIndex
		err := json.Unmarshal(pair.Value, &podIndex)
		if err != nil {
			return ManifestResult{}, util.Errorf("Could not parse '%s' as pod index", pair.Key)
		}

		podManifest, node, err = c.manifestAndNodeFromIndex(pair)
		if err != nil {
			return ManifestResult{}, err
		}
	} else {
		podManifest, err = manifest.FromBytes(pair.Value)
		if err != nil {
			return ManifestResult{}, err
		}

		node, err = extractNodeFromKey(pair.Key)
		if err != nil {
			return ManifestResult{}, err
		}
	}

	return ManifestResult{
		Manifest: podManifest,
		PodLocation: types.PodLocation{
			Node:  node,
			PodID: podManifest.ID(),
		},
		PodUniqueKey: podUniqueKey,
	}, nil
}
Example #10
func (pod *Pod) Verify(manifest manifest.Manifest, authPolicy auth.Policy) error {
	for launchableID, stanza := range manifest.GetLaunchableStanzas() {
		if stanza.DigestLocation == "" {
			continue
		}
		launchable, err := pod.getLaunchable(launchableID, stanza, manifest.RunAsUser())
		if err != nil {
			return err
		}

		digestLocationURL, err := url.Parse(stanza.DigestLocation)
		if err != nil {
			return util.Errorf("Couldn't parse digest location '%s' as a url: %s", stanza.DigestLocation, err)
		}

		digestSignatureLocationURL, err := url.Parse(stanza.DigestSignatureLocation)
		if err != nil {
			return util.Errorf("Couldn't parse digest signature location '%s' as a url: %s", stanza.DigestSignatureLocation, err)
		}

		// Retrieve the digest data
		launchableDigest, err := digest.ParseUris(
			uri.DefaultFetcher,
			digestLocationURL,
			digestSignatureLocationURL,
		)
		if err != nil {
			return err
		}

		// Check that the digest is certified
		err = authPolicy.CheckDigest(launchableDigest)
		if err != nil {
			return err
		}

		// Check that the installed files match the digest
		err = launchableDigest.VerifyDir(launchable.InstallDir())
		if err != nil {
			return err
		}
	}
	return nil
}
Example #11
func manifestMustEqual(expected, actual manifest.Manifest, t *testing.T) {
	actualSha, err := actual.SHA()
	Assert(t).IsNil(err, "should have gotten SHA from old manifest")
	expectedSha, err := expected.SHA()
	Assert(t).IsNil(err, "should have gotten SHA from known old manifest")
	manifestBytes, err := expected.Marshal()
	Assert(t).IsNil(err, "should have gotten bytes from manifest")
	actualBytes, err := actual.Marshal()
	Assert(t).IsNil(err, "should have gotten bytes from writtenOld")
	Assert(t).AreEqual(expectedSha, actualSha, fmt.Sprintf("known: \n\n%s\n\nactual:\n\n%s\n", string(manifestBytes), string(actualBytes)))
}
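A hedged sketch of how the helper above might be exercised (the YAML body is illustrative; manifest.FromBytes is the parser used in the other examples).

// Sketch only: two manifests parsed from the same bytes should compare equal.
func TestManifestsFromSameBytesMatch(t *testing.T) {
	raw := []byte("id: hello-pod\n") // illustrative manifest body
	expected, err := manifest.FromBytes(raw)
	Assert(t).IsNil(err, "should have parsed expected manifest")
	actual, err := manifest.FromBytes(raw)
	Assert(t).IsNil(err, "should have parsed actual manifest")
	manifestMustEqual(expected, actual, t)
}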
Example #12
func writeManifest(workingDir string, manifest manifest.Manifest) (string, error) {
	file, err := os.OpenFile(
		path.Join(workingDir, fmt.Sprintf("%s.yaml", podID())),
		os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
		0644,
	)
	if err != nil {
		return "", err
	}
	defer file.Close()

	err = manifest.Write(file)
	if err != nil {
		return "", err
	}
	return file.Name(), nil
}
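For illustration, a caller might load a manifest from disk (manifest.FromPath, as in Example #17) and persist a copy into a working directory via the helper above; the function name and path argument are illustrative.

// Sketch only: read a manifest from an arbitrary path and copy it into workingDir.
func copyManifestToWorkdir(manifestPath, workingDir string) (string, error) {
	m, err := manifest.FromPath(manifestPath)
	if err != nil {
		return "", err
	}
	return writeManifest(workingDir, m)
}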
Example #13
// matches podstore.consulStore signature
func (c Client) Schedule(manifest manifest.Manifest, node types.NodeName) (types.PodUniqueKey, error) {
	manifestBytes, err := manifest.Marshal()
	if err != nil {
		return "", util.Errorf("Could not marshal manifest: %s", err)
	}

	req := &podstore_protos.SchedulePodRequest{
		NodeName: node.String(),
		Manifest: string(manifestBytes),
	}

	resp, err := c.client.SchedulePod(context.Background(), req)
	if err != nil {
		return "", util.Errorf("Could not schedule pod: %s", err)
	}

	return types.PodUniqueKey(resp.PodUniqueKey), nil
}
Example #14
func (h *HookDir) runHooks(dirpath string, hType HookType, pod Pod, podManifest manifest.Manifest, logger logging.Logger) error {
	configFileName, err := podManifest.ConfigFileName()
	if err != nil {
		return err
	}

	// Write manifest to a file so hooks can read it.
	tmpManifestFile, err := ioutil.TempFile("", fmt.Sprintf("%s-manifest.yaml", podManifest.ID()))
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to open manifest file for hooks")
		return err
	}
	defer os.Remove(tmpManifestFile.Name())

	err = podManifest.Write(tmpManifestFile)
	if err != nil {
		logger.WithErrorAndFields(err, logrus.Fields{
			"dir": dirpath,
		}).Warnln("Unable to write manifest file for hooks")
		return err
	}

	hookEnvironment := []string{
		fmt.Sprintf("%s=%s", HOOK_ENV_VAR, path.Base(dirpath)),
		fmt.Sprintf("%s=%s", HOOK_EVENT_ENV_VAR, hType.String()),
		fmt.Sprintf("%s=%s", HOOKED_NODE_ENV_VAR, pod.Node()),
		fmt.Sprintf("%s=%s", HOOKED_POD_ID_ENV_VAR, podManifest.ID()),
		fmt.Sprintf("%s=%s", HOOKED_POD_HOME_ENV_VAR, pod.Home()),
		fmt.Sprintf("%s=%s", HOOKED_POD_MANIFEST_ENV_VAR, tmpManifestFile.Name()),
		fmt.Sprintf("%s=%s", HOOKED_CONFIG_PATH_ENV_VAR, path.Join(pod.ConfigDir(), configFileName)),
		fmt.Sprintf("%s=%s", HOOKED_ENV_PATH_ENV_VAR, pod.EnvDir()),
		fmt.Sprintf("%s=%s", HOOKED_CONFIG_DIR_PATH_ENV_VAR, pod.ConfigDir()),
		fmt.Sprintf("%s=%s", HOOKED_SYSTEM_POD_ROOT_ENV_VAR, h.podRoot),
	}

	return runDirectory(dirpath, hookEnvironment, logger)
}
Example #15
func createRC(
	rcs rcstore.Store,
	applicator labels.Applicator,
	manifest manifest.Manifest,
	desired int,
	nodes map[types.NodeName]bool,
) (rc_fields.RC, error) {
	created, err := rcs.Create(manifest, nil, nil)
	if err != nil {
		return rc_fields.RC{}, fmt.Errorf("Error creating RC: %s", err)
	}

	podID := string(manifest.ID())

	for node := range nodes {
		if err = applicator.SetLabel(labels.POD, node.String()+"/"+podID, rc.RCIDLabel, string(created.ID)); err != nil {
			return rc_fields.RC{}, fmt.Errorf("Error applying RC ID label: %s", err)
		}
	}

	return created, rcs.SetDesiredReplicas(created.ID, desired)
}
Example #16
// Transfers the named node from the old RC to the new RC
func transferNode(node types.NodeName, manifest manifest.Manifest, upd update) error {
	if _, err := upd.kps.SetPod(kp.REALITY_TREE, node, manifest); err != nil {
		return err
	}
	return upd.labeler.SetLabel(labels.POD, labels.MakePodLabelKey(node, manifest.ID()), rc.RCIDLabel, string(upd.NewRC))
}
Example #17
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()
	log.Println("Starting bootstrap")
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatalf("error getting node name: %v", err)
	}
	nodeName := types.NodeName(hostname)
	agentManifest, err := manifest.FromPath(*agentManifestPath)
	if err != nil {
		log.Fatalln("Could not get agent manifest: %s", err)
	}
	log.Println("Installing and launching consul")

	podFactory := pods.NewFactory(*podRoot, nodeName)

	var consulPod *pods.Pod
	var consulManifest manifest.Manifest
	if *existingConsul == "" {
		consulManifest, err = manifest.FromPath(*consulManifestPath)
		if err != nil {
			log.Fatalf("Could not get consul manifest: %s", err)
		}

		// Consul will never have a uuid (for now)
		consulPod = podFactory.NewLegacyPod(consulManifest.ID())
		err = installConsul(consulPod, consulManifest, *registryURL)
		if err != nil {
			log.Fatalf("Could not install consul: %s", err)
		}
	} else {
		log.Printf("Using existing Consul at %s\n", *existingConsul)

		consulPod, err = pods.PodFromPodHome(nodeName, *existingConsul)
		if err != nil {
			log.Fatalf("The existing consul pod is invalid: %s", err)
		}
		consulManifest, err = consulPod.CurrentManifest()
		if err != nil {
			log.Fatalf("Cannot get the current consul manifest: %s", err)
		}
	}

	if err = verifyConsulUp(*timeout); err != nil {
		log.Fatalln(err)
	}
	time.Sleep(500 * time.Millisecond)
	// schedule consul in the reality store as well, to ensure the preparers do
	// not all restart their consul agents simultaneously after bootstrapping
	err = scheduleForThisHost(consulManifest, true)
	if err != nil {
		log.Fatalf("Could not register consul in the intent store: %s", err)
	}

	log.Println("Registering base agent in consul")
	err = scheduleForThisHost(agentManifest, false)
	if err != nil {
		log.Fatalf("Could not register base agent with consul: %s", err)
	}
	log.Println("Installing and launching base agent")
	err = installBaseAgent(podFactory, agentManifest, *registryURL)
	if err != nil {
		log.Fatalf("Could not install base agent: %s", err)
	}
	if err := verifyReality(30*time.Second, consulManifest.ID(), agentManifest.ID()); err != nil {
		log.Fatalln(err)
	}
	log.Println("Bootstrapping complete")
}
Example #18
func (c *consulStore) Schedule(manifest manifest.Manifest, node types.NodeName) (key types.PodUniqueKey, err error) {
	manifestBytes, err := manifest.Marshal()
	if err != nil {
		return "", err
	}

	podKey := types.NewPodUUID()

	podPath := computePodPath(podKey)
	intentIndexPath := computeIntentIndexPath(podKey, node)

	// Write the Pod to /pods/<key>
	pod := RawPod{
		Manifest: string(manifestBytes),
		Node:     node,
	}

	podBytes, err := json.Marshal(pod)
	if err != nil {
		return "", err
	}

	pair := &api.KVPair{
		Key:   podPath,
		Value: podBytes,
	}

	_, err = c.consulKV.Put(pair, nil)
	if err != nil {
		return "", consulutil.NewKVError("put", podPath, err)
	}

	// Now, write the secondary index to /intent/<node>/<key>
	index := PodIndex{
		PodKey: podKey,
	}

	// NOTE: errors might happen after this point which means we've written
	// a pod to /pods and we haven't written the corresponding index. In
	// those cases, we do a single attempt to delete the main pod key. In
	// the event of a Consul outage it's likely that the cleanup will fail,
	// so there will be a pod with no secondary index. For that purpose, a
	// sweeper process is planned to remove pods for which there is no index.
	// TODO: use a transaction when we can rely upon consul 0.7
	defer func() {
		if err != nil {
			_, _ = c.consulKV.Delete(podPath, nil)
		}
	}()

	indexBytes, err := json.Marshal(index)
	if err != nil {
		return "", util.Errorf("Could not marshal index as json: %s", err)
	}

	indexPair := &api.KVPair{
		Key:   intentIndexPath,
		Value: indexBytes,
	}
	_, err = c.consulKV.Put(indexPair, nil)
	if err != nil {
		return "", consulutil.NewKVError("put", intentIndexPath, err)
	}

	return podKey, nil
}
Example #19
func New(preparerConfig *PreparerConfig, logger logging.Logger) (*Preparer, error) {
	addHooks(preparerConfig, logger)

	if preparerConfig.ConsulAddress == "" {
		return nil, util.Errorf("No Consul address given to the preparer")
	}
	if preparerConfig.PodRoot == "" {
		return nil, util.Errorf("No pod root given to the preparer")
	}

	if preparerConfig.LogLevel != "" {
		lv, err := logrus.ParseLevel(preparerConfig.LogLevel)
		if err != nil {
			return nil, util.Errorf("Received invalid log level %q", preparerConfig.LogLevel)
		}
		logger.Logger.Level = lv
	}

	authPolicy, err := getDeployerAuth(preparerConfig)
	if err != nil {
		return nil, err
	}

	artifactVerifier, err := getArtifactVerifier(preparerConfig, &logger)
	if err != nil {
		return nil, err
	}

	artifactRegistry, err := getArtifactRegistry(preparerConfig)
	if err != nil {
		return nil, err
	}

	client, err := preparerConfig.GetConsulClient()
	if err != nil {
		return nil, err
	}

	statusStore := statusstore.NewConsul(client)
	podStatusStore := podstatus.NewConsul(statusStore, kp.PreparerPodStatusNamespace)
	podStore := podstore.NewConsul(client.KV())

	store := kp.NewConsulStore(client)

	maxLaunchableDiskUsage := launch.DefaultAllowableDiskUsage
	if preparerConfig.MaxLaunchableDiskUsage != "" {
		maxLaunchableDiskUsage, err = size.Parse(preparerConfig.MaxLaunchableDiskUsage)
		if err != nil {
			return nil, util.Errorf("Unparseable value for max_launchable_disk_usage %v, %v", preparerConfig.MaxLaunchableDiskUsage, err)
		}
	}

	err = os.MkdirAll(preparerConfig.PodRoot, 0755)
	if err != nil {
		return nil, util.Errorf("Could not create preparer pod directory: %s", err)
	}

	var logExec []string
	if len(preparerConfig.LogExec) > 0 {
		logExec = preparerConfig.LogExec
	} else {
		logExec = runit.DefaultLogExec()
	}

	finishExec := pods.NopFinishExec
	var podProcessReporter *podprocess.Reporter
	if preparerConfig.PodProcessReporterConfig.FullyConfigured() {
		podProcessReporterLogger := logger.SubLogger(logrus.Fields{
			"component": "PodProcessReporter",
		})

		podProcessReporter, err = podprocess.New(preparerConfig.PodProcessReporterConfig, podProcessReporterLogger, podStatusStore)
		if err != nil {
			return nil, err
		}

		finishExec = preparerConfig.PodProcessReporterConfig.FinishExec()
	}

	var hooksManifest manifest.Manifest
	var hooksPod *pods.Pod
	if preparerConfig.HooksManifest != NoHooksSentinelValue {
		if preparerConfig.HooksManifest == "" {
			return nil, util.Errorf("Most provide a hooks_manifest or sentinel value %q to indicate that there are no hooks", NoHooksSentinelValue)
		}

		hooksManifest, err = manifest.FromBytes([]byte(preparerConfig.HooksManifest))
		if err != nil {
			return nil, util.Errorf("Could not parse configured hooks manifest: %s", err)
		}
		hooksPodFactory := pods.NewHookFactory(filepath.Join(preparerConfig.PodRoot, "hooks"), preparerConfig.NodeName)
		hooksPod = hooksPodFactory.NewHookPod(hooksManifest.ID())
	}
	return &Preparer{
		node:                   preparerConfig.NodeName,
		store:                  store,
		hooks:                  hooks.Hooks(preparerConfig.HooksDirectory, preparerConfig.PodRoot, &logger),
		podStatusStore:         podStatusStore,
		podStore:               podStore,
		Logger:                 logger,
		podFactory:             pods.NewFactory(preparerConfig.PodRoot, preparerConfig.NodeName),
		authPolicy:             authPolicy,
		maxLaunchableDiskUsage: maxLaunchableDiskUsage,
		finishExec:             finishExec,
		logExec:                logExec,
		logBridgeBlacklist:     preparerConfig.LogBridgeBlacklist,
		artifactVerifier:       artifactVerifier,
		artifactRegistry:       artifactRegistry,
		PodProcessReporter:     podProcessReporter,
		hooksManifest:          hooksManifest,
		hooksPod:               hooksPod,
		hooksExecDir:           preparerConfig.HooksDirectory,
	}, nil
}
Example #20
// setupConfig does the following:
//
// 1) creates a "config" directory in the pod's home directory which contains
// YAML configuration files (named with the pod's ID and the SHA of its
// manifest's content), the path to which is exported to the pod's launchables
// via the CONFIG_PATH environment variable
//
// 2) writes an "env" directory in the pod's home directory called "env" which
// contains environment variables written as files that will be exported to all
// processes started by all launchables (as described in
// http://smarden.org/runit/chpst.8.html, with the -e option), including
// CONFIG_PATH
//
// 3) writes an "env" directory for each launchable. The "env" directory
// contains environment files specific to a launchable (such as
// LAUNCHABLE_ROOT)
//
// We may wish to provide a "config" directory per launchable at some point as
// well, so that launchables can have different config namespaces
func (pod *Pod) setupConfig(manifest manifest.Manifest, launchables []launch.Launchable) error {
	uid, gid, err := user.IDs(manifest.RunAsUser())
	if err != nil {
		return util.Errorf("Could not determine pod UID/GID: %s", err)
	}
	var configData bytes.Buffer
	err = manifest.WriteConfig(&configData)
	if err != nil {
		return err
	}
	var platConfigData bytes.Buffer
	err = manifest.WritePlatformConfig(&platConfigData)
	if err != nil {
		return err
	}

	err = util.MkdirChownAll(pod.ConfigDir(), uid, gid, 0755)
	if err != nil {
		return util.Errorf("Could not create config directory for pod %s: %s", manifest.ID(), err)
	}
	configFileName, err := manifest.ConfigFileName()
	if err != nil {
		return err
	}
	configPath := filepath.Join(pod.ConfigDir(), configFileName)
	err = writeFileChown(configPath, configData.Bytes(), uid, gid)
	if err != nil {
		return util.Errorf("Error writing config file for pod %s: %s", manifest.ID(), err)
	}
	platConfigFileName, err := manifest.PlatformConfigFileName()
	if err != nil {
		return err
	}
	platConfigPath := filepath.Join(pod.ConfigDir(), platConfigFileName)
	err = writeFileChown(platConfigPath, platConfigData.Bytes(), uid, gid)
	if err != nil {
		return util.Errorf("Error writing platform config file for pod %s: %s", manifest.ID(), err)
	}

	err = util.MkdirChownAll(pod.EnvDir(), uid, gid, 0755)
	if err != nil {
		return util.Errorf("Could not create the environment dir for pod %s: %s", manifest.ID(), err)
	}
	err = writeEnvFile(pod.EnvDir(), ConfigPathEnvVar, configPath, uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PlatformConfigPathEnvVar, platConfigPath, uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodHomeEnvVar, pod.Home(), uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodIDEnvVar, pod.Id.String(), uid, gid)
	if err != nil {
		return err
	}
	err = writeEnvFile(pod.EnvDir(), PodUniqueKeyEnvVar, pod.uniqueKey.String(), uid, gid)
	if err != nil {
		return err
	}

	for _, launchable := range launchables {
		// we need to remove any unset env vars from a previous pod
		err = os.RemoveAll(launchable.EnvDir())
		if err != nil {
			return err
		}

		err = util.MkdirChownAll(launchable.EnvDir(), uid, gid, 0755)
		if err != nil {
			return util.Errorf("Could not create the environment dir for pod %s launchable %s: %s", manifest.ID(), launchable.ServiceID(), err)
		}
		err = writeEnvFile(launchable.EnvDir(), LaunchableIDEnvVar, launchable.ID().String(), uid, gid)
		if err != nil {
			return err
		}
		err = writeEnvFile(launchable.EnvDir(), "LAUNCHABLE_ROOT", launchable.InstallDir(), uid, gid)
		if err != nil {
			return err
		}
		// last, write the user-supplied env variables to ensure priority of user-supplied values
		for envName, value := range launchable.EnvVars() {
			err = writeEnvFile(launchable.EnvDir(), envName, fmt.Sprint(value), uid, gid)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Example #21
// Install will ensure that executables for all required services are present on the host
// machine and are set up to run. In the case of Hoist artifacts (currently the only
// supported format), this will set up runit services.
func (pod *Pod) Install(manifest manifest.Manifest, verifier auth.ArtifactVerifier, artifactRegistry artifact.Registry) error {
	podHome := pod.home
	uid, gid, err := user.IDs(manifest.RunAsUser())
	if err != nil {
		return util.Errorf("Could not determine pod UID/GID for %s: %s", manifest.RunAsUser(), err)
	}

	err = util.MkdirChownAll(podHome, uid, gid, 0755)
	if err != nil {
		return util.Errorf("Could not create pod home: %s", err)
	}

	launchables, err := pod.Launchables(manifest)
	if err != nil {
		return err
	}

	downloader := artifact.NewLocationDownloader(pod.Fetcher, verifier)
	for launchableID, stanza := range manifest.GetLaunchableStanzas() {
		// TODO: investigate passing in necessary fields to InstallDir()
		launchable, err := pod.getLaunchable(launchableID, stanza, manifest.RunAsUser())
		if err != nil {
			pod.logLaunchableError(launchable.ServiceID(), err, "Unable to install launchable")
			return err
		}

		if launchable.Installed() {
			continue
		}

		launchableURL, verificationData, err := artifactRegistry.LocationDataForLaunchable(launchableID, stanza)
		if err != nil {
			pod.logLaunchableError(launchable.ServiceID(), err, "Unable to install launchable")
			return err
		}

		err = downloader.Download(launchableURL, verificationData, launchable.InstallDir(), manifest.RunAsUser())
		if err != nil {
			pod.logLaunchableError(launchable.ServiceID(), err, "Unable to install launchable")
			_ = os.Remove(launchable.InstallDir())
			return err
		}

		err = launchable.PostInstall()
		if err != nil {
			pod.logLaunchableError(launchable.ServiceID(), err, "Unable to install launchable")
			_ = os.Remove(launchable.InstallDir())
			return err
		}
	}

	// we may need to write config files to a unique directory per pod version, depending on restart semantics. Need
	// to think about this more.
	err = pod.setupConfig(manifest, launchables)
	if err != nil {
		pod.logError(err, "Could not setup config")
		return util.Errorf("Could not setup config: %s", err)
	}

	pod.logInfo("Successfully installed")

	return nil
}
Example #22
func (f *FakePodStore) SetPod(podPrefix kp.PodPrefix, hostname types.NodeName, manifest manifest.Manifest) (time.Duration, error) {
	f.podLock.Lock()
	defer f.podLock.Unlock()
	f.podResults[FakePodStoreKeyFor(podPrefix, hostname, manifest.ID())] = manifest
	return 0, nil
}

func (s *fakeKpStore) SetPod(podPrefix kp.PodPrefix, nodeName types.NodeName, manifest manifest.Manifest) (time.Duration, error) {
	key := path.Join(string(podPrefix), nodeName.String(), string(manifest.ID()))
	s.manifests[key] = manifest
	return 0, nil
}
Example #24
func generatePreparerPod(workdir string, userHookManifest manifest.Manifest) (string, error) {
	// build the artifact from HEAD
	output, err := exec.Command("go", "build", "github.com/square/p2/bin/p2-preparer").CombinedOutput()
	if err != nil {
		return "", util.Errorf("Couldn't build preparer: %s\nOutput:\n%s", err, string(output))
	}
	wd, _ := os.Getwd()
	hostname, err := os.Hostname()
	if err != nil {
		return "", util.Errorf("Couldn't get hostname: %s", err)
	}
	// the test number forces the pod manifest to change every test run.
	testNumber := fmt.Sprintf("test=%d", rand.Intn(2000000000))
	cmd := exec.Command("p2-bin2pod", "--work-dir", workdir, "--id", "p2-preparer", "--config", fmt.Sprintf("node_name=%s", hostname), "--config", testNumber, wd+"/p2-preparer")
	prepBin2Pod, err := executeBin2Pod(cmd)
	if err != nil {
		return "", err
	}

	if err = signBuild(prepBin2Pod.TarPath); err != nil {
		return "", err
	}

	manifest, err := manifest.FromPath(prepBin2Pod.ManifestPath)
	if err != nil {
		return "", err
	}
	builder := manifest.GetBuilder()
	builder.SetID("p2-preparer")

	envExtractorPath, err := exec.Command("which", "p2-finish-env-extractor").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("Could not find p2-finish-env-extractor on PATH")
	}

	userCreationHookBytes, err := userHookManifest.Marshal()
	if err != nil {
		return "", util.Errorf("Couldn't marshal user hook manifest: %s", err)
	}

	err = builder.SetConfig(map[interface{}]interface{}{
		"preparer": map[interface{}]interface{}{
			"auth": map[string]string{
				"type":    "keyring",
				"keyring": util.From(runtime.Caller(0)).ExpandPath("pubring.gpg"),
			},
			"artifact_auth": map[interface{}]interface{}{
				"type":    "build",
				"keyring": util.From(runtime.Caller(0)).ExpandPath("pubring.gpg"),
			},
			"ca_file":     filepath.Join(certpath, "cert.pem"),
			"cert_file":   filepath.Join(certpath, "cert.pem"),
			"key_file":    filepath.Join(certpath, "key.pem"),
			"status_port": preparerStatusPort,
			"process_result_reporter_config": map[string]string{
				"sqlite_database_path":       sqliteFinishDatabasePath,
				"environment_extractor_path": strings.TrimSpace(string(envExtractorPath)),
				"workspace_dir_path":         "/data/pods/p2-preparer/tmp",
			},
			"hooks_manifest": string(userCreationHookBytes),
		},
	})
	if err != nil {
		return "", err
	}

	builder.SetRunAsUser("root")
	builder.SetStatusPort(preparerStatusPort)
	builder.SetStatusHTTP(true)

	manifest = builder.GetManifest()

	manifestBytes, err := manifest.Marshal()
	if err != nil {
		return "", err
	}

	err = ioutil.WriteFile(prepBin2Pod.ManifestPath, manifestBytes, 0644)
	if err != nil {
		return "", err
	}

	return prepBin2Pod.ManifestPath, err
}