func (e EnvironmentExtractor) constructFinishFromEnvironment(exitCode int, exitStatus int) (FinishOutput, error) {
	podID := os.Getenv(pods.PodIDEnvVar)
	if podID == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", pods.PodIDEnvVar)
	}

	launchableID := os.Getenv(pods.LaunchableIDEnvVar)
	if launchableID == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", pods.LaunchableIDEnvVar)
	}

	entryPoint := os.Getenv(launch.EntryPointEnvVar)
	if entryPoint == "" {
		return FinishOutput{}, util.Errorf("No %s env var set", launch.EntryPointEnvVar)
	}

	// It's okay if this one is missing, most pods are "legacy" pods that have a blank unique key
	podUniqueKey := os.Getenv(pods.PodUniqueKeyEnvVar)

	return FinishOutput{
		PodID:        types.PodID(podID),
		LaunchableID: launch.LaunchableID(launchableID),
		EntryPoint:   entryPoint,
		PodUniqueKey: types.PodUniqueKey(podUniqueKey),
		ExitCode:     exitCode,
		ExitStatus:   exitStatus,
	}, nil
}
// Writes a key to the /reality tree to signify that the pod specified by the UUID has been
// launched on the given node.
func (c *consulStore) WriteRealityIndex(podKey types.PodUniqueKey, node types.NodeName) error {
	if podKey == "" {
		return util.Errorf("Pod store can only write index for pods with uuid keys")
	}

	realityIndexPath := computeRealityIndexPath(podKey, node)

	// Now, write the secondary index to /reality/<node>/<key>
	index := PodIndex{
		PodKey: podKey,
	}

	indexBytes, err := json.Marshal(index)
	if err != nil {
		return util.Errorf("Could not marshal index as json: %s", err)
	}

	indexPair := &api.KVPair{
		Key:   realityIndexPath,
		Value: indexBytes,
	}
	_, err = c.consulKV.Put(indexPair, nil)
	if err != nil {
		return consulutil.NewKVError("put", realityIndexPath, err)
	}

	return nil
}
func parseNodeSelectorWithPrompt(
	oldSelector klabels.Selector,
	newSelectorString string,
	applicator labels.Applicator,
) (klabels.Selector, error) {
	newSelector, err := parseNodeSelector(newSelectorString)
	if err != nil {
		return newSelector, err
	}
	if oldSelector.String() == newSelector.String() {
		return newSelector, nil
	}

	newNodeLabels, err := applicator.GetMatches(newSelector, labels.NODE, false)
	if err != nil {
		return newSelector, util.Errorf("Error getting matching labels: %v", err)
	}

	oldNodeLabels, err := applicator.GetMatches(oldSelector, labels.NODE, false)
	if err != nil {
		return newSelector, util.Errorf("Error getting matching labels: %v", err)
	}

	toRemove, toAdd := makeNodeChanges(oldNodeLabels, newNodeLabels)

	fmt.Printf("Changing deployment from '%v' to '%v':\n", oldSelector.String(), newSelectorString)
	fmt.Printf("Removing:%9s hosts %s\n", fmt.Sprintf("-%v", len(toRemove)), toRemove)
	fmt.Printf("Adding: %9s hosts %s\n", fmt.Sprintf("+%v", len(toAdd)), toAdd)
	fmt.Println("Continue?")
	if !confirm() {
		return newSelector, util.Errorf("User cancelled")
	}

	return newSelector, nil
}
// Polls for either farm to be populated by a daemon set with the same
// node selector as the daemon set in the argument
func waitForMutateSelectorFarms(firstFarm *Farm, secondFarm *Farm, ds ds_fields.DaemonSet) error {
	condition := func() error {
		if anotherDS, ok := firstFarm.children[ds.ID]; ok {
			if ds.ID != anotherDS.ds.ID() ||
				ds.NodeSelector.String() != anotherDS.ds.GetNodeSelector().String() {
				return util.Errorf(
					"Daemon sets do not match, expected '%v', '%v', got '%v', '%v'",
					ds.ID,
					ds.NodeSelector.String(),
					anotherDS.ds.ID(),
					anotherDS.ds.GetNodeSelector().String(),
				)
			}
			return nil
		} else if anotherDS, ok := secondFarm.children[ds.ID]; ok {
			if ds.ID != anotherDS.ds.ID() ||
				ds.NodeSelector.String() != anotherDS.ds.GetNodeSelector().String() {
				return util.Errorf(
					"Daemon sets do not match, expected '%v', '%v', got '%v', '%v'",
					ds.ID,
					ds.NodeSelector.String(),
					anotherDS.ds.ID(),
					anotherDS.ds.GetNodeSelector().String(),
				)
			}
			return nil
		}
		return util.Errorf("Farm does not have daemon set id")
	}
	return waitForCondition(condition)
}
// Attempts to claim a lock. If the overrideLock is set, any existing lock holder
// will be destroyed and one more attempt will be made to acquire the lock
func (r Replicator) lock(lock kp.Lock, lockPath string, overrideLock bool) error {
	err := lock.Lock(lockPath)
	if _, ok := err.(kp.AlreadyLockedError); ok {
		holder, id, err := r.Store.LockHolder(lockPath)
		if err != nil {
			return util.Errorf("Lock already held for %q, could not determine holder due to error: %s", lockPath, err)
		} else if holder == "" {
			// we failed to acquire this lock, but there is no outstanding
			// holder
			// this indicates that the previous holder had a LockDelay,
			// which prevents other parties from acquiring the lock for a
			// limited time
			return util.Errorf("Lock for %q is blocked due to delay by previous holder", lockPath)
		} else if overrideLock {
			err = r.Store.DestroyLockHolder(id)
			if err != nil {
				return util.Errorf("Unable to destroy the current lock holder (%s) for %q: %s", holder, lockPath, err)
			}

			// try acquiring the lock again, but this time don't destroy holders so we don't try forever
			return r.lock(lock, lockPath, false)
		} else {
			return util.Errorf("Lock for %q already held by lock %q", lockPath, holder)
		}
	}

	return err
}
func getArtifactVerifier(preparerConfig *PreparerConfig, logger *logging.Logger) (auth.ArtifactVerifier, error) {
	var verif ManifestVerification
	var err error
	switch t, _ := preparerConfig.ArtifactAuth["type"].(string); t {
	case "", auth.VerifyNone:
		return auth.NopVerifier(), nil
	case auth.VerifyManifest:
		err = castYaml(preparerConfig.ArtifactAuth, &verif)
		if err != nil {
			return nil, util.Errorf("error configuring artifact verification: %v", err)
		}
		return auth.NewBuildManifestVerifier(verif.KeyringPath, uri.DefaultFetcher, logger)
	case auth.VerifyBuild:
		err = castYaml(preparerConfig.ArtifactAuth, &verif)
		if err != nil {
			return nil, util.Errorf("error configuring artifact verification: %v", err)
		}
		return auth.NewBuildVerifier(verif.KeyringPath, uri.DefaultFetcher, logger)
	case auth.VerifyEither:
		err = castYaml(preparerConfig.ArtifactAuth, &verif)
		if err != nil {
			return nil, util.Errorf("error configuring artifact verification: %v", err)
		}
		return auth.NewCompositeVerifier(verif.KeyringPath, uri.DefaultFetcher, logger)
	default:
		return nil, util.Errorf("Unrecognized artifact verification type: %v", t)
	}
}
// UnmarshalConfig reads the preparer's configuration from its bytes.
func UnmarshalConfig(config []byte) (*PreparerConfig, error) {
	appConfig := AppConfig{}
	err := yaml.Unmarshal(config, &appConfig)
	if err != nil {
		return nil, util.Errorf("The config file %s was malformatted - %s", config, err)
	}
	preparerConfig := appConfig.P2PreparerConfig

	if preparerConfig.NodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, util.Errorf("Couldn't determine hostname: %s", err)
		}
		preparerConfig.NodeName = types.NodeName(hostname)
	}
	if preparerConfig.ConsulAddress == "" {
		preparerConfig.ConsulAddress = DefaultConsulAddress
	}
	if preparerConfig.HooksDirectory == "" {
		preparerConfig.HooksDirectory = hooks.DEFAULT_PATH
	}
	if preparerConfig.PodRoot == "" {
		preparerConfig.PodRoot = pods.DefaultPath
	}
	return &preparerConfig, nil
}
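// Example (illustrative sketch, not part of the original source): loading a preparer
// config file from disk and letting UnmarshalConfig apply the defaults above. The file
// path and the wrapper function are hypothetical; ioutil is assumed to be imported.
func exampleLoadPreparerConfig() (*PreparerConfig, error) {
	configBytes, err := ioutil.ReadFile("/etc/p2/preparer.yaml")
	if err != nil {
		return nil, err
	}
	return UnmarshalConfig(configBytes)
}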
// Given a consul key path, returns the RC ID and the lock type. Returns an err
// if the key does not resemble an RC lock key
func (s *consulStore) lockTypeFromKey(key string) (fields.ID, LockType, error) {
	keyParts := strings.Split(key, "/")
	// Sanity check key structure e.g. /lock/replication_controllers/abcd-1234
	if len(keyParts) < 3 || len(keyParts) > 4 {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	if keyParts[0] != consulutil.LOCK_TREE {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	if keyParts[1] != rcTree {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	rcID := keyParts[2]
	if len(keyParts) == 3 {
		// There's no lock suffix, so this is an ownership lock
		return fields.ID(rcID), OwnershipLockType, nil
	}

	switch keyParts[3] {
	case mutationSuffix:
		return fields.ID(rcID), MutationLockType, nil
	case updateCreationSuffix:
		return fields.ID(rcID), UpdateCreationLockType, nil
	default:
		return fields.ID(rcID), UnknownLockType, nil
	}
}
// removePods unschedules pods for all scheduled nodes not selected
// by ds.nodeSelector
func (ds *daemonSet) removePods() error {
	podLocations, err := ds.CurrentPods()
	if err != nil {
		return util.Errorf("Error retrieving pod locations from daemon set: %v", err)
	}
	currentNodes := podLocations.Nodes()

	eligible, err := ds.EligibleNodes()
	if err != nil {
		return util.Errorf("Error retrieving eligible nodes for daemon set: %v", err)
	}

	// Get the difference in nodes that we need to unschedule on and then sort them
	// for deterministic ordering
	toUnscheduleSorted := types.NewNodeSet(currentNodes...).Difference(types.NewNodeSet(eligible...)).ListNodes()
	ds.logger.NoFields().Infof("Need to unschedule %d nodes", len(toUnscheduleSorted))

	ds.cancelReplication()

	for _, node := range toUnscheduleSorted {
		err := ds.unschedule(node)
		if err != nil {
			return util.Errorf("Error unscheduling node: %v", err)
		}
	}

	ds.logger.Infof("Need to schedule %v nodes", len(currentNodes))
	if len(currentNodes)-len(toUnscheduleSorted) > 0 {
		return ds.PublishToReplication()
	}
	return nil
}
// addPods schedules pods for all unscheduled nodes selected by ds.nodeSelector
func (ds *daemonSet) addPods() error {
	podLocations, err := ds.CurrentPods()
	if err != nil {
		return util.Errorf("Error retrieving pod locations from daemon set: %v", err)
	}
	currentNodes := podLocations.Nodes()

	eligible, err := ds.EligibleNodes()
	if err != nil {
		return util.Errorf("Error retrieving eligible nodes for daemon set: %v", err)
	}
	// TODO: Grab a lock here for the pod_id before adding something to check
	// contention and then disable

	// Get the difference in nodes that we need to schedule on and then sort them
	// for deterministic ordering
	toScheduleSorted := types.NewNodeSet(eligible...).Difference(types.NewNodeSet(currentNodes...)).ListNodes()
	ds.logger.NoFields().Infof("Need to label %d nodes", len(toScheduleSorted))

	for _, node := range toScheduleSorted {
		err := ds.labelPod(node)
		if err != nil {
			return util.Errorf("Error labeling node: %v", err)
		}
	}

	ds.logger.Infof("Need to schedule %v nodes", len(currentNodes))
	if len(currentNodes) > 0 {
		return ds.PublishToReplication()
	}
	return nil
}
// Watches for changes to nodes and sends update and delete signals
func (ds *daemonSet) handleNodeChanges(changes *labels.LabeledChanges) error {
	if len(changes.Updated) > 0 {
		ds.logger.NoFields().Infof("Received node change signal")
		err := ds.removePods()
		if err != nil {
			return util.Errorf("Unable to remove pods from intent tree: %v", err)
		}
		err = ds.addPods()
		if err != nil {
			return util.Errorf("Unable to add pods to intent tree: %v", err)
		}
		return nil
	}

	if len(changes.Created) > 0 {
		ds.logger.NoFields().Infof("Received node create signal")
		err := ds.addPods()
		if err != nil {
			return util.Errorf("Unable to add pods to intent tree: %v", err)
		}
	}

	if len(changes.Deleted) > 0 {
		ds.logger.NoFields().Infof("Received node delete signal")
		err := ds.removePods()
		if err != nil {
			return util.Errorf("Unable to remove pods from intent tree: %v", err)
		}
	}

	return nil
}
func (p UserPolicy) AuthorizePod(podUser string, manifest Signed, logger logging.Logger) error {
	// Verify that the signature is valid
	plaintext, signature := manifest.SignatureData()
	if signature == nil {
		return Error{util.Errorf("received unsigned manifest"), nil}
	}

	keyringChan := p.keyringWatcher.GetAsync()
	dpolChan := p.deployWatcher.GetAsync()
	keyring := (<-keyringChan).(openpgp.EntityList)
	dpol := (<-dpolChan).(DeployPol)

	signer, err := checkDetachedSignature(keyring, plaintext, signature)
	if err != nil {
		return err
	}

	// Check if any of the signer's identities is authorized
	lastIdName := "(unknown)"
	for name, id := range signer.Identities {
		if dpol.Authorized(podUser, id.UserId.Email) {
			return nil
		}
		lastIdName = name
	}
	return Error{util.Errorf("user not authorized to deploy app: %s", lastIdName), nil}
}
func LoadKeyring(path string) (openpgp.EntityList, error) {
	if path == "" {
		return nil, util.Errorf("no keyring configured")
	}

	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Accept both ASCII-armored and binary encodings
	keyring, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil && err.Error() == "openpgp: invalid argument: no armored data found" {
		offset, seekErr := f.Seek(0, os.SEEK_SET)
		if offset != 0 || seekErr != nil {
			return nil, util.Errorf(
				"couldn't seek to beginning, got %d %s",
				offset,
				seekErr,
			)
		}
		keyring, err = openpgp.ReadKeyRing(f)
	}

	return keyring, err
}
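// Example (illustrative sketch, not part of the original source): loading a keyring from a
// hypothetical path and reporting how many entities it contains.
func exampleCountKeyringEntities() (int, error) {
	keyring, err := LoadKeyring("/etc/p2/keyring.gpg")
	if err != nil {
		return 0, err
	}
	return len(keyring), nil
}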
func (p FixedKeyringPolicy) AuthorizeApp(manifest Manifest, logger logging.Logger) error {
	plaintext, signature := manifest.SignatureData()
	if signature == nil {
		return Error{util.Errorf("received unsigned manifest (expected signature)"), nil}
	}
	signer, err := checkDetachedSignature(p.Keyring, plaintext, signature)
	if err != nil {
		return err
	}

	signerId := fmt.Sprintf("%X", signer.PrimaryKey.Fingerprint)
	logger.WithField("signer_key", signerId).Debugln("resolved manifest signature")

	// Check authorization for this package to be deployed by this
	// key, if configured.
	if len(p.AuthorizedDeployers[manifest.ID()]) > 0 {
		found := false
		for _, deployerId := range p.AuthorizedDeployers[manifest.ID()] {
			if deployerId == signerId {
				found = true
				break
			}
		}
		if !found {
			return Error{
				util.Errorf("manifest signer not authorized to deploy " + manifest.ID()),
				map[string]interface{}{"signer_key": signerId},
			}
		}
	}

	return nil
}
func (c *consulStore) ReadPod(podKey types.PodUniqueKey) (Pod, error) {
	if podKey == "" {
		return Pod{}, util.Errorf("Pod store can only read pods with uuid keys")
	}

	if pod, ok := c.fetchFromCache(podKey); ok {
		return pod, nil
	}

	podPath := computePodPath(podKey)

	pair, _, err := c.consulKV.Get(podPath, nil)
	if err != nil {
		return Pod{}, consulutil.NewKVError("get", podPath, err)
	}
	if pair == nil {
		return Pod{}, NoPodError(podKey)
	}

	var pod Pod
	err = json.Unmarshal(pair.Value, &pod)
	if err != nil {
		return Pod{}, util.Errorf("Could not unmarshal pod '%s' as json: %s", podKey, err)
	}

	c.addToCache(podKey, pod)
	return pod, nil
}
func (s *consulStore) Get(id fields.ID) (fields.DaemonSet, *api.QueryMeta, error) {
	var metadata *api.QueryMeta
	dsPath, err := s.dsPath(id)
	if err != nil {
		return fields.DaemonSet{}, metadata, util.Errorf("Error getting daemon set path: %v", err)
	}

	kvp, metadata, err := s.kv.Get(dsPath, nil)
	if err != nil {
		return fields.DaemonSet{}, metadata, consulutil.NewKVError("get", dsPath, err)
	}
	if metadata == nil {
		// no metadata returned
		return fields.DaemonSet{}, metadata, errors.New("No metadata found")
	}
	if kvp == nil {
		// ID didn't exist
		return fields.DaemonSet{}, metadata, NoDaemonSet
	}

	ds, err := kvpToDS(kvp)
	if err != nil {
		return fields.DaemonSet{}, metadata, util.Errorf("Error translating kvp to daemon set: %v", err)
	}
	return ds, metadata, nil
}
func verifyConsulUp(timeout string) error {
	timeoutDur, err := time.ParseDuration(timeout)
	if err != nil {
		return err
	}
	if timeoutDur == 0 {
		return nil
	}

	config := api.DefaultConfig()
	config.Token = *consulToken

	client, err := api.NewClient(config)
	if err != nil {
		return util.Errorf("Could not construct consul client: '%s'", err)
	}
	consulIsUp := make(chan struct{})
	go func() {
		for {
			time.Sleep(200 * time.Millisecond)
			err := Ping(client)
			if err == nil {
				consulIsUp <- struct{}{}
				return
			}
		}
	}()
	select {
	case <-time.After(timeoutDur):
		return util.Errorf("Consul did not start or was not available after %v", timeoutDur)
	case <-consulIsUp:
		return nil
	}
}
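// Example (illustrative sketch, not part of the original source): waiting up to a
// hypothetical 30 seconds for the consul agent to answer a ping before proceeding.
func exampleWaitForConsul() error {
	if err := verifyConsulUp("30s"); err != nil {
		return err
	}
	// consul responded within the timeout; safe to continue with setup work
	return nil
}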
func (s *consulStore) Create(
	manifest manifest.Manifest,
	minHealth int,
	name fields.ClusterName,
	nodeSelector klabels.Selector,
	podID types.PodID,
	timeout time.Duration,
) (fields.DaemonSet, error) {
	if err := checkManifestPodID(podID, manifest); err != nil {
		return fields.DaemonSet{}, util.Errorf("Error verifying manifest pod id: %v", err)
	}

	ds, err := s.innerCreate(manifest, minHealth, name, nodeSelector, podID, timeout)

	// TODO: measure whether retries are important in practice
	for i := 0; i < s.retries; i++ {
		if _, ok := err.(CASError); ok {
			ds, err = s.innerCreate(manifest, minHealth, name, nodeSelector, podID, timeout)
		} else {
			break
		}
	}
	if err != nil {
		return fields.DaemonSet{}, util.Errorf("Error creating daemon set: %v", err)
	}
	return ds, nil
}
// symlink the runit service directory into the actual directory being monitored
// by runsvdir
// runsvdir will automatically start a service for each new directory (unless a
// down file exists)
func (s *ServiceBuilder) activate(templates map[string]ServiceTemplate) error {
	for serviceName := range templates {
		linkPath := filepath.Join(s.RunitRoot, serviceName)
		stageDir := filepath.Join(s.StagingRoot, serviceName)

		info, err := os.Lstat(linkPath)
		if err == nil {
			// if it exists, make sure it is actually a symlink
			if info.Mode()&os.ModeSymlink == 0 {
				return util.Errorf("%s is not a symlink", linkPath)
			}

			// and that it points to the right place
			target, err := os.Readlink(linkPath)
			if err != nil {
				return err
			}
			if target != stageDir {
				return util.Errorf("%s is a symlink to %s (expected %s)", linkPath, target, stageDir)
			}
		} else if os.IsNotExist(err) {
			if err = os.Symlink(stageDir, linkPath); err != nil {
				return err
			}
		} else if err != nil {
			return err
		}
	}
	return nil
}
func (a registry) authDataFromRegistryResponse(registryResponse RegistryResponse) (auth.VerificationData, error) {
	verificationData := auth.VerificationData{}
	if registryResponse.ManifestLocation != "" {
		manifestURL, err := url.Parse(registryResponse.ManifestLocation)
		if err != nil {
			return verificationData, util.Errorf("Couldn't parse manifest URL from registry response: %s", err)
		}
		verificationData.ManifestLocation = manifestURL
	}

	if registryResponse.ManifestSignatureLocation != "" {
		manifestSignatureURL, err := url.Parse(registryResponse.ManifestSignatureLocation)
		if err != nil {
			return verificationData, util.Errorf("Couldn't parse manifest signature URL from registry response: %s", err)
		}
		verificationData.ManifestSignatureLocation = manifestSignatureURL
	}

	if registryResponse.BuildSignatureLocation != "" {
		buildSignatureURL, err := url.Parse(registryResponse.BuildSignatureLocation)
		if err != nil {
			return verificationData, util.Errorf("Couldn't parse build signature URL from registry response: %s", err)
		}
		verificationData.BuildSignatureLocation = buildSignatureURL
	}

	return verificationData, nil
}
// getTLSConfig constructs a tls.Config that uses keys/certificates in the given files.
func getTLSConfig(certFile, keyFile, caFile string) (*tls.Config, error) {
	var certs []tls.Certificate
	if certFile != "" || keyFile != "" {
		if certFile == "" || keyFile == "" {
			return nil, util.Errorf("TLS client requires both cert file and key file")
		}

		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, util.Errorf("Could not load keypair: %s", err)
		}
		certs = append(certs, cert)
	}

	var cas *x509.CertPool
	if caFile != "" {
		cas = x509.NewCertPool()
		caBytes, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		ok := cas.AppendCertsFromPEM(caBytes)
		if !ok {
			return nil, util.Errorf("Could not parse certificate file: %s", caFile)
		}
	}

	tlsConfig := &tls.Config{
		Certificates: certs,
		ClientCAs:    cas,
		RootCAs:      cas,
	}
	return tlsConfig, nil
}
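// Example (illustrative sketch, not part of the original source): wiring the returned
// tls.Config into an http.Client for mutual-TLS requests. The file paths are hypothetical
// and net/http is assumed to be imported.
func exampleTLSClient() (*http.Client, error) {
	tlsConfig, err := getTLSConfig("/etc/p2/client.crt", "/etc/p2/client.key", "/etc/p2/ca.crt")
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}, nil
}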
// Given a launchable stanza, returns the URL from which the artifact may be downloaded, as
// well as an auth.VerificationData which can be used to verify the artifact.
// There are two schemes for specifying this information in a launchable stanza:
// 1) using the "location" field. In this case, the artifact location is simply the value
//    of the field and the path to the verification files is inferred using magic suffixes
// 2) the "version" field is provided. In this case, the artifact registry is queried with
//    the information specified under the "version" key and the response contains the URLs
//    from which the extra files may be fetched, and these are returned.
//
// When using the first method, the following magical suffixes are assumed:
// manifest: ".manifest"
// manifest signature: ".manifest.sig"
// build signature: ".sig"
func (a registry) LocationDataForLaunchable(launchableID launch.LaunchableID, stanza launch.LaunchableStanza) (*url.URL, auth.VerificationData, error) {
	if stanza.Location == "" && stanza.Version.ID == "" {
		return nil, auth.VerificationData{}, util.Errorf("Launchable must provide either \"location\" or \"version\" fields")
	}

	if stanza.Location != "" && stanza.Version.ID != "" {
		return nil, auth.VerificationData{}, util.Errorf("Launchable must not provide both \"location\" and \"version\" fields")
	}

	// infer the verification data using magical suffixes
	if stanza.Location != "" {
		location, err := url.Parse(stanza.Location)
		if err != nil {
			return nil, auth.VerificationData{}, util.Errorf("Couldn't parse launchable url '%s': %s", stanza.Location, err)
		}

		verificationData := VerificationDataForLocation(location)
		return location, verificationData, nil
	}

	if a.registryURL == nil {
		return nil, auth.VerificationData{}, util.Errorf("No artifact registry configured and location field not present on launchable %s", launchableID)
	}

	return a.fetchRegistryData(launchableID, stanza.Version)
}
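// Example (illustrative, not from the original source): assuming the magic suffixes listed
// above are appended to the artifact URL, a launchable whose "location" is the hypothetical
// https://artifacts.example.com/myapp_abc123.tar.gz would have its verification files at:
//
//	manifest:           https://artifacts.example.com/myapp_abc123.tar.gz.manifest
//	manifest signature: https://artifacts.example.com/myapp_abc123.tar.gz.manifest.sig
//	build signature:    https://artifacts.example.com/myapp_abc123.tar.gz.sig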
func (f BasicFetcher) Open(srcUri string) (io.ReadCloser, error) {
	u, err := url.Parse(srcUri)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "", "file":
		// Assume a schemeless URI is a path to a local file
		if !filepath.IsAbs(u.Path) {
			return nil, util.Errorf("%q: not an absolute path", u.Path)
		}
		return os.Open(u.Path)
	case "http", "https":
		resp, err := f.Client.Get(u.String())
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			return nil, util.Errorf(
				"%q: HTTP server returned status: %s",
				u.String(),
				resp.Status,
			)
		}
		return resp.Body, nil
	default:
		return nil, util.Errorf("%q: unknown scheme %s", u.String(), u.Scheme)
	}
}
func (f BasicFetcher) Open(u *url.URL) (io.ReadCloser, error) {
	switch u.Scheme {
	case "":
		// Assume a schemeless URI is a path to a local file
		return os.Open(u.String())
	case "file":
		if u.Path == "" {
			return nil, util.Errorf("%s: invalid path in URI", u.String())
		}
		if !filepath.IsAbs(u.Path) {
			return nil, util.Errorf("%q: file URIs must use an absolute path", u.Path)
		}
		return os.Open(u.Path)
	case "http", "https":
		resp, err := f.Client.Get(u.String())
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusOK {
			_ = resp.Body.Close()
			return nil, util.Errorf(
				"%q: HTTP server returned status: %s",
				u.String(),
				resp.Status,
			)
		}
		return resp.Body, nil
	default:
		return nil, util.Errorf("%q: unknown scheme %s", u.String(), u.Scheme)
	}
}
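// Example (illustrative sketch, not part of the original source): fetching an artifact over
// HTTPS with the URL-based Open above. This assumes the Client field accepts an *http.Client;
// the artifact URL and the wrapper function are hypothetical.
func exampleFetchArtifact() error {
	u, err := url.Parse("https://artifacts.example.com/myapp_abc123.tar.gz")
	if err != nil {
		return err
	}
	f := BasicFetcher{Client: http.DefaultClient}
	body, err := f.Open(u)
	if err != nil {
		return err
	}
	defer body.Close()
	// stream body to disk or hand it to a verifier from here
	return nil
}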
func waitForDisabled(
	dsf *Farm,
	dsStore *dsstoretest.FakeDSStore,
	dsID ds_fields.ID,
	isDisabled bool,
) error {
	var newDS ds_fields.DaemonSet
	var err error

	condition := func() error {
		newDS, _, err = dsStore.Get(dsID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if newDS.Disabled != isDisabled {
			return util.Errorf("Unexpected disabled value. Expected '%v', got '%v'",
				isDisabled,
				newDS.Disabled,
			)
		}

		if _, ok := dsf.children[newDS.ID]; !ok {
			return util.Errorf("Expected farm to create child daemon set with id '%v'", dsID)
		}
		if dsf.children[newDS.ID].ds.IsDisabled() != isDisabled {
			return util.Errorf("Unexpected disabled value in farm. Expected '%v', got '%v'",
				isDisabled,
				dsf.children[newDS.ID].ds.IsDisabled(),
			)
		}
		return nil
	}

	return waitForCondition(condition)
}
// FromBytes constructs a Manifest by parsing its serialized representation. The
// manifest can be a raw YAML document or a PGP clearsigned YAML document. If signed, the
// signature components will be stored inside the Manifest instance.
func FromBytes(bytes []byte) (Manifest, error) {
	manifest := &manifest{}

	// Preserve the raw manifest so that manifest.Bytes() returns bytes in
	// the same order that they were passed to this function
	manifest.raw = make([]byte, len(bytes))
	copy(manifest.raw, bytes)

	signed, _ := clearsign.Decode(bytes)
	if signed != nil {
		signature, err := ioutil.ReadAll(signed.ArmoredSignature.Body)
		if err != nil {
			return nil, util.Errorf("Could not read signature from pod manifest: %s", err)
		}
		manifest.signature = signature

		// the original plaintext is in signed.Plaintext, but the signature
		// corresponds to signed.Bytes, so that's what we need to save
		manifest.plaintext = signed.Bytes

		// parse YAML from the message's plaintext instead
		bytes = signed.Plaintext
	}

	if err := yaml.Unmarshal(bytes, manifest); err != nil {
		return nil, util.Errorf("Could not read pod manifest: %s", err)
	}
	if err := ValidManifest(manifest); err != nil {
		return nil, util.Errorf("invalid manifest: %s", err)
	}
	return manifest, nil
}
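// Example (illustrative sketch, not part of the original source): parsing a manifest file
// from disk, whether or not it is clearsigned. The file path is hypothetical.
func exampleReadManifestFile() (Manifest, error) {
	manifestBytes, err := ioutil.ReadFile("/etc/p2/pods/myapp.yaml")
	if err != nil {
		return nil, err
	}
	return FromBytes(manifestBytes)
}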
// Deduces a PodUniqueKey from a consul path. This is useful as pod keys are transitioned
// from using node name and pod ID to using UUIDs.
// Input is expected to have 3 '/' separated sections, e.g. 'intent/<node>/<pod_id>' or
// 'intent/<node>/<pod_uuid>' if the prefix is "intent" or "reality"
//
// /hooks is also a valid pod prefix and the key under it will not be a uuid.
func PodUniqueKeyFromConsulPath(consulPath string) (*types.PodUniqueKey, error) {
	keyParts := strings.Split(consulPath, "/")
	if len(keyParts) == 0 {
		return nil, util.Errorf("Malformed key '%s'", consulPath)
	}
	if keyParts[0] == "hooks" {
		return nil, nil
	}
	if len(keyParts) != 3 {
		return nil, util.Errorf("Malformed key '%s'", consulPath)
	}

	// Unfortunately we can't use kp.INTENT_TREE and kp.REALITY_TREE here because of an import cycle
	if keyParts[0] != "intent" && keyParts[0] != "reality" {
		return nil, util.Errorf("Unrecognized key tree '%s' (must be intent or reality)", keyParts[0])
	}

	// Parse() returns nil if the input string does not match the uuid spec
	if uuid.Parse(keyParts[2]) != nil {
		return &types.PodUniqueKey{
			ID: keyParts[2],
		}, nil
	}

	return nil, nil
}
// Deduces a PodUniqueKey from a consul path. This is useful as pod keys are transitioned
// from using node name and pod ID to using UUIDs.
// Input is expected to have 3 '/' separated sections, e.g. 'intent/<node>/<pod_id>' or
// 'intent/<node>/<pod_uuid>' if the prefix is "intent" or "reality"
//
// /hooks is also a valid pod prefix and the key under it will not be a uuid.
func PodUniqueKeyFromConsulPath(consulPath string) (types.PodUniqueKey, error) {
	keyParts := strings.Split(consulPath, "/")
	if len(keyParts) == 0 {
		return "", util.Errorf("Malformed key '%s'", consulPath)
	}
	if keyParts[0] == "hooks" {
		return "", nil
	}
	if len(keyParts) != 3 {
		return "", util.Errorf("Malformed key '%s'", consulPath)
	}

	// Unfortunately we can't use kp.INTENT_TREE and kp.REALITY_TREE here because of an import cycle
	if keyParts[0] != "intent" && keyParts[0] != "reality" {
		return "", util.Errorf("Unrecognized key tree '%s' (must be intent or reality)", keyParts[0])
	}

	// ToPodUniqueKey() returns InvalidUUID if the input string does not match the uuid spec
	podUniqueKey, err := types.ToPodUniqueKey(keyParts[2])
	switch {
	case err == types.InvalidUUID:
		// this is okay, it's just a legacy pod
		podUniqueKey = ""
	case err != nil:
		return "", util.Errorf("Could not test whether %s is a valid pod unique key: %s", keyParts[2], err)
	}

	return podUniqueKey, nil
}
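// Example (illustrative, not from the original source): given the parsing rules above, with
// hypothetical node and pod names,
//
//	"intent/node1.example.com/myapp"                                  -> "" (legacy pod, no unique key)
//	"reality/node1.example.com/abc123ef-0000-1111-2222-333344445555"  -> that UUID as the PodUniqueKey
//	"hooks/before_install/myapp"                                      -> "" (hooks keys never carry a uuid)
//	"lock/node1.example.com/myapp"                                    -> error (unrecognized key tree)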
// requires a path to a platform configuration file in this format:
//
// <launchablename>:
//   cgroup:
//     cpus: 4
//     memory: 123456
//
// and the <launchablename> and <cgroupname>
// a cgroup with the name <cgroupname> will be created, using the parameters for
// <launchablename> found in the platform configuration
// then, the current PID will be added to that cgroup
func cgEnter(platconf, launchableName, cgroupName string) error {
	platconfBuf, err := ioutil.ReadFile(platconf)
	if err != nil {
		return err
	}
	cgMap := make(map[string]map[string]cgroups.Config)
	err = yaml.Unmarshal(platconfBuf, cgMap)
	if err != nil {
		return err
	}

	if _, ok := cgMap[launchableName]; !ok {
		return util.Errorf("Unknown launchable %q in PLATFORM_CONFIG_PATH", launchableName)
	}
	if _, ok := cgMap[launchableName]["cgroup"]; !ok {
		return util.Errorf("Launchable %q has malformed PLATFORM_CONFIG_PATH", launchableName)
	}
	cgConfig := cgMap[launchableName]["cgroup"]
	cgConfig.Name = cgroupName

	cg, err := cgroups.Find()
	if err != nil {
		return util.Errorf("Could not find cgroupfs mount point: %s", err)
	}
	err = cg.Write(cgConfig)
	if _, ok := err.(cgroups.UnsupportedError); ok {
		// if a subsystem is not supported, just log
		// and carry on
		log.Printf("Unsupported subsystem (%s), continuing\n", err)
		return nil
	} else if err != nil {
		return util.Errorf("Could not set cgroup parameters: %s", err)
	}

	return cg.AddPID(cgConfig.Name, os.Getpid())
}
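// Example (illustrative sketch, not part of the original source): entering a cgroup for a
// hypothetical launchable "myapp" before exec'ing its process. The config path and cgroup
// name are made up for the example.
func exampleEnterCgroup() error {
	if err := cgEnter("/etc/p2/platform.yaml", "myapp", "myapp_cgroup"); err != nil {
		return err
	}
	// from here the current process (and any children) are accounted to the cgroup
	return nil
}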
func (u *update) shouldRollAfterDelay(podID types.PodID) (int, int, error) {
	// Check health again following the roll delay. If things have gotten
	// worse since we last looked, or there is an error, we break this iteration.
	checks, err := u.hcheck.Service(podID.String())
	if err != nil {
		return 0, 0, util.Errorf("Could not retrieve health following delay: %v", err)
	}

	afterDelayNew, err := u.countHealthy(u.NewRC, checks)
	if err != nil {
		return 0, 0, util.Errorf("Could not determine new service health: %v", err)
	}

	afterDelayOld, err := u.countHealthy(u.OldRC, checks)
	if err != nil {
		return 0, 0, util.Errorf("Could not determine old service health: %v", err)
	}

	afterDelayRemove, afterDelayAdd := rollAlgorithm(u.rollAlgorithmParams(afterDelayOld, afterDelayNew))
	if afterDelayRemove <= 0 && afterDelayAdd <= 0 {
		return 0, 0, util.Errorf("No nodes can be safely updated after %v roll delay, will wait again", u.RollDelay)
	}

	return afterDelayRemove, afterDelayAdd, nil
}