// Cleanup removes all documents that were previously marked for removal, if // any such exist. It should be called periodically by at least one element // of the system. func (st *State) Cleanup() error { doc := cleanupDoc{} iter := st.cleanups.Find(nil).Iter() for iter.Next(&doc) { var err error switch doc.Kind { case "settings": err = st.cleanupSettings(doc.Prefix) case "units": err = st.cleanupUnits(doc.Prefix) default: err = fmt.Errorf("unknown cleanup kind %q", doc.Kind) } if err != nil { log.Warningf("cleanup failed: %v", err) continue } ops := []txn.Op{{ C: st.cleanups.Name, Id: doc.Id, Remove: true, }} if err := st.runTransaction(ops); err != nil { return fmt.Errorf("cannot remove empty cleanup document: %v", err) } } if err := iter.Err(); err != nil { return fmt.Errorf("cannot read cleanup document: %v", err) } return nil }
func removeAll(dir string) { err := os.RemoveAll(dir) if err == nil || os.IsNotExist(err) { return } log.Warningf("environs: cannot remove %q: %v", dir, err) }
func cleanTempFile(f *os.File) { if f != nil { f.Close() if err := os.Remove(f.Name()); err != nil { log.Warningf("downloader: cannot remove temp file %q: %v", f.Name(), err) } } }
// revision returns the revision and SHA256 digest of the charm referenced by curl. func (s *CharmStore) revision(curl *URL) (revision int, digest string, err error) { info, err := s.Info(curl) if err != nil { return 0, "", err } for _, w := range info.Warnings { log.Warningf("charm: charm store reports for %q: %s", curl, w) } if info.Errors != nil { return 0, "", fmt.Errorf("charm info errors for %q: %s", curl, strings.Join(info.Errors, "; ")) } return info.Revision, info.Sha256, nil }
// collectOrphans deletes all repos in path except the one pointed to by current. // Errors are generally ignored; some are logged. func (d *Deployer) collectOrphans() { current, err := os.Readlink(d.current.Path()) if err != nil { return } filepath.Walk(d.path, func(repoPath string, fi os.FileInfo, err error) error { if err != nil && repoPath != d.path && repoPath != current { if err = os.RemoveAll(repoPath); err != nil { log.Warningf("worker/uniter/charm: failed to remove orphan repo at %s: %s", repoPath, err) } } return err }) }
// Get returns a charm matching curl, if one exists. If curl has a revision of
// -1, it returns the latest charm that matches curl. If multiple candidates
// satisfy the foregoing, the first one encountered will be returned.
func (r *LocalRepository) Get(curl *URL) (Charm, error) {
	if curl.Schema != "local" {
		return nil, fmt.Errorf("local repository got URL with non-local schema: %q", curl)
	}
	info, err := os.Stat(r.Path)
	if err != nil {
		if os.IsNotExist(err) {
			// Translate the raw stat failure into a friendlier
			// "repository not found" error.
			err = repoNotFound(r.Path)
		}
		return nil, err
	}
	if !info.IsDir() {
		return nil, repoNotFound(r.Path)
	}
	// Charms live in a per-series subdirectory of the repository.
	path := filepath.Join(r.Path, curl.Series)
	infos, err := ioutil.ReadDir(path)
	if err != nil {
		// A missing or unreadable series directory means no such charm.
		return nil, charmNotFound(curl, r.Path)
	}
	var latest Charm
	for _, info := range infos {
		chPath := filepath.Join(path, info.Name())
		if info.Mode()&os.ModeSymlink != 0 {
			// Resolve symlinks so that mightBeCharm inspects the
			// target's file info, not the link's.
			var err error
			if info, err = os.Stat(chPath); err != nil {
				return nil, err
			}
		}
		if !mightBeCharm(info) {
			continue
		}
		// A charm that fails to load is logged and skipped rather than
		// aborting the whole lookup.
		if ch, err := Read(chPath); err != nil {
			log.Warningf("charm: failed to load charm at %q: %s", chPath, err)
		} else if ch.Meta().Name == curl.Name {
			if ch.Revision() == curl.Revision {
				return ch, nil
			}
			// Track the highest revision seen in case curl asks for
			// the latest (revision -1).
			if latest == nil || ch.Revision() > latest.Revision() {
				latest = ch
			}
		}
	}
	if curl.Revision == -1 && latest != nil {
		return latest, nil
	}
	return nil, charmNotFound(curl, r.Path)
}
// Deploy deploys the current charm to the target directory. func (d *Deployer) Deploy(target *GitDir) (err error) { defer func() { if err == ErrConflict { log.Warningf("worker/uniter/charm: charm deployment completed with conflicts") } else if err != nil { err = fmt.Errorf("charm deployment failed: %s", err) log.Errorf("worker/uniter/charm: %v", err) } else { log.Infof("worker/uniter/charm: charm deployment succeeded") } }() if exists, err := d.current.Exists(); err != nil { return err } else if !exists { return fmt.Errorf("no charm set") } if exists, err := target.Exists(); err != nil { return err } else if !exists { return d.install(target) } return d.upgrade(target) }
func (e *environ) portsInGroup(name string) (ports []instance.Port, err error) { g := ec2.SecurityGroup{Name: name} resp, err := e.ec2().SecurityGroups([]ec2.SecurityGroup{g}, nil) if err != nil { return nil, err } if len(resp.Groups) != 1 { return nil, fmt.Errorf("expected one security group, got %d", len(resp.Groups)) } for _, p := range resp.Groups[0].IPPerms { if len(p.SourceIPs) != 1 { log.Warningf("environs/ec2: unexpected IP permission found: %v", p) continue } for i := p.FromPort; i <= p.ToPort; i++ { ports = append(ports, instance.Port{ Protocol: p.Protocol, Number: i, }) } } state.SortPorts(ports) return ports, nil }
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	st, entity, err := openState(a.Conf.Conf, a)
	if err != nil {
		return nil, err
	}
	// If this fails, other bits will fail, so we just log the error, and
	// let the other failures actually restart runners
	if err := EnsureAPIInfo(a.Conf.Conf, st, entity); err != nil {
		log.Warningf("failed to EnsureAPIInfo: %v", err)
	}
	reportOpenedState(st)
	m := entity.(*state.Machine)
	// TODO(rog) use more discriminating test for errors
	// rather than taking everything down indiscriminately.
	dataDir := a.Conf.DataDir
	runner := worker.NewRunner(allFatal, moreImportant)
	// The upgrader always runs, regardless of the machine's jobs.
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		// TODO(rog) use id instead of *Machine (or introduce Clone method)
		return NewUpgrader(st, m, dataDir), nil
	})
	// At this stage, since we don't embed lxc containers, just start an lxc
	// provisioner task for non-lxc containers. Since we have only LXC
	// containers and normal machines, this effectively means that we only
	// have an LXC provisioner when we have a normally provisioned machine
	// (through the environ-provisioner). With the upcoming advent of KVM
	// containers, it is likely that we will want an LXC provisioner on a KVM
	// machine, and once we get nested LXC containers, we can remove this
	// check.
	providerType := os.Getenv("JUJU_PROVIDER_TYPE")
	if providerType != provider.Local && m.ContainerType() != instance.LXC {
		workerName := fmt.Sprintf("%s-provisioner", provisioner.LXC)
		runner.StartWorker(workerName, func() (worker.Worker, error) {
			return provisioner.NewProvisioner(provisioner.LXC, st, a.MachineId, dataDir), nil
		})
	}
	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	if providerType == provider.Local && m.Id() == bootstrapMachineId {
		runner.StartWorker("local-storage", func() (worker.Worker, error) {
			return localstorage.NewWorker(), nil
		})
	}
	// Start the remaining workers according to the jobs assigned to this
	// machine in state.
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			runner.StartWorker("deployer", func() (worker.Worker, error) {
				return newDeployer(st, m.Id(), dataDir), nil
			})
		case state.JobManageEnviron:
			runner.StartWorker("environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewProvisioner(provisioner.ENVIRON, st, a.MachineId, dataDir), nil
			})
			runner.StartWorker("firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st), nil
			})
		case state.JobManageState:
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				if len(a.Conf.StateServerCert) == 0 || len(a.Conf.StateServerKey) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				return apiserver.NewServer(st, fmt.Sprintf(":%d", a.Conf.APIPort), a.Conf.StateServerCert, a.Conf.StateServerKey)
			})
			runner.StartWorker("cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			runner.StartWorker("resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
		default:
			log.Warningf("ignoring unknown job %q", job)
		}
	}
	// Wrap the runner so that st is closed when the worker terminates.
	return newCloseWorker(runner, st), nil
}
// visit is a filepath.Walk callback that adds the visited file, directory,
// or symlink to the charm zip archive being built. Hidden entries, the
// "build" directory, and the "revision" file are excluded.
func (zp *zipPacker) visit(path string, fi os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	// Archive entry names are relative to the charm root.
	relpath, err := filepath.Rel(zp.root, path)
	if err != nil {
		return err
	}
	method := zip.Deflate
	hidden := len(relpath) > 1 && relpath[0] == '.'
	if fi.IsDir() {
		// Skip build artifacts and hidden directories entirely.
		if relpath == "build" {
			return filepath.SkipDir
		}
		if hidden {
			return filepath.SkipDir
		}
		// Directory entries are stored with a trailing slash and no
		// compression.
		relpath += "/"
		method = zip.Store
	}
	mode := fi.Mode()
	if err := checkFileType(relpath, mode); err != nil {
		return err
	}
	// Symlinks are stored uncompressed; their data is the link target.
	if mode&os.ModeSymlink != 0 {
		method = zip.Store
	}
	// Hidden files and the revision file are not archived (the latter is
	// handled separately).
	if hidden || relpath == "revision" {
		return nil
	}
	h := &zip.FileHeader{
		Name:   relpath,
		Method: method,
	}
	// Normalize permissions: 0644 by default, 0777 for symlinks, 0755 for
	// anything with an execute bit set.
	perm := os.FileMode(0644)
	if mode&os.ModeSymlink != 0 {
		perm = 0777
	} else if mode&0100 != 0 {
		perm = 0755
	}
	if filepath.Dir(relpath) == "hooks" {
		// Known hooks must be executable in the archive even if the
		// on-disk file isn't; warn so the author can fix the source.
		hookName := filepath.Base(relpath)
		if _, ok := zp.hooks[hookName]; !fi.IsDir() && ok && mode&0100 == 0 {
			log.Warningf("charm: making %q executable in charm", path)
			perm = perm | 0100
		}
	}
	// Keep the non-permission mode bits, substituting the normalized perm.
	h.SetMode(mode&^0777 | perm)
	w, err := zp.CreateHeader(h)
	if err != nil || fi.IsDir() {
		// Directories have no content to write.
		return err
	}
	var data []byte
	if mode&os.ModeSymlink != 0 {
		target, err := os.Readlink(path)
		if err != nil {
			return err
		}
		if err := checkSymlinkTarget(zp.root, relpath, target); err != nil {
			return err
		}
		data = []byte(target)
	} else {
		data, err = ioutil.ReadFile(path)
		if err != nil {
			return err
		}
	}
	_, err = w.Write(data)
	return err
}
// updateRelations responds to changes in the life states of the relations
// with the supplied ids. If any id corresponds to an alive relation not
// known to the unit, the uniter will join that relation and return its
// relationer in the added list.
func (u *Uniter) updateRelations(ids []int) (added []*Relationer, err error) {
	for _, id := range ids {
		if r, found := u.relationers[id]; found {
			// Known relation: refresh its life state and mark it dying
			// if necessary.
			rel := r.ru.Relation()
			if err := rel.Refresh(); err != nil {
				return nil, fmt.Errorf("cannot update relation %q: %v", rel, err)
			}
			if rel.Life() == state.Dying {
				if err := r.SetDying(); err != nil {
					return nil, err
				} else if r.IsImplicit() {
					// Implicit relations need no departure hooks, so
					// they can be dropped immediately.
					delete(u.relationers, id)
				}
			}
			continue
		}
		// Relations that are not alive are simply skipped, because they
		// were not previously known anyway.
		rel, err := u.st.Relation(id)
		if err != nil {
			if errors.IsNotFoundError(err) {
				continue
			}
			return nil, err
		}
		if rel.Life() != state.Alive {
			continue
		}
		// Make sure we ignore relations not implemented by the unit's charm
		ch, err := corecharm.ReadDir(u.charm.Path())
		if err != nil {
			return nil, err
		}
		if ep, err := rel.Endpoint(u.unit.ServiceName()); err != nil {
			return nil, err
		} else if !ep.ImplementedBy(ch) {
			log.Warningf("worker/uniter: skipping relation with unknown endpoint %q", ep)
			continue
		}
		dir, err := relation.ReadStateDir(u.relationsDir, id)
		if err != nil {
			return nil, err
		}
		err = u.addRelation(rel, dir)
		if err == nil {
			added = append(added, u.relationers[id])
			continue
		}
		// addRelation failed: remove the state dir we just created.
		// ErrCannotEnterScope is tolerated (the relation became
		// unjoinable underneath us); any other error is fatal, as is a
		// failure to remove the dir.
		e := dir.Remove()
		if err != state.ErrCannotEnterScope {
			return nil, err
		}
		if e != nil {
			return nil, e
		}
	}
	if u.unit.IsPrincipal() {
		return added, nil
	}
	// If no Alive relations remain between a subordinate unit's service
	// and its principal's service, the subordinate must become Dying.
	keepAlive := false
	for _, r := range u.relationers {
		// A live container-scoped relation keeps the subordinate alive.
		scope := r.ru.Endpoint().Scope
		if scope == corecharm.ScopeContainer && !r.dying {
			keepAlive = true
			break
		}
	}
	if !keepAlive {
		if err := u.unit.Destroy(); err != nil {
			return nil, err
		}
	}
	return added, nil
}
// Validate checks cfg for validity (optionally against the previous
// configuration old), warns about deprecated attributes, fills in any
// missing credential attributes from the environment, and rejects changes
// to immutable attributes. It returns the validated, possibly augmented,
// configuration.
func (p environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) {
	// Check for valid changes for the base config values.
	if err := config.Validate(cfg, old); err != nil {
		return nil, err
	}
	// Check for deprecated fields and log a warning.
	// NOTE(review): this comment used to claim the message is also printed
	// to stderr, but the code below only logs — confirm intended behaviour.
	// NOTE(review): log.Warningf(msg) passes a non-constant format string;
	// a '%' in the attribute value would be misread as a verb — consider
	// log.Warningf("%s", msg).
	if defaultImageId := cfg.AllAttrs()["default-image-id"]; defaultImageId != nil && defaultImageId.(string) != "" {
		msg := fmt.Sprintf(
			"config attribute %q (%v) is deprecated and ignored, use simplestreams metadata instead",
			"default-image-id", defaultImageId)
		log.Warningf(msg)
	}
	if defaultInstanceType := cfg.AllAttrs()["default-instance-type"]; defaultInstanceType != nil && defaultInstanceType.(string) != "" {
		msg := fmt.Sprintf(
			"config attribute %q (%v) is deprecated and ignored",
			"default-instance-type", defaultInstanceType)
		log.Warningf(msg)
	}
	validated, err := cfg.ValidateUnknownAttrs(configFields, configDefaults)
	if err != nil {
		return nil, err
	}
	ecfg := &environConfig{cfg, validated}
	// Only the three known authorization modes are accepted.
	authMode := AuthMode(ecfg.authMode())
	switch authMode {
	case AuthKeyPair:
	case AuthLegacy:
	case AuthUserPass:
	default:
		return nil, fmt.Errorf("invalid authorization mode: %q", authMode)
	}
	// If an auth URL is supplied it must be a well-formed absolute URL.
	if ecfg.authURL() != "" {
		parts, err := url.Parse(ecfg.authURL())
		if err != nil || parts.Host == "" || parts.Scheme == "" {
			return nil, fmt.Errorf("invalid auth-url value %q", ecfg.authURL())
		}
	}
	// Fall back to credentials from the environment for any attribute the
	// config leaves unset; a missing environment value is an error.
	cred := identity.CredentialsFromEnv()
	format := "required environment variable not set for credentials attribute: %s"
	if authMode == AuthUserPass || authMode == AuthLegacy {
		if ecfg.username() == "" {
			if cred.User == "" {
				return nil, fmt.Errorf(format, "User")
			}
			ecfg.attrs["username"] = cred.User
		}
		if ecfg.password() == "" {
			if cred.Secrets == "" {
				return nil, fmt.Errorf(format, "Secrets")
			}
			ecfg.attrs["password"] = cred.Secrets
		}
	} else if authMode == AuthKeyPair {
		if ecfg.accessKey() == "" {
			if cred.User == "" {
				return nil, fmt.Errorf(format, "User")
			}
			ecfg.attrs["access-key"] = cred.User
		}
		if ecfg.secretKey() == "" {
			if cred.Secrets == "" {
				return nil, fmt.Errorf(format, "Secrets")
			}
			ecfg.attrs["secret-key"] = cred.Secrets
		}
	}
	if ecfg.authURL() == "" {
		if cred.URL == "" {
			return nil, fmt.Errorf(format, "URL")
		}
		ecfg.attrs["auth-url"] = cred.URL
	}
	if ecfg.tenantName() == "" {
		if cred.TenantName == "" {
			return nil, fmt.Errorf(format, "TenantName")
		}
		ecfg.attrs["tenant-name"] = cred.TenantName
	}
	if ecfg.region() == "" {
		if cred.Region == "" {
			return nil, fmt.Errorf(format, "Region")
		}
		ecfg.attrs["region"] = cred.Region
	}
	// region and control-bucket are immutable once set.
	if old != nil {
		attrs := old.UnknownAttrs()
		if region, _ := attrs["region"].(string); ecfg.region() != region {
			return nil, fmt.Errorf("cannot change region from %q to %q", region, ecfg.region())
		}
		if controlBucket, _ := attrs["control-bucket"].(string); ecfg.controlBucket() != controlBucket {
			return nil, fmt.Errorf("cannot change control-bucket from %q to %q", controlBucket, ecfg.controlBucket())
		}
	}
	// Apply the coerced unknown values back into the config.
	return cfg.Apply(ecfg.attrs)
}
// run is the Upgrader's main loop: it reports the currently running tools
// to state, watches the environment configuration for a proposed agent
// version, downloads matching tools when needed, and returns once an
// upgrade is ready (or the upgrader is killed).
func (u *Upgrader) run() error {
	// Let the state know the version that is currently running.
	currentTools, err := tools.ReadTools(u.dataDir, version.Current)
	if err != nil {
		// Don't abort everything because we can't find the tools directory.
		// The problem should sort itself out as we will immediately
		// download some more tools and upgrade.
		log.Warningf("upgrader cannot read current tools: %v", err)
		currentTools = &tools.Tools{
			Binary: version.Current,
		}
	}
	err = u.agentState.SetAgentTools(currentTools)
	if err != nil {
		return err
	}
	// TODO(fwereade): this whole package should be ignorant of environs,
	// so it shouldn't be watching environ config (and it shouldn't be
	// looking in storage): we should be able to find out what to download
	// from state, exactly as we do for charms.
	w := u.st.WatchEnvironConfig()
	defer watcher.Stop(w, &u.tomb)
	// Rather than using worker.WaitForEnviron, invalid environments are
	// managed explicitly so that all configuration changes are observed
	// by the loop below.
	var environ environs.Environ
	// TODO(rog) retry downloads when they fail.
	// download/downloadTools/downloadDone together describe the (at most
	// one) download in flight; all three are nil when no download is active.
	var (
		download      *downloader.Download
		downloadTools *tools.Tools
		downloadDone  <-chan downloader.Status
	)
	// If we're killed early on (probably as a result of some other
	// task dying) we allow ourselves some time to try to connect to
	// the state and download a new version. We return to normal
	// undelayed behaviour when:
	// 1) We find there's no upgrade to do.
	// 2) A download fails.
	tomb := delayedTomb(&u.tomb, upgraderKillDelay)
	// noDelay switches back to the undelayed tomb, cancelling the grace
	// period; it is safe to call more than once.
	noDelay := func() {
		if tomb != &u.tomb {
			tomb.Kill(nil)
			tomb = &u.tomb
		}
	}
	for {
		// We wait for the tools to change while we're downloading
		// so that if something goes wrong (for instance a bad URL
		// hangs up) another change to the proposed tools can
		// potentially fix things.
		select {
		case cfg, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			var err error
			if environ == nil {
				environ, err = environs.New(cfg)
				if err != nil {
					log.Errorf("upgrader loaded invalid initial environment configuration: %v", err)
					break
				}
			} else {
				err = environ.SetConfig(cfg)
				if err != nil {
					log.Warningf("upgrader loaded invalid environment configuration: %v", err)
					// continue on, because the version number is still significant.
				}
			}
			proposed, ok := cfg.AgentVersion()
			if !ok {
				// This shouldn't be possible; but if it happens it's no reason
				// to kill this task. Just wait for the config to change again.
				continue
			}
			if download != nil {
				// There's a download in progress, stop it if we need to.
				if downloadTools.Number == proposed {
					// We are already downloading the requested tools.
					break
				}
				// Tools changed. We need to stop and restart.
				download.Stop()
				download, downloadTools, downloadDone = nil, nil, nil
			}
			// TODO: major version upgrades.
			if proposed.Major != version.Current.Major {
				log.Errorf("major version upgrades are not supported yet")
				noDelay()
				break
			}
			if proposed == version.Current.Number {
				// Already running the proposed version; nothing to do.
				noDelay()
				break
			}
			required := version.Binary{
				Number: proposed,
				Series: version.Current.Series,
				Arch:   version.Current.Arch,
			}
			if tools, err := tools.ReadTools(u.dataDir, required); err == nil {
				// The exact tools have already been downloaded, so use them.
				return u.upgradeReady(currentTools, tools)
			}
			// NOTE: this "tools" shadows the tools package for the rest of
			// this case block.
			tools, err := environs.FindExactTools(environ, required)
			if err != nil {
				log.Errorf("upgrader error finding tools for %v: %v", required, err)
				if !errors.IsNotFoundError(err) {
					return err
				}
				noDelay()
				// TODO(rog): poll until tools become available.
				break
			}
			log.Infof("upgrader downloading %q", tools.URL)
			download = downloader.New(tools.URL, "")
			downloadTools = tools
			downloadDone = download.Done()
		case status := <-downloadDone:
			// A download finished (successfully or not); clear the
			// in-flight state before inspecting the result.
			newTools := downloadTools
			download, downloadTools, downloadDone = nil, nil, nil
			if status.Err != nil {
				log.Errorf("upgrader download of %v failed: %v", newTools.Binary, status.Err)
				noDelay()
				break
			}
			err := tools.UnpackTools(u.dataDir, newTools, status.File)
			status.File.Close()
			// Remove the temporary file regardless of unpack success.
			if err := os.Remove(status.File.Name()); err != nil {
				log.Warningf("upgrader cannot remove temporary download file: %v", err)
			}
			if err != nil {
				log.Errorf("upgrader cannot unpack %v tools: %v", newTools.Binary, err)
				noDelay()
				break
			}
			return u.upgradeReady(currentTools, newTools)
		case <-tomb.Dying():
			if download != nil {
				return fmt.Errorf("upgrader aborted download of %q", downloadTools.URL)
			}
			return nil
		}
	}
	panic("not reached")
}
// sync updates the watcher knowledge from the database, and
// queues events to observing channels.
func (w *Watcher) sync() error {
	// Iterate through log events in reverse insertion order (newest first).
	iter := w.log.Find(nil).Batch(10).Sort("-$natural").Iter()
	// seen tracks keys already handled this pass, so only the newest revno
	// per document wins.
	seen := make(map[watchKey]bool)
	first := true
	lastId := w.lastId
	var entry bson.D
	for iter.Next(&entry) {
		if len(entry) == 0 {
			debugf("state/watcher: got empty changelog document")
		}
		id := entry[0]
		if id.Name != "_id" {
			panic("watcher: _id field isn't first entry")
		}
		if first {
			// Remember the newest entry so the next sync stops here.
			w.lastId = id.Value
			first = false
		}
		if id.Value == lastId {
			// Reached the entry we stopped at last time; all older
			// entries were already processed.
			break
		}
		debugf("state/watcher: got changelog document: %#v", entry)
		for _, c := range entry[1:] {
			// See txn's Runner.ChangeLog for the structure of log entries.
			// Each collection element carries parallel "d" (document ids)
			// and "r" (revnos) arrays.
			var d, r []interface{}
			dr, _ := c.Value.(bson.D)
			for _, item := range dr {
				switch item.Name {
				case "d":
					d, _ = item.Value.([]interface{})
				case "r":
					r, _ = item.Value.([]interface{})
				}
			}
			if len(d) == 0 || len(d) != len(r) {
				log.Warningf("state/watcher: changelog has invalid collection document: %#v", c)
				continue
			}
			// Walk in reverse so the most recent change for a key is
			// observed first; older ones are skipped via seen.
			for i := len(d) - 1; i >= 0; i-- {
				key := watchKey{c.Name, d[i]}
				if seen[key] {
					continue
				}
				seen[key] = true
				revno, ok := r[i].(int64)
				if !ok {
					log.Warningf("state/watcher: changelog has revno with type %T: %#v", r[i], r[i])
					continue
				}
				// Negative revnos mean removal; normalize to -1.
				if revno < 0 {
					revno = -1
				}
				if w.current[key] == revno {
					continue
				}
				w.current[key] = revno
				// Queue notifications for per-collection watches.
				for _, info := range w.watches[watchKey{c.Name, nil}] {
					if info.filter != nil && !info.filter(d[i]) {
						continue
					}
					w.syncEvents = append(w.syncEvents, event{info.ch, key, revno})
				}
				// Queue notifications for per-document watches.
				infos := w.watches[key]
				for i, info := range infos {
					if revno > info.revno || revno < 0 && info.revno >= 0 {
						infos[i].revno = revno
						w.syncEvents = append(w.syncEvents, event{info.ch, key, revno})
					}
				}
			}
		}
	}
	if iter.Err() != nil {
		return fmt.Errorf("watcher iteration error: %v", iter.Err())
	}
	return nil
}
func (s *suite) TestWarningLogger(c *C) { input := "Hello World" log.Warningf(input) c.Assert(s.writer.String(), Equals, "WARNING juju "+input) }
func (p environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { // Check for valid changes for the base config values. if err := config.Validate(cfg, old); err != nil { return nil, err } // Check for deprecated fields and log a warning. We also print to stderr to ensure the user sees the message // even if they are not running with --debug. if defaultImageId := cfg.AllAttrs()["default-image-id"]; defaultImageId != nil && defaultImageId.(string) != "" { msg := fmt.Sprintf( "Config attribute %q (%v) is deprecated and ignored.\n"+ "Your cloud provider should have set up image metadata to provide the correct image id\n"+ "for your chosen series and archietcure. If this is a private Openstack deployment without\n"+ "existing image metadata, please run 'juju help image-metadata' to see how suitable image"+ "metadata can be generated.", "default-image-id", defaultImageId) log.Warningf(msg) } if defaultInstanceType := cfg.AllAttrs()["default-instance-type"]; defaultInstanceType != nil && defaultInstanceType.(string) != "" { msg := fmt.Sprintf( "Config attribute %q (%v) is deprecated and ignored.\n"+ "The correct instance flavor is determined using constraints, globally specified\n"+ "when an environment is bootstrapped, or individually when a charm is deployed.\n"+ "See 'juju help bootstrap' or 'juju help deploy'.", "default-instance-type", defaultInstanceType) log.Warningf(msg) } validated, err := cfg.ValidateUnknownAttrs(configFields, configDefaults) if err != nil { return nil, err } ecfg := &environConfig{cfg, validated} authMode := AuthMode(ecfg.authMode()) switch authMode { case AuthKeyPair: case AuthLegacy: case AuthUserPass: default: return nil, fmt.Errorf("invalid authorization mode: %q", authMode) } if ecfg.authURL() != "" { parts, err := url.Parse(ecfg.authURL()) if err != nil || parts.Host == "" || parts.Scheme == "" { return nil, fmt.Errorf("invalid auth-url value %q", ecfg.authURL()) } } cred := identity.CredentialsFromEnv() 
format := "required environment variable not set for credentials attribute: %s" if authMode == AuthUserPass || authMode == AuthLegacy { if ecfg.username() == "" { if cred.User == "" { return nil, fmt.Errorf(format, "User") } ecfg.attrs["username"] = cred.User } if ecfg.password() == "" { if cred.Secrets == "" { return nil, fmt.Errorf(format, "Secrets") } ecfg.attrs["password"] = cred.Secrets } } else if authMode == AuthKeyPair { if ecfg.accessKey() == "" { if cred.User == "" { return nil, fmt.Errorf(format, "User") } ecfg.attrs["access-key"] = cred.User } if ecfg.secretKey() == "" { if cred.Secrets == "" { return nil, fmt.Errorf(format, "Secrets") } ecfg.attrs["secret-key"] = cred.Secrets } } if ecfg.authURL() == "" { if cred.URL == "" { return nil, fmt.Errorf(format, "URL") } ecfg.attrs["auth-url"] = cred.URL } if ecfg.tenantName() == "" { if cred.TenantName == "" { return nil, fmt.Errorf(format, "TenantName") } ecfg.attrs["tenant-name"] = cred.TenantName } if ecfg.region() == "" { if cred.Region == "" { return nil, fmt.Errorf(format, "Region") } ecfg.attrs["region"] = cred.Region } if old != nil { attrs := old.UnknownAttrs() if region, _ := attrs["region"].(string); ecfg.region() != region { return nil, fmt.Errorf("cannot change region from %q to %q", region, ecfg.region()) } if controlBucket, _ := attrs["control-bucket"].(string); ecfg.controlBucket() != controlBucket { return nil, fmt.Errorf("cannot change control-bucket from %q to %q", controlBucket, ecfg.controlBucket()) } } // Apply the coerced unknown values back into the config. return cfg.Apply(ecfg.attrs) }