func (s *UtilSuite) TestGetSystemIdValues(c *C) { instanceId1 := instance.Id("/MAAS/api/1.0/nodes/system_id1/") instanceId2 := instance.Id("/MAAS/api/1.0/nodes/system_id2/") instanceIds := []instance.Id{instanceId1, instanceId2} values := getSystemIdValues(instanceIds) c.Check(values["id"], DeepEquals, []string{"system_id1", "system_id2"}) }
func (suite *EnvironSuite) TestAllInstances(c *C) { env := makeEnviron(c) prefix := env.getEnvPrefix() services := []gwacl.HostedServiceDescriptor{{ServiceName: "deployment-in-another-env"}, {ServiceName: prefix + "deployment-1"}, {ServiceName: prefix + "deployment-2"}} requests := patchWithServiceListResponse(c, services) instances, err := env.AllInstances() c.Assert(err, IsNil) c.Check(len(instances), Equals, 2) c.Check(instances[0].Id(), Equals, instance.Id(prefix+"deployment-1")) c.Check(instances[1].Id(), Equals, instance.Id(prefix+"deployment-2")) c.Check(len(*requests), Equals, 1) }
// checkStopInstances checks that an instance has been stopped. func (s *CommonProvisionerSuite) checkStopInstances(c *C, instances ...instance.Instance) { s.State.StartSync() instanceIds := set.NewStrings() for _, instance := range instances { instanceIds.Add(string(instance.Id())) } // Continue checking for stop instance calls until all the instances we // are waiting on to finish, actually finish, or we time out. for !instanceIds.IsEmpty() { select { case o := <-s.op: switch o := o.(type) { case dummy.OpStopInstances: for _, stoppedInstance := range o.Instances { instanceIds.Remove(string(stoppedInstance.Id())) } default: c.Fatalf("unexpected operation %#v", o) return } case <-time.After(2 * time.Second): c.Fatalf("provisioner did not stop an instance") return } } }
func (s *lxcBrokerSuite) TestStartInstance(c *C) { machineId := "1/lxc/0" lxc := s.startInstance(c, machineId) c.Assert(lxc.Id(), Equals, instance.Id("juju-machine-1-lxc-0")) c.Assert(s.lxcContainerDir(lxc), IsDirectory) s.assertInstances(c, lxc) }
func (s *BootstrapSuite) TestInitializeEnvironment(c *C) { _, cmd, err := s.initBootstrapCommand(c, "--env-config", testConfig) c.Assert(err, IsNil) err = cmd.Run(nil) c.Assert(err, IsNil) st, err := state.Open(&state.Info{ Addrs: []string{testing.MgoAddr}, CACert: []byte(testing.CACert), Password: testPasswordHash(), }, state.DefaultDialOpts()) c.Assert(err, IsNil) defer st.Close() machines, err := st.AllMachines() c.Assert(err, IsNil) c.Assert(machines, HasLen, 1) instid, err := machines[0].InstanceId() c.Assert(err, IsNil) c.Assert(instid, Equals, instance.Id("dummy.instance.id")) cons, err := st.EnvironConstraints() c.Assert(err, IsNil) c.Assert(cons, DeepEquals, constraints.Value{}) }
func (e *environ) Destroy(ensureInsts []instance.Instance) error { log.Infof("environs/openstack: destroying environment %q", e.name) insts, err := e.AllInstances() if err != nil { return fmt.Errorf("cannot get instances: %v", err) } found := make(map[instance.Id]bool) var ids []instance.Id for _, inst := range insts { ids = append(ids, inst.Id()) found[inst.Id()] = true } // Add any instances we've been told about but haven't yet shown // up in the instance list. for _, inst := range ensureInsts { id := instance.Id(inst.(*openstackInstance).Id()) if !found[id] { ids = append(ids, id) found[id] = true } } err = e.terminateInstances(ids) if err != nil { return err } return e.Storage().RemoveAll() }
func (s *UtilSuite) TestExtractSystemId(c *C) { instanceId := instance.Id("/MAAS/api/1.0/nodes/system_id/") systemId := extractSystemId(instanceId) c.Check(systemId, Equals, "system_id") }
func (suite *EnvironSuite) TestInstancesReturnsErrorIfPartialInstances(c *C) { input1 := `{"system_id": "test"}` node1 := suite.testMAASObject.TestServer.NewNode(input1) resourceURI1, _ := node1.GetField("resource_uri") input2 := `{"system_id": "test2"}` suite.testMAASObject.TestServer.NewNode(input2) instanceId1 := instance.Id(resourceURI1) instanceId2 := instance.Id("unknown systemID") instanceIds := []instance.Id{instanceId1, instanceId2} instances, err := suite.environ.Instances(instanceIds) c.Check(err, Equals, environs.ErrPartialInstances) c.Check(len(instances), Equals, 1) c.Check(string(instances[0].Id()), Equals, resourceURI1) }
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string { event := <-s.events c.Assert(event.Action, gc.Equals, mock.Started) err := machine.Refresh() c.Assert(err, gc.IsNil) s.waitInstanceId(c, machine, instance.Id(event.InstanceId)) return event.InstanceId }
func (suite *EnvironSuite) TestInstancesReturnsFilteredList(c *C) { services := []gwacl.HostedServiceDescriptor{{ServiceName: "deployment-1"}, {ServiceName: "deployment-2"}} requests := patchWithServiceListResponse(c, services) env := makeEnviron(c) instances, err := env.Instances([]instance.Id{"deployment-1"}) c.Assert(err, IsNil) c.Check(len(instances), Equals, 1) c.Check(instances[0].Id(), Equals, instance.Id("deployment-1")) c.Check(len(*requests), Equals, 1) }
func (suite *EnvironSuite) TestInstancesReturnsPartialInstancesIfSomeInstancesAreNotFound(c *C) { services := []gwacl.HostedServiceDescriptor{{ServiceName: "deployment-1"}, {ServiceName: "deployment-2"}} requests := patchWithServiceListResponse(c, services) env := makeEnviron(c) instances, err := env.Instances([]instance.Id{"deployment-1", "unknown-deployment"}) c.Assert(err, Equals, environs.ErrPartialInstances) c.Check(len(instances), Equals, 1) c.Check(instances[0].Id(), Equals, instance.Id("deployment-1")) c.Check(len(*requests), Equals, 1) }
// Run initializes state for an environment. func (c *BootstrapCommand) Run(_ *cmd.Context) error { if err := c.Conf.read("bootstrap"); err != nil { return err } cfg, err := config.New(c.EnvConfig) if err != nil { return err } // There is no entity that's created at init time. c.Conf.StateInfo.Tag = "" st, err := state.Initialize(c.Conf.StateInfo, cfg, state.DefaultDialOpts()) if err != nil { return err } defer st.Close() if err := environs.BootstrapUsers(st, cfg, c.Conf.OldPassword); err != nil { return err } // TODO(fwereade): we need to be able to customize machine jobs, // not just hardcode these values; in particular, JobHostUnits // on a machine, like this one, that is running JobManageEnviron // (not to mention the actual state server itself...) will allow // a malicious or compromised unit to trivially access to the // user's environment credentials. However, given that this point // is currently moot (see Upgrader in this package), the pseudo- // local provider mode (in which everything is deployed with // `--to 0`) offers enough value to enough people that // JobHostUnits is currently always enabled. This will one day // have to change, but it's strictly less important than fixing // Upgrader, and it's a capability we'll always want to have // available for the aforementioned use case. 
jobs := []state.MachineJob{ state.JobManageEnviron, state.JobManageState, state.JobHostUnits, } data, err := ioutil.ReadFile(providerStateURLFile) if err != nil { return fmt.Errorf("cannot read provider-state-url file: %v", err) } stateInfoURL := strings.Split(string(data), "\n")[0] bsState, err := environs.LoadStateFromURL(stateInfoURL) if err != nil { return fmt.Errorf("cannot load state from URL %q (read from %q): %v", stateInfoURL, providerStateURLFile, err) } instId := bsState.StateInstances[0] var characteristics instance.HardwareCharacteristics if len(bsState.Characteristics) > 0 { characteristics = bsState.Characteristics[0] } return environs.ConfigureBootstrapMachine(st, c.Constraints, c.Conf.DataDir, jobs, instance.Id(instId), characteristics) }
// StopInstances shuts down the given instances. func (broker *lxcBroker) StopInstances(instances []instance.Instance) error { // TODO: potentially parallelise. for _, instance := range instances { lxcLogger.Infof("stopping lxc container for instance: %s", instance.Id()) if err := broker.manager.StopContainer(instance); err != nil { lxcLogger.Errorf("container did not stop: %v", err) return err } } return nil }
func (suite *StateSuite) setupSavedState(c *C, storage environs.Storage) environs.BootstrapState { arch := "amd64" state := environs.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, Characteristics: []instance.HardwareCharacteristics{{Arch: &arch}}} content, err := goyaml.Marshal(state) c.Assert(err, IsNil) err = storage.Put(environs.StateFile, ioutil.NopCloser(bytes.NewReader(content)), int64(len(content))) c.Assert(err, IsNil) return state }
func (s *lxcBrokerSuite) TestStartInstance(c *gc.C) { machineId := "1/lxc/0" lxc := s.startInstance(c, machineId) c.Assert(lxc.Id(), gc.Equals, instance.Id("juju-machine-1-lxc-0")) c.Assert(s.lxcContainerDir(lxc), jc.IsDirectory) s.assertInstances(c, lxc) // Uses default network config lxcConfContents, err := ioutil.ReadFile(filepath.Join(s.ContainerDir, string(lxc.Id()), "lxc.conf")) c.Assert(err, gc.IsNil) c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.type = veth") c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.link = lxcbr0") }
func (suite *EnvironSuite) TestInstancesReturnsInstances(c *C) { input := `{"system_id": "test"}` node := suite.testMAASObject.TestServer.NewNode(input) resourceURI, _ := node.GetField("resource_uri") instanceIds := []instance.Id{instance.Id(resourceURI)} instances, err := suite.environ.Instances(instanceIds) c.Check(err, IsNil) c.Check(len(instances), Equals, 1) c.Check(string(instances[0].Id()), Equals, resourceURI) }
func (s *LxcSuite) TestStopContainer(c *gc.C) { manager := lxc.NewContainerManager(lxc.ManagerConfig{}) instance := StartContainer(c, manager, "1/lxc/0") err := manager.StopContainer(instance) c.Assert(err, gc.IsNil) name := string(instance.Id()) // Check that the container dir is no longer in the container dir c.Assert(filepath.Join(s.ContainerDir, name), jc.DoesNotExist) // but instead, in the removed container dir c.Assert(filepath.Join(s.RemovedDir, name), jc.IsDirectory) }
func (s *BootstrapSuite) SetUpSuite(c *C) { s.LoggingSuite.SetUpSuite(c) s.MgoSuite.SetUpSuite(c) stateInfo := environs.BootstrapState{ StateInstances: []instance.Id{instance.Id("dummy.instance.id")}, } stateData, err := goyaml.Marshal(stateInfo) c.Assert(err, IsNil) content := map[string]string{"/" + environs.StateFile: string(stateData)} testRoundTripper.Sub = jujutest.NewCannedRoundTripper(content, nil) s.providerStateURLFile = filepath.Join(c.MkDir(), "provider-state-url") providerStateURLFile = s.providerStateURLFile }
// Bootstrap is specified in the Environ interface.
//
// For the local provider this: verifies root privileges, creates the
// environment directories, fixes cert ownership, starts the local mongo
// service, records the bootstrap instance in provider storage, writes the
// machine-0 agent configuration, initializes state, and finally starts
// the local machine agent. Steps are order-dependent (see inline notes).
func (env *localEnviron) Bootstrap(cons constraints.Value) error {
	logger.Infof("bootstrapping environment %q", env.name)
	// Local bootstrap manipulates system services and file ownership, so
	// it must run as root.
	if !env.config.runningAsRoot {
		return fmt.Errorf("bootstrapping a local environment must be done as root")
	}
	if err := env.config.createDirs(); err != nil {
		logger.Errorf("failed to create necessary directories: %v", err)
		return err
	}
	if err := env.ensureCertOwner(); err != nil {
		logger.Errorf("failed to reassign ownership of the certs to the user: %v", err)
		return err
	}
	// TODO(thumper): check that the constraints don't include "container=lxc" for now.
	var noRetry = utils.AttemptStrategy{}
	if err := environs.VerifyBootstrapInit(env, noRetry); err != nil {
		return err
	}
	cert, key, err := env.setupLocalMongoService()
	if err != nil {
		return err
	}
	// Before we write the agent config file, we need to make sure the
	// instance is saved in the StateInfo.
	// NOTE(review): `boostrapInstanceId` (missing "t") is a package-level
	// identifier declared elsewhere in this file — rename there if fixed.
	bootstrapId := instance.Id(boostrapInstanceId)
	if err := environs.SaveState(env.Storage(), &environs.BootstrapState{StateInstances: []instance.Id{bootstrapId}}); err != nil {
		logger.Errorf("failed to save state instances: %v", err)
		return err
	}
	// Need to write out the agent file for machine-0 before initializing
	// state, as part of that process will reset the password in the
	// agent file.
	if err := env.writeBootstrapAgentConfFile(cert, key); err != nil {
		return err
	}
	// Have to initialize the state configuration with localhost so we get
	// "special" permissions.
	stateConnection, err := env.initialStateConfiguration(boostrapInstanceId, cons)
	if err != nil {
		return err
	}
	defer stateConnection.Close()
	return env.setupLocalMachineAgent(cons)
}
func (*EnvironSuite) TestGetInstance(c *C) { env := makeEnviron(c) prefix := env.getEnvPrefix() serviceName := prefix + "instance-name" serviceDesc := gwacl.HostedServiceDescriptor{ServiceName: serviceName} service := gwacl.HostedService{HostedServiceDescriptor: serviceDesc} responses := getAzureServiceResponses(c, service) gwacl.PatchManagementAPIResponses(responses) instance, err := env.getInstance("serviceName") c.Check(err, IsNil) c.Check(string(instance.Id()), Equals, serviceName) }
func (suite *StateSuite) TestLoadStateIntegratesWithSaveState(c *C) { storage, cleanup := makeDummyStorage(c) defer cleanup() arch := "amd64" state := environs.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, Characteristics: []instance.HardwareCharacteristics{{Arch: &arch}}} err := environs.SaveState(storage, &state) c.Assert(err, IsNil) storedState, err := environs.LoadState(storage) c.Assert(err, IsNil) c.Check(*storedState, DeepEquals, state) }
func (s *MachineSuite) TestMachineInstanceIdCorrupt(c *C) { machine, err := s.State.AddMachine("series", state.JobHostUnits) c.Assert(err, IsNil) err = s.machines.Update( D{{"_id", machine.Id()}}, D{{"$set", D{{"instanceid", D{{"foo", "bar"}}}}}}, ) c.Assert(err, IsNil) err = machine.Refresh() c.Assert(err, IsNil) iid, err := machine.InstanceId() c.Assert(err, checkers.Satisfies, state.IsNotProvisionedError) c.Assert(iid, Equals, instance.Id("")) }
func (s *MachineSuite) TestMachineInstanceId(c *C) { machine, err := s.State.AddMachine("series", state.JobHostUnits) c.Assert(err, IsNil) err = s.machines.Update( D{{"_id", machine.Id()}}, D{{"$set", D{{"instanceid", "spaceship/0"}}}}, ) c.Assert(err, IsNil) err = machine.Refresh() c.Assert(err, IsNil) iid, err := machine.InstanceId() c.Assert(err, IsNil) c.Assert(iid, Equals, instance.Id("spaceship/0")) }
// StopInstances is specified in the Environ interface.
//
// Destroys the Azure hosted service behind each instance using a bounded
// pool of worker goroutines. At most one error is returned (the first one
// received); remaining services are still attempted.
func (env *azureEnviron) StopInstances(instances []instance.Instance) error {
	// Each Juju instance is an Azure Service (instance==service), destroy
	// all the Azure services.
	// Acquire management API object.
	context, err := env.getManagementAPI()
	if err != nil {
		return err
	}
	defer env.releaseManagementAPI(context)
	// Destroy all the services in parallel.
	// Unbuffered: producers below block until a worker picks up a name.
	servicesToDestroy := make(chan string)
	// Spawn min(len(instances), maxConcurrentDeletes) goroutines to
	// destroy the services.
	nbRoutines := len(instances)
	if maxConcurrentDeletes < nbRoutines {
		nbRoutines = maxConcurrentDeletes
	}
	var wg sync.WaitGroup
	wg.Add(nbRoutines)
	// Buffered with capacity len(instances): each instance can contribute
	// at most one error, so workers never block sending and no goroutine
	// leaks even when every delete fails.
	errc := make(chan error, len(instances))
	for i := 0; i < nbRoutines; i++ {
		go func() {
			defer wg.Done()
			// Workers drain the channel until it is closed below.
			for serviceName := range servicesToDestroy {
				request := &gwacl.DestroyHostedServiceRequest{ServiceName: serviceName}
				err := context.DestroyHostedService(request)
				if err != nil {
					errc <- err
				}
			}
		}()
	}
	// Feed all the service names to servicesToDestroy.
	for _, instance := range instances {
		servicesToDestroy <- string(instance.Id())
	}
	close(servicesToDestroy)
	wg.Wait()
	// Non-blocking receive: report the first error if any worker failed.
	select {
	case err := <-errc:
		return err
	default:
	}
	return nil
}
// TestStartContainer checks the on-disk artifacts produced when a
// container is started: the lxc.conf network settings, the generated
// cloud-init file (apt proxy and the trailing runcmd scripts), the
// log mount point inside the container's rootfs, and the symlink that
// registers the config in the restart directory.
func (s *LxcSuite) TestStartContainer(c *gc.C) {
	manager := lxc.NewContainerManager(lxc.ManagerConfig{})
	instance := StartContainer(c, manager, "1/lxc/0")
	name := string(instance.Id())
	// Check our container config files.
	lxcConfContents, err := ioutil.ReadFile(filepath.Join(s.ContainerDir, name, "lxc.conf"))
	c.Assert(err, gc.IsNil)
	c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.link = nic42")
	cloudInitFilename := filepath.Join(s.ContainerDir, name, "cloud-init")
	c.Assert(cloudInitFilename, jc.IsNonEmptyFile)
	data, err := ioutil.ReadFile(cloudInitFilename)
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), jc.HasPrefix, "#cloud-config\n")
	// Parse the cloud-init YAML and inspect the pieces we care about.
	x := make(map[interface{}]interface{})
	err = goyaml.Unmarshal(data, &x)
	c.Assert(err, gc.IsNil)
	c.Assert(x["apt_proxy"], gc.Equals, aptHTTPProxy)
	var scripts []string
	for _, s := range x["runcmd"].([]interface{}) {
		scripts = append(scripts, s.(string))
	}
	// Only the final four runcmd entries are fixed; earlier ones vary.
	c.Assert(scripts[len(scripts)-4:], gc.DeepEquals, []string{
		"start jujud-machine-1-lxc-0",
		"install -m 644 /dev/null '/etc/apt/apt.conf.d/99proxy-extra'",
		fmt.Sprintf("echo '%s' > '/etc/apt/apt.conf.d/99proxy-extra'", configProxyExtra),
		"ifconfig",
	})
	// Check the mount point has been created inside the container.
	c.Assert(filepath.Join(s.LxcDir, name, "rootfs/var/log/juju"), jc.IsDirectory)
	// Check that the config file is linked in the restart dir.
	expectedLinkLocation := filepath.Join(s.RestartDir, name+".conf")
	expectedTarget := filepath.Join(s.LxcDir, name, "config")
	linkInfo, err := os.Lstat(expectedLinkLocation)
	c.Assert(err, gc.IsNil)
	c.Assert(linkInfo.Mode()&os.ModeSymlink, gc.Equals, os.ModeSymlink)
	location, err := os.Readlink(expectedLinkLocation)
	c.Assert(err, gc.IsNil)
	c.Assert(location, gc.Equals, expectedTarget)
}
func (s *LxcSuite) TestStopContainerNameClash(c *gc.C) { manager := lxc.NewContainerManager(lxc.ManagerConfig{}) instance := StartContainer(c, manager, "1/lxc/0") name := string(instance.Id()) targetDir := filepath.Join(s.RemovedDir, name) err := os.MkdirAll(targetDir, 0755) c.Assert(err, gc.IsNil) err = manager.StopContainer(instance) c.Assert(err, gc.IsNil) // Check that the container dir is no longer in the container dir c.Assert(filepath.Join(s.ContainerDir, name), jc.DoesNotExist) // but instead, in the removed container dir with a ".1" suffix as there was already a directory there. c.Assert(filepath.Join(s.RemovedDir, fmt.Sprintf("%s.1", name)), jc.IsDirectory) }
func (suite *StateSuite) TestSaveStateWritesStateFile(c *C) { storage, cleanup := makeDummyStorage(c) defer cleanup() arch := "amd64" state := environs.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, Characteristics: []instance.HardwareCharacteristics{{Arch: &arch}}} marshaledState, err := goyaml.Marshal(state) c.Assert(err, IsNil) err = environs.SaveState(storage, &state) c.Assert(err, IsNil) loadedState, err := storage.Get(environs.StateFile) c.Assert(err, IsNil) content, err := ioutil.ReadAll(loadedState) c.Assert(err, IsNil) c.Check(content, DeepEquals, marshaledState) }
// StopInstances is specified in the Environ interface. func (env *azureEnviron) StopInstances(instances []instance.Instance) error { // Each Juju instance is an Azure Service (instance==service), destroy // all the Azure services. // Acquire management API object. context, err := env.getManagementAPI() if err != nil { return err } defer env.releaseManagementAPI(context) // Shut down all the instances; if there are errors, return only the // first one (but try to shut down all instances regardless). var firstErr error for _, instance := range instances { request := &gwacl.DestroyHostedServiceRequest{ServiceName: string(instance.Id())} err := context.DestroyHostedService(request) if err != nil && firstErr == nil { firstErr = err } } return firstErr }
func (env *localEnviron) initialStateConfiguration(addr string, cons constraints.Value) (*state.State, error) { // We don't check the existance of the CACert here as if it wasn't set, we // wouldn't get this far. cfg := env.config.Config caCert, _ := cfg.CACert() addr = fmt.Sprintf("%s:%d", addr, cfg.StatePort()) info := &state.Info{ Addrs: []string{addr}, CACert: caCert, } timeout := state.DialOpts{60 * time.Second} bootstrap, err := environs.BootstrapConfig(cfg) if err != nil { return nil, err } st, err := state.Initialize(info, bootstrap, timeout) if err != nil { logger.Errorf("failed to initialize state: %v", err) return nil, err } logger.Debugf("state initialized") passwordHash := utils.PasswordHash(cfg.AdminSecret()) if err := environs.BootstrapUsers(st, cfg, passwordHash); err != nil { st.Close() return nil, err } jobs := []state.MachineJob{state.JobManageEnviron, state.JobManageState} if err := environs.ConfigureBootstrapMachine( st, cons, env.config.rootDir(), jobs, instance.Id(boostrapInstanceId), instance.HardwareCharacteristics{}); err != nil { st.Close() return nil, err } // Return an open state reference. return st, nil }
func (*EnvironSuite) TestStateInfo(c *C) { instanceID := "my-instance" patchWithServiceListResponse(c, []gwacl.HostedServiceDescriptor{{ ServiceName: instanceID, }}) env := makeEnviron(c) cleanup := setDummyStorage(c, env) defer cleanup() err := environs.SaveState( env.Storage(), &environs.BootstrapState{StateInstances: []instance.Id{instance.Id(instanceID)}}) c.Assert(err, IsNil) stateInfo, apiInfo, err := env.StateInfo() c.Assert(err, IsNil) config := env.Config() dnsName := "my-instance." + AZURE_DOMAIN_NAME stateServerAddr := fmt.Sprintf("%s:%d", dnsName, config.StatePort()) apiServerAddr := fmt.Sprintf("%s:%d", dnsName, config.APIPort()) c.Check(stateInfo.Addrs, DeepEquals, []string{stateServerAddr}) c.Check(apiInfo.Addrs, DeepEquals, []string{apiServerAddr}) }