func (s *environSuite) TestInstances(c *gc.C) { var ids []instance.Id instances, err := s.env.Instances(ids) c.Assert(err, gc.Equals, environs.ErrNoInstances) c.Assert(instances, gc.HasLen, 0) ids = append(ids, manual.BootstrapInstanceId) instances, err = s.env.Instances(ids) c.Assert(err, gc.IsNil) c.Assert(instances, gc.HasLen, 1) c.Assert(instances[0], gc.NotNil) ids = append(ids, manual.BootstrapInstanceId) instances, err = s.env.Instances(ids) c.Assert(err, gc.IsNil) c.Assert(instances, gc.HasLen, 2) c.Assert(instances[0], gc.NotNil) c.Assert(instances[1], gc.NotNil) ids = append(ids, instance.Id("invalid")) instances, err = s.env.Instances(ids) c.Assert(err, gc.Equals, environs.ErrPartialInstances) c.Assert(instances, gc.HasLen, 3) c.Assert(instances[0], gc.NotNil) c.Assert(instances[1], gc.NotNil) c.Assert(instances[2], gc.IsNil) ids = []instance.Id{instance.Id("invalid")} instances, err = s.env.Instances(ids) c.Assert(err, gc.Equals, environs.ErrNoInstances) c.Assert(instances, gc.HasLen, 1) c.Assert(instances[0], gc.IsNil) }
func (*utilSuite) TestGetSystemIdValues(c *gc.C) { instanceId1 := instance.Id("/MAAS/api/1.0/nodes/system_id1/") instanceId2 := instance.Id("/MAAS/api/1.0/nodes/system_id2/") instanceIds := []instance.Id{instanceId1, instanceId2} values := getSystemIdValues(instanceIds) c.Check(values["id"], gc.DeepEquals, []string{"system_id1", "system_id2"}) }
// setUpManual adds "manually provisioned" machines to state: // one manager machine, and one non-manager. func (s *destroyEnvironmentSuite) setUpManual(c *gc.C) (m0, m1 *state.Machine) { m0, err := s.State.AddMachine("precise", state.JobManageEnviron) c.Assert(err, gc.IsNil) err = m0.SetProvisioned(instance.Id("manual:0"), "manual:0:fake_nonce", nil) c.Assert(err, gc.IsNil) m1, err = s.State.AddMachine("precise", state.JobHostUnits) c.Assert(err, gc.IsNil) err = m1.SetProvisioned(instance.Id("manual:1"), "manual:1:fake_nonce", nil) c.Assert(err, gc.IsNil) return m0, m1 }
// TestCannotRecordStartedInstance checks that when saving bootstrap
// state fails after an instance has been started, Bootstrap surfaces
// the storage error and stops the instance it just started.
func (s *BootstrapSuite) TestCannotRecordStartedInstance(c *gc.C) {
	innerStorage := newStorage(s, c)
	stor := &mockStorage{Storage: innerStorage}
	startInstance := func(
		_ string, _ constraints.Value, _, _ []string, _ tools.List, _ *cloudinit.MachineConfig,
	) (
		instance.Instance, *instance.HardwareCharacteristics, []network.Info, error,
	) {
		// Sabotage storage only once the instance "starts", so the
		// failure happens when Bootstrap records the started instance.
		stor.putErr = fmt.Errorf("suddenly a wild blah")
		return &mockInstance{id: "i-blah"}, nil, nil, nil
	}
	// Record every instance the environ is asked to stop.
	var stopped []instance.Instance
	stopInstances := func(instances []instance.Instance) error {
		stopped = append(stopped, instances...)
		return nil
	}
	env := &mockEnviron{
		storage:       stor,
		startInstance: startInstance,
		stopInstances: stopInstances,
		config:        configGetter(c),
	}
	ctx := coretesting.Context(c)
	err := common.Bootstrap(ctx, env, environs.BootstrapParams{})
	c.Assert(err, gc.ErrorMatches, "cannot save state: suddenly a wild blah")
	// The started-but-unrecorded instance must have been cleaned up.
	c.Assert(stopped, gc.HasLen, 1)
	c.Assert(stopped[0].Id(), gc.Equals, instance.Id("i-blah"))
}
// restoreBootstrapMachine restores a backup onto machine 0 of the
// environment reached through conn: it looks up the machine's public
// address and instance id from status, copies backupFile to the host
// over scp, and runs the update script there over ssh. It returns the
// bootstrap machine's instance id and address.
func restoreBootstrapMachine(conn *juju.APIConn, backupFile string, creds credentials) (newInstId instance.Id, addr string, err error) {
	addr, err = conn.State.Client().PublicAddress("0")
	if err != nil {
		return "", "", fmt.Errorf("cannot get public address of bootstrap machine: %v", err)
	}
	status, err := conn.State.Client().Status(nil)
	if err != nil {
		return "", "", fmt.Errorf("cannot get environment status: %v", err)
	}
	// Machine "0" is by convention the bootstrap machine.
	info, ok := status.Machines["0"]
	if !ok {
		return "", "", fmt.Errorf("cannot find bootstrap machine in status")
	}
	newInstId = instance.Id(info.InstanceId)
	progress("copying backup file to bootstrap host")
	if err := sendViaScp(backupFile, addr, "~/juju-backup.tgz"); err != nil {
		return "", "", fmt.Errorf("cannot copy backup file to bootstrap instance: %v", err)
	}
	progress("updating bootstrap machine")
	if err := runViaSsh(addr, updateBootstrapMachineScript(newInstId, creds)); err != nil {
		return "", "", fmt.Errorf("update script failed: %v", err)
	}
	return newInstId, addr, nil
}
// countPolls sets up a machine loop with the given
// addresses and status to be returned from getInstanceInfo,
// waits for coretesting.ShortWait, and returns the
// number of times the instance is polled. A nil addrs makes
// getInstanceInfo fail, simulating unavailable addresses.
func countPolls(c *gc.C, addrs []instance.Address, instId, instStatus string, machineStatus params.Status) int {
	count := int32(0)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		c.Check(string(id), gc.Equals, instId)
		// The poller runs in its own goroutine; count atomically.
		atomic.AddInt32(&count, 1)
		if addrs == nil {
			return instanceInfo{}, fmt.Errorf("no instance addresses available")
		}
		return instanceInfo{addrs, instStatus}, nil
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		id:         "99",
		instanceId: instance.Id(instId),
		refresh:    func() error { return nil },
		addresses:  addrs,
		life:       state.Alive,
		status:     machineStatus,
	}
	died := make(chan machine)
	go runMachine(context, m, nil, died)
	// Let the loop poll for a while, then shut it down cleanly.
	time.Sleep(coretesting.ShortWait)
	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killAllErr, gc.Equals, nil)
	return int(count)
}
func (s *machineSuite) TestSinglePollWhenInstancInfoUnimplemented(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Millisecond) s.PatchValue(&LongPoll, 1*time.Millisecond) count := int32(0) getInstanceInfo := func(id instance.Id) (instanceInfo, error) { c.Check(id, gc.Equals, instance.Id("i1234")) atomic.AddInt32(&count, 1) return instanceInfo{}, errors.NotImplementedf("instance address") } context := &testMachineContext{ getInstanceInfo: getInstanceInfo, dyingc: make(chan struct{}), } m := &testMachine{ id: "99", instanceId: "i1234", refresh: func() error { return nil }, life: state.Alive, } died := make(chan machine) go runMachine(context, m, nil, died) time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) c.Assert(context.killAllErr, gc.Equals, nil) c.Assert(count, gc.Equals, int32(1)) }
// TestMachineConfig checks that MachineConfig for an added machine
// carries the environment's state and API addresses, and a non-empty
// tools URL.
func (s *machineConfigSuite) TestMachineConfig(c *gc.C) {
	addrs := []instance.Address{instance.NewAddress("1.2.3.4", instance.NetworkUnknown)}
	hc := instance.MustParseHardware("mem=4G arch=amd64")
	apiParams := params.AddMachineParams{
		Jobs:                    []params.MachineJob{params.JobHostUnits},
		InstanceId:              instance.Id("1234"),
		Nonce:                   "foo",
		HardwareCharacteristics: hc,
		Addrs:                   addrs,
	}
	machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams})
	c.Assert(err, gc.IsNil)
	c.Assert(len(machines), gc.Equals, 1)
	machineId := machines[0].Machine
	machineConfig, err := client.MachineConfig(s.State, machineId, apiParams.Nonce, "")
	c.Assert(err, gc.IsNil)
	// Compare against the addresses the environment itself reports.
	envConfig, err := s.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	env, err := environs.New(envConfig)
	c.Assert(err, gc.IsNil)
	stateInfo, apiInfo, err := env.StateInfo()
	c.Assert(err, gc.IsNil)
	c.Check(machineConfig.StateInfo.Addrs, gc.DeepEquals, stateInfo.Addrs)
	c.Check(machineConfig.APIInfo.Addrs, gc.DeepEquals, apiInfo.Addrs)
	c.Assert(machineConfig.Tools.URL, gc.Not(gc.Equals), "")
}
func (*utilSuite) TestExtractSystemId(c *gc.C) { instanceId := instance.Id("/MAAS/api/1.0/nodes/system_id/") systemId := extractSystemId(instanceId) c.Check(systemId, gc.Equals, "system_id") }
func (s *withoutStateServerSuite) TestSetProvisioned(c *gc.C) { // Provision machine 0 first. hwChars := instance.MustParseHardware("arch=i386", "mem=4G") err := s.machines[0].SetProvisioned("i-am", "fake_nonce", &hwChars) c.Assert(err, gc.IsNil) args := params.SetProvisioned{Machines: []params.MachineSetProvisioned{ {Tag: s.machines[0].Tag(), InstanceId: "i-was", Nonce: "fake_nonce", Characteristics: nil}, {Tag: s.machines[1].Tag(), InstanceId: "i-will", Nonce: "fake_nonce", Characteristics: &hwChars}, {Tag: s.machines[2].Tag(), InstanceId: "i-am-too", Nonce: "fake", Characteristics: nil}, {Tag: "machine-42", InstanceId: "", Nonce: "", Characteristics: nil}, {Tag: "unit-foo-0", InstanceId: "", Nonce: "", Characteristics: nil}, {Tag: "service-bar", InstanceId: "", Nonce: "", Characteristics: nil}, }} result, err := s.provisioner.SetProvisioned(args) c.Assert(err, gc.IsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {¶ms.Error{ Message: `cannot set instance data for machine "0": already set`, }}, {nil}, {nil}, {apiservertesting.NotFoundError("machine 42")}, {apiservertesting.ErrUnauthorized}, {apiservertesting.ErrUnauthorized}, }, }) // Verify machine 1 and 2 were provisioned. c.Assert(s.machines[1].Refresh(), gc.IsNil) c.Assert(s.machines[2].Refresh(), gc.IsNil) instanceId, err := s.machines[1].InstanceId() c.Assert(err, gc.IsNil) c.Check(instanceId, gc.Equals, instance.Id("i-will")) instanceId, err = s.machines[2].InstanceId() c.Assert(err, gc.IsNil) c.Check(instanceId, gc.Equals, instance.Id("i-am-too")) c.Check(s.machines[1].CheckProvisioned("fake_nonce"), jc.IsTrue) c.Check(s.machines[2].CheckProvisioned("fake"), jc.IsTrue) gotHardware, err := s.machines[1].HardwareCharacteristics() c.Assert(err, gc.IsNil) c.Check(gotHardware, gc.DeepEquals, &hwChars) }
func (s *instanceTest) TestString(c *gc.C) { jsonValue := `{"hostname": "thethingintheplace", "system_id": "system_id", "test": "test"}` obj := s.testMAASObject.TestServer.NewNode(jsonValue) instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()} hostname, err := instance.DNSName() c.Assert(err, gc.IsNil) expected := hostname + ":" + string(instance.Id()) c.Assert(fmt.Sprint(instance), gc.Equals, expected) }
func (s *DestroySuite) TestCannotStopInstances(c *gc.C) { env := &mockEnviron{ allInstances: func() ([]instance.Instance, error) { return []instance.Instance{ &mockInstance{id: "one"}, &mockInstance{id: "another"}, }, nil }, stopInstances: func(instances []instance.Instance) error { c.Assert(instances, gc.HasLen, 2) c.Assert(instances[0].Id(), gc.Equals, instance.Id("one")) c.Assert(instances[1].Id(), gc.Equals, instance.Id("another")) return fmt.Errorf("nah") }, } err := common.Destroy(env) c.Assert(err, gc.ErrorMatches, "nah") }
func (s *kvmProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string { s.State.StartSync() event := <-s.events c.Assert(event.Action, gc.Equals, mock.Started) err := machine.Refresh() c.Assert(err, gc.IsNil) s.waitInstanceId(c, machine, instance.Id(event.InstanceId)) return event.InstanceId }
func (s *instanceTest) TestStringWithoutHostname(c *gc.C) { // For good measure, test what happens if we don't have a hostname. jsonValue := `{"system_id": "system_id", "test": "test"}` obj := s.testMAASObject.TestServer.NewNode(jsonValue) instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()} _, err := instance.DNSName() c.Assert(err, gc.NotNil) expected := fmt.Sprintf("<DNSName failed: %q>", err) + ":" + string(instance.Id()) c.Assert(fmt.Sprint(instance), gc.Equals, expected) }
func (suite *StateSuite) setUpSavedState(c *gc.C, dataDir string) bootstrap.BootstrapState { state := bootstrap.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, } content, err := goyaml.Marshal(state) c.Assert(err, gc.IsNil) err = ioutil.WriteFile(filepath.Join(dataDir, bootstrap.StateFile), []byte(content), 0644) c.Assert(err, gc.IsNil) return state }
func (s *BootstrapSuite) TestInitializeEnvironment(c *gc.C) { hw := instance.MustParseHardware("arch=amd64 mem=8G") machConf, cmd, err := s.initBootstrapCommand(c, nil, "--env-config", s.envcfg, "--instance-id", string(s.instanceId), "--hardware", hw.String()) c.Assert(err, gc.IsNil) err = cmd.Run(nil) c.Assert(err, gc.IsNil) c.Assert(s.fakeEnsureMongo.dataDir, gc.Equals, s.dataDir) c.Assert(s.fakeEnsureMongo.initiateCount, gc.Equals, 1) c.Assert(s.fakeEnsureMongo.ensureCount, gc.Equals, 1) c.Assert(s.fakeEnsureMongo.dataDir, gc.Equals, s.dataDir) c.Assert(s.fakeEnsureMongo.withHA, jc.IsTrue) expectInfo, exists := machConf.StateServingInfo() c.Assert(exists, jc.IsTrue) c.Assert(expectInfo.SharedSecret, gc.Equals, "") servingInfo := s.fakeEnsureMongo.info c.Assert(len(servingInfo.SharedSecret), gc.Not(gc.Equals), 0) servingInfo.SharedSecret = "" c.Assert(servingInfo, jc.DeepEquals, expectInfo) expectDialAddrs := []string{fmt.Sprintf("127.0.0.1:%d", expectInfo.StatePort)} gotDialAddrs := s.fakeEnsureMongo.initiateParams.DialInfo.Addrs c.Assert(gotDialAddrs, gc.DeepEquals, expectDialAddrs) memberHost := fmt.Sprintf("%s:%d", s.bootstrapName, expectInfo.StatePort) c.Assert(s.fakeEnsureMongo.initiateParams.MemberHostPort, gc.Equals, memberHost) c.Assert(s.fakeEnsureMongo.initiateParams.User, gc.Equals, "") c.Assert(s.fakeEnsureMongo.initiateParams.Password, gc.Equals, "") st, err := state.Open(&state.Info{ Addrs: []string{testing.MgoServer.Addr()}, CACert: testing.CACert, Password: testPasswordHash(), }, state.DefaultDialOpts(), environs.NewStatePolicy()) c.Assert(err, gc.IsNil) defer st.Close() machines, err := st.AllMachines() c.Assert(err, gc.IsNil) c.Assert(machines, gc.HasLen, 1) instid, err := machines[0].InstanceId() c.Assert(err, gc.IsNil) c.Assert(instid, gc.Equals, instance.Id(string(s.instanceId))) stateHw, err := machines[0].HardwareCharacteristics() c.Assert(err, gc.IsNil) c.Assert(stateHw, gc.NotNil) c.Assert(*stateHw, gc.DeepEquals, hw) cons, err := 
st.EnvironConstraints() c.Assert(err, gc.IsNil) c.Assert(&cons, jc.Satisfies, constraints.IsEmpty) }
// StopInstances shuts down the given instances. func (broker *lxcBroker) StopInstances(instances []instance.Instance) error { // TODO: potentially parallelise. for _, instance := range instances { lxcLogger.Infof("stopping lxc container for instance: %s", instance.Id()) if err := broker.manager.DestroyContainer(instance); err != nil { lxcLogger.Errorf("container did not stop: %v", err) return err } } return nil }
func (suite *environSuite) TestInstancesReturnsErrorIfPartialInstances(c *gc.C) { known := suite.addNode(allocatedNode) suite.addNode(`{"system_id": "test2"}`) unknown := instance.Id("unknown systemID") instances, err := suite.makeEnviron().Instances([]instance.Id{known, unknown}) c.Check(err, gc.Equals, environs.ErrPartialInstances) c.Assert(instances, gc.HasLen, 2) c.Check(instances[0].Id(), gc.Equals, known) c.Check(instances[1], gc.IsNil) }
func (suite *StateSuite) TestLoadStateIntegratesWithSaveState(c *gc.C) { storage := suite.newStorage(c) state := bootstrap.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, } err := bootstrap.SaveState(storage, &state) c.Assert(err, gc.IsNil) storedState, err := bootstrap.LoadState(storage) c.Assert(err, gc.IsNil) c.Check(*storedState, gc.DeepEquals, state) }
func (s *KVMSuite) TestDestroyContainer(c *gc.C) { instance := containertesting.CreateContainer(c, s.manager, "1/lxc/0") err := s.manager.DestroyContainer(instance) c.Assert(err, gc.IsNil) name := string(instance.Id()) // Check that the container dir is no longer in the container dir c.Assert(filepath.Join(s.ContainerDir, name), jc.DoesNotExist) // but instead, in the removed container dir c.Assert(filepath.Join(s.RemovedDir, name), jc.IsDirectory) }
func (s *lxcBrokerSuite) TestStartInstance(c *gc.C) { machineId := "1/lxc/0" lxc := s.startInstance(c, machineId) c.Assert(lxc.Id(), gc.Equals, instance.Id("juju-machine-1-lxc-0")) c.Assert(s.lxcContainerDir(lxc), jc.IsDirectory) s.assertInstances(c, lxc) // Uses default network config lxcConfContents, err := ioutil.ReadFile(filepath.Join(s.ContainerDir, string(lxc.Id()), "lxc.conf")) c.Assert(err, gc.IsNil) c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.type = veth") c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.link = lxcbr0") }
func (s *machineConfigSuite) TestMachineConfigNoArch(c *gc.C) { apiParams := params.AddMachineParams{ Jobs: []params.MachineJob{params.JobHostUnits}, InstanceId: instance.Id("1234"), Nonce: "foo", } machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams}) c.Assert(err, gc.IsNil) c.Assert(len(machines), gc.Equals, 1) _, err = client.MachineConfig(s.State, machines[0].Machine, apiParams.Nonce, "") c.Assert(err, gc.ErrorMatches, fmt.Sprintf("arch is not set for %q", "machine-"+machines[0].Machine)) }
func (s *PrecheckerSuite) TestPrecheckInstanceInjectMachine(c *gc.C) { template := state.MachineTemplate{ InstanceId: instance.Id("bootstrap"), Series: "precise", Nonce: state.BootstrapNonce, Jobs: []state.MachineJob{state.JobManageEnviron}, Placement: "anyoldthing", } _, err := s.State.AddOneMachine(template) c.Assert(err, gc.IsNil) // PrecheckInstance should not have been called, as we've // injected a machine with an existing instance. c.Assert(s.prechecker.precheckInstanceSeries, gc.Equals, "") c.Assert(s.prechecker.precheckInstancePlacement, gc.Equals, "") }
func (s *machineSuite) TestInstanceId(c *gc.C) { // Add another, not provisioned machine to test // CodeNotProvisioned. newMachine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, gc.IsNil) apiNewMachine, err := s.firewaller.Machine(newMachine.Tag()) c.Assert(err, gc.IsNil) _, err = apiNewMachine.InstanceId() c.Assert(err, gc.ErrorMatches, "machine 3 is not provisioned") c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned) instanceId, err := s.apiMachine.InstanceId() c.Assert(err, gc.IsNil) c.Assert(instanceId, gc.Equals, instance.Id("i-manager")) }
// checkStopSomeInstances checks that instancesToStop are stopped while instancesToKeep are not. func (s *CommonProvisionerSuite) checkStopSomeInstances(c *gc.C, instancesToStop []instance.Instance, instancesToKeep []instance.Instance) { s.BackingState.StartSync() instanceIdsToStop := set.NewStrings() for _, instance := range instancesToStop { instanceIdsToStop.Add(string(instance.Id())) } instanceIdsToKeep := set.NewStrings() for _, instance := range instancesToKeep { instanceIdsToKeep.Add(string(instance.Id())) } // Continue checking for stop instance calls until all the instances we // are waiting on to finish, actually finish, or we time out. for !instanceIdsToStop.IsEmpty() { select { case o := <-s.op: switch o := o.(type) { case dummy.OpStopInstances: for _, stoppedInstance := range o.Instances { instId := string(stoppedInstance.Id()) instanceIdsToStop.Remove(instId) if instanceIdsToKeep.Contains(instId) { c.Errorf("provisioner unexpectedly stopped instance %s", instId) } } default: c.Fatalf("unexpected operation %#v", o) return } case <-time.After(2 * time.Second): c.Fatalf("provisioner did not stop an instance") return } } }
func (s *statusSuite) TestLegacyStatus(c *gc.C) { machine := s.addMachine(c) instanceId := "i-fakeinstance" err := machine.SetProvisioned(instance.Id(instanceId), "fakenonce", nil) c.Assert(err, gc.IsNil) client := s.APIState.Client() status, err := client.LegacyStatus() c.Assert(err, gc.IsNil) c.Check(status.Machines, gc.HasLen, 1) resultMachine, ok := status.Machines[machine.Id()] if !ok { c.Fatalf("Missing machine with id %q", machine.Id()) } c.Check(resultMachine.InstanceId, gc.Equals, instanceId) }
// gatherMachineParams collects all the information we know about the machine // we are about to provision. It will SSH into that machine as the ubuntu user. // The hostname supplied should not include a username. // If we can, we will reverse lookup the hostname by its IP address, and use // the DNS resolved name, rather than the name that was supplied func gatherMachineParams(hostname string) (*params.AddMachineParams, error) { // Generate a unique nonce for the machine. uuid, err := utils.NewUUID() if err != nil { return nil, err } var addrs []instance.Address if addr, err := HostAddress(hostname); err != nil { logger.Warningf("failed to compute public address for %q: %v", hostname, err) } else { addrs = append(addrs, addr) } provisioned, err := checkProvisioned(hostname) if err != nil { err = fmt.Errorf("error checking if provisioned: %v", err) return nil, err } if provisioned { return nil, ErrProvisioned } hc, series, err := DetectSeriesAndHardwareCharacteristics(hostname) if err != nil { err = fmt.Errorf("error detecting hardware characteristics: %v", err) return nil, err } // There will never be a corresponding "instance" that any provider // knows about. This is fine, and works well with the provisioner // task. The provisioner task will happily remove any and all dead // machines from state, but will ignore the associated instance ID // if it isn't one that the environment provider knows about. instanceId := instance.Id(manualInstancePrefix + hostname) nonce := fmt.Sprintf("%s:%s", instanceId, uuid.String()) machineParams := ¶ms.AddMachineParams{ Series: series, HardwareCharacteristics: hc, InstanceId: instanceId, Nonce: nonce, Addrs: addrs, Jobs: []params.MachineJob{params.JobHostUnits}, } return machineParams, nil }
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	// Collect ids as strings in a set to deduplicate machines shared
	// by several units.
	var instanceIdSet set.Strings
	for _, unit := range units {
		// Subordinate units share their principal's machine, so only
		// principal units contribute new services.
		if !unit.IsPrincipal() {
			continue
		}
		service, err := unit.Service()
		if err != nil {
			return nil, err
		}
		allUnits, err := service.AllUnits()
		if err != nil {
			return nil, err
		}
		for _, unit := range allUnits {
			machineId, err := unit.AssignedMachineId()
			if state.IsNotAssigned(err) {
				// Unassigned units have no instance to report.
				continue
			} else if err != nil {
				return nil, err
			}
			machine, err := st.Machine(machineId)
			if err != nil {
				return nil, err
			}
			instanceId, err := machine.InstanceId()
			if err == nil {
				instanceIdSet.Add(string(instanceId))
			} else if state.IsNotProvisionedError(err) {
				// Machines without an instance yet are skipped.
				continue
			} else {
				return nil, err
			}
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
func (s *machineConfigSuite) TestMachineConfigNoTools(c *gc.C) { s.PatchValue(&envtools.DefaultBaseURL, "") addrs := []instance.Address{instance.NewAddress("1.2.3.4", instance.NetworkUnknown)} hc := instance.MustParseHardware("mem=4G arch=amd64") apiParams := params.AddMachineParams{ Series: "quantal", Jobs: []params.MachineJob{params.JobHostUnits}, InstanceId: instance.Id("1234"), Nonce: "foo", HardwareCharacteristics: hc, Addrs: addrs, } machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams}) c.Assert(err, gc.IsNil) _, err = client.MachineConfig(s.State, machines[0].Machine, apiParams.Nonce, "") c.Assert(err, gc.ErrorMatches, coretools.ErrNoMatches.Error()) }
func (suite *StateSuite) TestSaveStateWritesStateFile(c *gc.C) { stor := suite.newStorage(c) state := bootstrap.BootstrapState{ StateInstances: []instance.Id{instance.Id("an-instance-id")}, } marshaledState, err := goyaml.Marshal(state) c.Assert(err, gc.IsNil) err = bootstrap.SaveState(stor, &state) c.Assert(err, gc.IsNil) loadedState, err := storage.Get(stor, bootstrap.StateFile) c.Assert(err, gc.IsNil) content, err := ioutil.ReadAll(loadedState) c.Assert(err, gc.IsNil) c.Check(content, gc.DeepEquals, marshaledState) }