func (s *KVMSuite) TestDestroyContainer(c *gc.C) {
	instance := containertesting.CreateContainer(c, s.manager, "1/lxc/0")

	err := s.manager.DestroyContainer(instance.Id())
	c.Assert(err, gc.IsNil)

	name := string(instance.Id())
	// Check that the container directory is gone from the container dir...
	c.Assert(filepath.Join(s.ContainerDir, name), jc.DoesNotExist)
	// ...and has been moved to the removed-containers dir instead.
	c.Assert(filepath.Join(s.RemovedDir, name), jc.IsDirectory)
}
func (s *machineConfigSuite) TestMachineConfig(c *gc.C) {
	addrs := []instance.Address{instance.NewAddress("1.2.3.4", instance.NetworkUnknown)}
	hc := instance.MustParseHardware("mem=4G arch=amd64")
	apiParams := params.AddMachineParams{
		Jobs:                    []params.MachineJob{params.JobHostUnits},
		InstanceId:              instance.Id("1234"),
		Nonce:                   "foo",
		HardwareCharacteristics: hc,
		Addrs:                   addrs,
	}
	machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams})
	c.Assert(err, gc.IsNil)
	c.Assert(machines, gc.HasLen, 1)

	machineId := machines[0].Machine
	machineConfig, err := client.MachineConfig(s.State, machineId, apiParams.Nonce, "")
	c.Assert(err, gc.IsNil)

	envConfig, err := s.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	env, err := environs.New(envConfig)
	c.Assert(err, gc.IsNil)
	stateInfo, apiInfo, err := env.StateInfo()
	c.Assert(err, gc.IsNil)
	c.Check(machineConfig.StateInfo.Addrs, gc.DeepEquals, stateInfo.Addrs)
	c.Check(machineConfig.APIInfo.Addrs, gc.DeepEquals, apiInfo.Addrs)
	c.Assert(machineConfig.Tools.URL, gc.Not(gc.Equals), "")
}
func (s *instanceTest) TestString(c *gc.C) {
	jsonValue := `{"hostname": "thethingintheplace", "system_id": "system_id", "test": "test"}`
	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
	instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()}
	hostname, err := instance.DNSName()
	c.Assert(err, gc.IsNil)
	expected := hostname + ":" + string(instance.Id())
	c.Assert(fmt.Sprint(instance), gc.Equals, expected)
}
func (suite *StateSuite) setUpSavedState(c *gc.C, dataDir string) bootstrap.BootstrapState {
	state := bootstrap.BootstrapState{
		StateInstances: []instance.Id{instance.Id("an-instance-id")},
	}
	content, err := goyaml.Marshal(state)
	c.Assert(err, gc.IsNil)
	err = ioutil.WriteFile(filepath.Join(dataDir, bootstrap.StateFile), content, 0644)
	c.Assert(err, gc.IsNil)
	return state
}
func (s *instanceTest) TestStringWithoutHostname(c *gc.C) {
	// For good measure, test what happens if we don't have a hostname.
	jsonValue := `{"system_id": "system_id", "test": "test"}`
	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
	instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()}
	_, err := instance.DNSName()
	c.Assert(err, gc.NotNil)
	expected := fmt.Sprintf("<DNSName failed: %q>", err) + ":" + string(instance.Id())
	c.Assert(fmt.Sprint(instance), gc.Equals, expected)
}
func shutdownMachines(manager container.Manager) func(*gc.C) {
	return func(c *gc.C) {
		instances, err := manager.ListContainers()
		c.Assert(err, gc.IsNil)
		for _, instance := range instances {
			err := manager.DestroyContainer(instance.Id())
			c.Check(err, gc.IsNil)
		}
	}
}
func (suite *environSuite) TestInstancesReturnsErrorIfPartialInstances(c *gc.C) {
	known := suite.addNode(allocatedNode)
	suite.addNode(`{"system_id": "test2"}`)
	unknown := instance.Id("unknown systemID")
	instances, err := suite.makeEnviron().Instances([]instance.Id{known, unknown})
	c.Check(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(instances, gc.HasLen, 2)
	c.Check(instances[0].Id(), gc.Equals, known)
	c.Check(instances[1], gc.IsNil)
}
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	s.State.StartSync()
	event := <-s.events
	c.Assert(event.Action, gc.Equals, mock.Created)
	event = <-s.events
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, gc.IsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
func (s *BootstrapSuite) TestInitializeEnvironment(c *gc.C) {
	hw := instance.MustParseHardware("arch=amd64 mem=8G")
	machConf, cmd, err := s.initBootstrapCommand(c, nil,
		"--env-config", s.envcfg,
		"--instance-id", string(s.instanceId),
		"--hardware", hw.String(),
	)
	c.Assert(err, gc.IsNil)
	err = cmd.Run(nil)
	c.Assert(err, gc.IsNil)

	c.Assert(s.fakeEnsureMongo.dataDir, gc.Equals, s.dataDir)
	c.Assert(s.fakeEnsureMongo.initiateCount, gc.Equals, 1)
	c.Assert(s.fakeEnsureMongo.ensureCount, gc.Equals, 1)
	c.Assert(s.fakeEnsureMongo.dataDir, gc.Equals, s.dataDir)

	expectInfo, exists := machConf.StateServingInfo()
	c.Assert(exists, jc.IsTrue)
	c.Assert(expectInfo.SharedSecret, gc.Equals, "")

	servingInfo := s.fakeEnsureMongo.info
	c.Assert(len(servingInfo.SharedSecret), gc.Not(gc.Equals), 0)
	servingInfo.SharedSecret = ""
	c.Assert(servingInfo, jc.DeepEquals, expectInfo)

	expectDialAddrs := []string{fmt.Sprintf("127.0.0.1:%d", expectInfo.StatePort)}
	gotDialAddrs := s.fakeEnsureMongo.initiateParams.DialInfo.Addrs
	c.Assert(gotDialAddrs, gc.DeepEquals, expectDialAddrs)

	memberHost := fmt.Sprintf("%s:%d", s.bootstrapName, expectInfo.StatePort)
	c.Assert(s.fakeEnsureMongo.initiateParams.MemberHostPort, gc.Equals, memberHost)
	c.Assert(s.fakeEnsureMongo.initiateParams.User, gc.Equals, "")
	c.Assert(s.fakeEnsureMongo.initiateParams.Password, gc.Equals, "")

	st, err := state.Open(&state.Info{
		Addrs:    []string{testing.MgoServer.Addr()},
		CACert:   testing.CACert,
		Password: testPasswordHash(),
	}, state.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st.Close()

	machines, err := st.AllMachines()
	c.Assert(err, gc.IsNil)
	c.Assert(machines, gc.HasLen, 1)

	instid, err := machines[0].InstanceId()
	c.Assert(err, gc.IsNil)
	c.Assert(instid, gc.Equals, instance.Id(string(s.instanceId)))

	stateHw, err := machines[0].HardwareCharacteristics()
	c.Assert(err, gc.IsNil)
	c.Assert(stateHw, gc.NotNil)
	c.Assert(*stateHw, gc.DeepEquals, hw)

	cons, err := st.EnvironConstraints()
	c.Assert(err, gc.IsNil)
	c.Assert(&cons, jc.Satisfies, constraints.IsEmpty)
}
func (s *machineConfigSuite) TestMachineConfigNoArch(c *gc.C) {
	apiParams := params.AddMachineParams{
		Jobs:       []params.MachineJob{params.JobHostUnits},
		InstanceId: instance.Id("1234"),
		Nonce:      "foo",
	}
	machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams})
	c.Assert(err, gc.IsNil)
	c.Assert(machines, gc.HasLen, 1)
	_, err = client.MachineConfig(s.State, machines[0].Machine, apiParams.Nonce, "")
	c.Assert(err, gc.ErrorMatches, fmt.Sprintf("arch is not set for %q", "machine-"+machines[0].Machine))
}
func (suite *StateSuite) TestLoadStateIntegratesWithSaveState(c *gc.C) {
	storage := suite.newStorage(c)
	state := bootstrap.BootstrapState{
		StateInstances: []instance.Id{instance.Id("an-instance-id")},
	}
	err := bootstrap.SaveState(storage, &state)
	c.Assert(err, gc.IsNil)
	storedState, err := bootstrap.LoadState(storage)
	c.Assert(err, gc.IsNil)
	c.Check(*storedState, gc.DeepEquals, state)
}
func (s *lxcBrokerSuite) TestStartInstance(c *gc.C) {
	machineId := "1/lxc/0"
	lxc := s.startInstance(c, machineId)
	c.Assert(lxc.Id(), gc.Equals, instance.Id("juju-machine-1-lxc-0"))
	c.Assert(s.lxcContainerDir(lxc), jc.IsDirectory)
	s.assertInstances(c, lxc)
	// Uses default network config.
	lxcConfContents, err := ioutil.ReadFile(filepath.Join(s.ContainerDir, string(lxc.Id()), "lxc.conf"))
	c.Assert(err, gc.IsNil)
	c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.type = veth")
	c.Assert(string(lxcConfContents), jc.Contains, "lxc.network.link = lxcbr0")
}
func (s *InstanceDistributorSuite) setupScenario(c *gc.C) {
	// Assign a unit so we have a non-empty distribution group, and
	// provision all instances so we have candidates.
	unit, err := s.wordpress.AddUnit()
	c.Assert(err, gc.IsNil)
	err = unit.AssignToMachine(s.machines[0])
	c.Assert(err, gc.IsNil)
	for i, m := range s.machines {
		instId := instance.Id(fmt.Sprintf("i-blah-%d", i))
		err = m.SetProvisioned(instId, "fake-nonce", nil)
		c.Assert(err, gc.IsNil)
	}
}
// checkStopSomeInstances checks that instancesToStop are stopped while
// instancesToKeep are not.
func (s *CommonProvisionerSuite) checkStopSomeInstances(c *gc.C,
	instancesToStop []instance.Instance, instancesToKeep []instance.Instance) {

	s.BackingState.StartSync()
	instanceIdsToStop := set.NewStrings()
	for _, instance := range instancesToStop {
		instanceIdsToStop.Add(string(instance.Id()))
	}
	instanceIdsToKeep := set.NewStrings()
	for _, instance := range instancesToKeep {
		instanceIdsToKeep.Add(string(instance.Id()))
	}
	// Keep checking for stop-instance calls until every instance we are
	// waiting on has actually stopped, or we time out.
	for !instanceIdsToStop.IsEmpty() {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStopInstances:
				for _, id := range o.Ids {
					instId := string(id)
					instanceIdsToStop.Remove(instId)
					if instanceIdsToKeep.Contains(instId) {
						c.Errorf("provisioner unexpectedly stopped instance %s", instId)
					}
				}
			default:
				c.Fatalf("unexpected operation %#v", o)
				return
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not stop an instance")
			return
		}
	}
}
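// Note that the loop above re-arms its 2-second timer on every received
// operation, so the timeout bounds each receive rather than the whole wait.
// A minimal, self-contained sketch (hypothetical helper, not part of this
// suite) of the same drain-until-done pattern with a single overall deadline:
func waitForIds(ids <-chan string, want []string, timeout time.Duration) error {
	pending := set.NewStrings()
	for _, id := range want {
		pending.Add(id)
	}
	deadline := time.After(timeout) // created once: one budget for the whole wait
	for !pending.IsEmpty() {
		select {
		case id := <-ids:
			pending.Remove(id)
		case <-deadline:
			return fmt.Errorf("timed out waiting for %v", pending.Values())
		}
	}
	return nil
}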
func (s *machineSuite) TestInstanceId(c *gc.C) {
	// Add another, not yet provisioned, machine to test
	// CodeNotProvisioned.
	newMachine, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	apiNewMachine, err := s.firewaller.Machine(newMachine.Tag())
	c.Assert(err, gc.IsNil)
	_, err = apiNewMachine.InstanceId()
	c.Assert(err, gc.ErrorMatches, "machine 3 is not provisioned")
	c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned)

	instanceId, err := s.apiMachine.InstanceId()
	c.Assert(err, gc.IsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-manager"))
}
func (suite *StateSuite) TestSaveStateWritesStateFile(c *gc.C) {
	stor := suite.newStorage(c)
	state := bootstrap.BootstrapState{
		StateInstances: []instance.Id{instance.Id("an-instance-id")},
	}
	marshaledState, err := goyaml.Marshal(state)
	c.Assert(err, gc.IsNil)
	err = bootstrap.SaveState(stor, &state)
	c.Assert(err, gc.IsNil)
	loadedState, err := storage.Get(stor, bootstrap.StateFile)
	c.Assert(err, gc.IsNil)
	content, err := ioutil.ReadAll(loadedState)
	c.Assert(err, gc.IsNil)
	c.Check(content, gc.DeepEquals, marshaledState)
}
func (s *machineConfigSuite) TestMachineConfigNoTools(c *gc.C) {
	s.PatchValue(&envtools.DefaultBaseURL, "")
	addrs := []instance.Address{instance.NewAddress("1.2.3.4", instance.NetworkUnknown)}
	hc := instance.MustParseHardware("mem=4G arch=amd64")
	apiParams := params.AddMachineParams{
		Series:                  "quantal",
		Jobs:                    []params.MachineJob{params.JobHostUnits},
		InstanceId:              instance.Id("1234"),
		Nonce:                   "foo",
		HardwareCharacteristics: hc,
		Addrs:                   addrs,
	}
	machines, err := s.APIState.Client().AddMachines([]params.AddMachineParams{apiParams})
	c.Assert(err, gc.IsNil)
	_, err = client.MachineConfig(s.State, machines[0].Machine, apiParams.Nonce, "")
	c.Assert(err, gc.ErrorMatches, coretools.ErrNoMatches.Error())
}
// InstanceId returns the provider-specific instance id for the
// machine, or a CodeNotProvisioned error if it is not set.
func (m *Machine) InstanceId() (instance.Id, error) {
	var results params.StringResults
	args := params.Entities{
		Entities: []params.Entity{{Tag: m.tag}},
	}
	err := m.st.call("InstanceId", args, &results)
	if err != nil {
		return "", err
	}
	if len(results.Results) != 1 {
		return "", fmt.Errorf("expected 1 result, got %d", len(results.Results))
	}
	result := results.Results[0]
	if result.Error != nil {
		return "", result.Error
	}
	return instance.Id(result.Result), nil
}
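// A minimal usage sketch (hypothetical caller, not part of this package) for
// InstanceId above: the not-provisioned case arrives as a params error code,
// so callers can detect it with params.IsCodeNotProvisioned (as the
// firewaller tests do) instead of matching on the error string.
func instanceIdIfProvisioned(m *Machine) (instance.Id, bool, error) {
	id, err := m.InstanceId()
	if params.IsCodeNotProvisioned(err) {
		// Not provisioned yet; report "no id" without treating it as fatal.
		return "", false, nil
	}
	if err != nil {
		return "", false, err
	}
	return id, true, nil
}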
// initState initializes the fake state with a single
// replicaset member and numMachines machines
// primed to vote.
func initState(c *gc.C, st *fakeState, numMachines int) {
	var ids []string
	for i := 10; i < 10+numMachines; i++ {
		id := fmt.Sprint(i)
		m := st.addMachine(id, true)
		m.setInstanceId(instance.Id("id-" + id))
		m.setStateHostPort(fmt.Sprintf("0.1.2.%d:%d", i, mongoPort))
		ids = append(ids, id)
		c.Assert(m.MongoHostPorts(), gc.HasLen, 1)
		m.setAPIHostPorts(addressesWithPort(apiPort, fmt.Sprintf("0.1.2.%d", i)))
	}
	st.machine("10").SetHasVote(true)
	st.setStateServers(ids...)
	st.session.Set(mkMembers("0v"))
	st.session.setStatus(mkStatuses("0p"))
	st.check = checkInvariants
}
func (s *InstanceDistributorSuite) TestDistributeInstancesNoEmptyMachines(c *gc.C) {
	for i := range s.machines {
		// Assign a unit so we have a non-empty distribution group.
		unit, err := s.wordpress.AddUnit()
		c.Assert(err, gc.IsNil)
		m, err := unit.AssignToCleanMachine()
		c.Assert(err, gc.IsNil)
		instId := instance.Id(fmt.Sprintf("i-blah-%d", i))
		err = m.SetProvisioned(instId, "fake-nonce", nil)
		c.Assert(err, gc.IsNil)
	}
	// The InstanceDistributor is not called if there are no empty machines.
	s.distributor.err = fmt.Errorf("no assignment for you")
	unit, err := s.wordpress.AddUnit()
	c.Assert(err, gc.IsNil)
	_, err = unit.AssignToCleanMachine()
	c.Assert(err, gc.ErrorMatches, eligibleMachinesInUse)
}
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	s.PatchValue(&gatherTime, 30*time.Millisecond)

	testGetter := new(testInstanceGetter)
	instance1 := newTestInstance("foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.results = []instance.Instance{instance1}
	aggregator := newAggregator(testGetter)

	replyChan := make(chan instanceInfoReply)
	req := instanceInfoReq{
		reply:  replyChan,
		instId: instance.Id("foo"),
	}
	aggregator.reqc <- req
	reply := <-replyChan
	c.Assert(reply.err, gc.IsNil)

	instance2 := newTestInstance("not foobar", []string{"192.168.1.2"})
	instance3 := newTestInstance("ok-ish", []string{"192.168.1.3"})
	testGetter.results = []instance.Instance{instance2, instance3}

	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		info, err := aggregator.instanceInfo(id)
		c.Check(err, gc.IsNil)
		c.Check(info.status, gc.Equals, expectStatus)
		wg.Done()
	}

	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")
	wg.Wait()

	c.Assert(testGetter.ids, gc.HasLen, 2)
}
// SCHEMACHANGE
// SetMachineInstanceId is used to reset a deprecated machine attribute.
func SetMachineInstanceId(m *Machine, instanceId string) {
	m.doc.InstanceId = instance.Id(instanceId)
}
func (*instanceSuite) TestId(c *gc.C) {
	azInstance := azureInstance{instanceId: "whatever"}
	c.Check(azInstance.Id(), gc.Equals, instance.Id("whatever"))
}
func (suite *environSuite) addNode(jsonText string) instance.Id {
	node := suite.testMAASObject.TestServer.NewNode(jsonText)
	resourceURI, _ := node.GetField("resource_uri")
	return instance.Id(resourceURI)
}
// setUpScenario adds some entities to the state so that
// we can check that they all get pulled in by
// allWatcherStateBacking.getAll.
func (s *storeManagerStateSuite) setUpScenario(c *gc.C) (entities entityInfoSlice) {
	add := func(e params.EntityInfo) {
		entities = append(entities, e)
	}
	m, err := s.State.AddMachine("quantal", JobManageEnviron)
	c.Assert(err, gc.IsNil)
	c.Assert(m.Tag(), gc.Equals, "machine-0")
	err = m.SetProvisioned(instance.Id("i-"+m.Tag()), "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	hc, err := m.HardwareCharacteristics()
	c.Assert(err, gc.IsNil)
	err = m.SetAddresses(instance.NewAddress("example.com", instance.NetworkUnknown))
	c.Assert(err, gc.IsNil)
	add(&params.MachineInfo{
		Id:                      "0",
		InstanceId:              "i-machine-0",
		Status:                  params.StatusPending,
		Life:                    params.Alive,
		Series:                  "quantal",
		Jobs:                    []params.MachineJob{JobManageEnviron.ToParams()},
		Addresses:               m.Addresses(),
		HardwareCharacteristics: hc,
	})

	wordpress := AddTestingService(c, s.State, "wordpress", AddTestingCharm(c, s.State, "wordpress"))
	err = wordpress.SetExposed()
	c.Assert(err, gc.IsNil)
	err = wordpress.SetMinUnits(3)
	c.Assert(err, gc.IsNil)
	err = wordpress.SetConstraints(constraints.MustParse("mem=100M"))
	c.Assert(err, gc.IsNil)
	setServiceConfigAttr(c, wordpress, "blog-title", "boring")
	add(&params.ServiceInfo{
		Name:        "wordpress",
		Exposed:     true,
		CharmURL:    serviceCharmURL(wordpress).String(),
		OwnerTag:    "user-admin",
		Life:        params.Alive,
		MinUnits:    3,
		Constraints: constraints.MustParse("mem=100M"),
		Config:      charm.Settings{"blog-title": "boring"},
	})
	pairs := map[string]string{"x": "12", "y": "99"}
	err = wordpress.SetAnnotations(pairs)
	c.Assert(err, gc.IsNil)
	add(&params.AnnotationInfo{
		Tag:         "service-wordpress",
		Annotations: pairs,
	})

	logging := AddTestingService(c, s.State, "logging", AddTestingCharm(c, s.State, "logging"))
	add(&params.ServiceInfo{
		Name:     "logging",
		CharmURL: serviceCharmURL(logging).String(),
		OwnerTag: "user-admin",
		Life:     params.Alive,
		Config:   charm.Settings{},
	})

	eps, err := s.State.InferEndpoints([]string{"logging", "wordpress"})
	c.Assert(err, gc.IsNil)
	rel, err := s.State.AddRelation(eps...)
	c.Assert(err, gc.IsNil)
	add(&params.RelationInfo{
		Key: "logging:logging-directory wordpress:logging-dir",
		Id:  rel.Id(),
		Endpoints: []params.Endpoint{
			{ServiceName: "logging", Relation: charm.Relation{Name: "logging-directory", Role: "requirer", Interface: "logging", Optional: false, Limit: 1, Scope: "container"}},
			{ServiceName: "wordpress", Relation: charm.Relation{Name: "logging-dir", Role: "provider", Interface: "logging", Optional: false, Limit: 0, Scope: "container"}},
		},
	})

	for i := 0; i < 2; i++ {
		wu, err := wordpress.AddUnit()
		c.Assert(err, gc.IsNil)
		c.Assert(wu.Tag(), gc.Equals, fmt.Sprintf("unit-wordpress-%d", i))

		m, err := s.State.AddMachine("quantal", JobHostUnits)
		c.Assert(err, gc.IsNil)
		c.Assert(m.Tag(), gc.Equals, fmt.Sprintf("machine-%d", i+1))

		add(&params.UnitInfo{
			Name:      fmt.Sprintf("wordpress/%d", i),
			Service:   wordpress.Name(),
			Series:    m.Series(),
			MachineId: m.Id(),
			Ports:     []instance.Port{},
			Status:    params.StatusPending,
		})
		pairs := map[string]string{"name": fmt.Sprintf("bar %d", i)}
		err = wu.SetAnnotations(pairs)
		c.Assert(err, gc.IsNil)
		add(&params.AnnotationInfo{
			Tag:         fmt.Sprintf("unit-wordpress-%d", i),
			Annotations: pairs,
		})

		err = m.SetProvisioned(instance.Id("i-"+m.Tag()), "fake_nonce", nil)
		c.Assert(err, gc.IsNil)
		err = m.SetStatus(params.StatusError, m.Tag(), nil)
		c.Assert(err, gc.IsNil)
		hc, err := m.HardwareCharacteristics()
		c.Assert(err, gc.IsNil)
		add(&params.MachineInfo{
			Id:                      fmt.Sprint(i + 1),
			InstanceId:              "i-" + m.Tag(),
			Status:                  params.StatusError,
			StatusInfo:              m.Tag(),
			Life:                    params.Alive,
			Series:                  "quantal",
			Jobs:                    []params.MachineJob{JobHostUnits.ToParams()},
			Addresses:               []instance.Address{},
			HardwareCharacteristics: hc,
		})
		err = wu.AssignToMachine(m)
		c.Assert(err, gc.IsNil)

		deployer, ok := wu.DeployerTag()
		c.Assert(ok, gc.Equals, true)
		c.Assert(deployer, gc.Equals, fmt.Sprintf("machine-%d", i+1))

		wru, err := rel.Unit(wu)
		c.Assert(err, gc.IsNil)

		// Create the subordinate unit as a side-effect of entering
		// scope in the principal's relation-unit.
		err = wru.EnterScope(nil)
		c.Assert(err, gc.IsNil)

		lu, err := s.State.Unit(fmt.Sprintf("logging/%d", i))
		c.Assert(err, gc.IsNil)
		c.Assert(lu.IsPrincipal(), gc.Equals, false)
		deployer, ok = lu.DeployerTag()
		c.Assert(ok, gc.Equals, true)
		c.Assert(deployer, gc.Equals, fmt.Sprintf("unit-wordpress-%d", i))
		add(&params.UnitInfo{
			Name:    fmt.Sprintf("logging/%d", i),
			Service: "logging",
			Series:  "quantal",
			Ports:   []instance.Port{},
			Status:  params.StatusPending,
		})
	}
	return
}
// Id implements instance.Instance.Id.
func (lxc *lxcInstance) Id() instance.Id {
	return instance.Id(lxc.id)
}
func maasObjectId(maasObject *gomaasapi.MAASObject) instance.Id {
	// Use the node's 'resource_uri' value.
	return instance.Id(maasObject.URI().String())
}
// Id implements instance.Instance.Id.
func (kvm *kvmInstance) Id() instance.Id {
	return instance.Id(kvm.id)
}
func (s *KVMSuite) TestCreateContainer(c *gc.C) {
	instance := containertesting.CreateContainer(c, s.manager, "1/kvm/0")
	name := string(instance.Id())
	cloudInitFilename := filepath.Join(s.ContainerDir, name, "cloud-init")
	containertesting.AssertCloudInit(c, cloudInitFilename)
}
// StartInstance is specified in the InstanceBroker interface.
func (e *environ) StartInstance(args environs.StartInstanceParams) (instance.Instance, *instance.HardwareCharacteristics, []network.Info, error) {
	defer delay()
	machineId := args.MachineConfig.MachineId
	logger.Infof("dummy startinstance, machine %s", machineId)
	if err := e.checkBroken("StartInstance"); err != nil {
		return nil, nil, nil, err
	}
	estate, err := e.state()
	if err != nil {
		return nil, nil, nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if args.MachineConfig.MachineNonce == "" {
		return nil, nil, nil, fmt.Errorf("cannot start instance: missing machine nonce")
	}
	if _, ok := e.Config().CACert(); !ok {
		return nil, nil, nil, fmt.Errorf("no CA certificate in environment configuration")
	}
	if args.MachineConfig.StateInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	if args.MachineConfig.APIInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	logger.Infof("would pick tools from %s", args.Tools)
	series := args.Tools.OneSeries()

	idString := fmt.Sprintf("%s-%d", e.name, estate.maxId)
	i := &dummyInstance{
		id:           instance.Id(idString),
		addresses:    instance.NewAddresses(idString + ".dns"),
		ports:        make(map[instance.Port]bool),
		machineId:    machineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
	}

	var hc *instance.HardwareCharacteristics
	// To match current system capability, only provide hardware
	// characteristics for environ machines, not containers.
	if state.ParentId(machineId) == "" {
		// We will just assume the instance hardware characteristics exactly
		// match the supplied constraints (if specified).
		hc = &instance.HardwareCharacteristics{
			Arch:     args.Constraints.Arch,
			Mem:      args.Constraints.Mem,
			RootDisk: args.Constraints.RootDisk,
			CpuCores: args.Constraints.CpuCores,
			CpuPower: args.Constraints.CpuPower,
			Tags:     args.Constraints.Tags,
		}
		// Fill in some expected instance hardware characteristics if
		// the constraints do not specify them.
		if hc.Arch == nil {
			arch := "amd64"
			hc.Arch = &arch
		}
		if hc.Mem == nil {
			mem := uint64(1024)
			hc.Mem = &mem
		}
		if hc.RootDisk == nil {
			disk := uint64(8192)
			hc.RootDisk = &disk
		}
		if hc.CpuCores == nil {
			cores := uint64(1)
			hc.CpuCores = &cores
		}
	}

	// Simulate networks added when requested.
	networkInfo := make([]network.Info, len(args.MachineConfig.IncludeNetworks))
	for idx, netName := range args.MachineConfig.IncludeNetworks {
		if strings.HasPrefix(netName, "bad-") {
			// Simulate we didn't get correct information for the network.
			networkInfo[idx] = network.Info{
				ProviderId:  network.Id(netName),
				NetworkName: netName,
				CIDR:        "invalid",
			}
		} else {
			networkInfo[idx] = network.Info{
				ProviderId:    network.Id(netName),
				NetworkName:   netName,
				CIDR:          fmt.Sprintf("0.%d.2.0/24", idx+1),
				InterfaceName: fmt.Sprintf("eth%d", idx),
				VLANTag:       idx,
				MACAddress:    fmt.Sprintf("aa:bb:cc:dd:ee:f%d", idx),
				IsVirtual:     idx > 0,
			}
		}
	}

	estate.insts[i.id] = i
	estate.maxId++
	estate.ops <- OpStartInstance{
		Env:             e.name,
		MachineId:       machineId,
		MachineNonce:    args.MachineConfig.MachineNonce,
		Constraints:     args.Constraints,
		IncludeNetworks: args.MachineConfig.IncludeNetworks,
		ExcludeNetworks: args.MachineConfig.ExcludeNetworks,
		NetworkInfo:     networkInfo,
		Instance:        i,
		Info:            args.MachineConfig.StateInfo,
		APIInfo:         args.MachineConfig.APIInfo,
		Secret:          e.ecfg().secret(),
	}
	return i, hc, networkInfo, nil
}
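// A hedged, standalone restatement (hypothetical helper, mirroring the
// defaulting rules in StartInstance above) of how the dummy provider maps
// constraints to reported hardware: fields that are set pass through, while
// unset fields become arch=amd64, mem=1024M, root-disk=8192M, cpu-cores=1.
func defaultedHardware(cons constraints.Value) instance.HardwareCharacteristics {
	hc := instance.HardwareCharacteristics{
		Arch:     cons.Arch,
		Mem:      cons.Mem,
		RootDisk: cons.RootDisk,
		CpuCores: cons.CpuCores,
		CpuPower: cons.CpuPower,
		Tags:     cons.Tags,
	}
	if hc.Arch == nil {
		arch := "amd64"
		hc.Arch = &arch
	}
	if hc.Mem == nil {
		mem := uint64(1024)
		hc.Mem = &mem
	}
	if hc.RootDisk == nil {
		disk := uint64(8192)
		hc.RootDisk = &disk
	}
	if hc.CpuCores == nil {
		cores := uint64(1)
		hc.CpuCores = &cores
	}
	return hc
}

// For example, constraints.MustParse("mem=4G cpu-cores=2") would yield
// hardware equivalent to "arch=amd64 mem=4096M root-disk=8192M cpu-cores=2".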