Example #1
func (s *environSuite) TestInstances(c *gc.C) {
	var ids []instance.Id

	instances, err := s.env.Instances(ids)
	c.Assert(err, gc.Equals, environs.ErrNoInstances)
	c.Assert(instances, gc.HasLen, 0)

	ids = append(ids, BootstrapInstanceId)
	instances, err = s.env.Instances(ids)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instances, gc.HasLen, 1)
	c.Assert(instances[0], gc.NotNil)

	ids = append(ids, BootstrapInstanceId)
	instances, err = s.env.Instances(ids)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instances, gc.HasLen, 2)
	c.Assert(instances[0], gc.NotNil)
	c.Assert(instances[1], gc.NotNil)

	ids = append(ids, instance.Id("invalid"))
	instances, err = s.env.Instances(ids)
	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(instances, gc.HasLen, 3)
	c.Assert(instances[0], gc.NotNil)
	c.Assert(instances[1], gc.NotNil)
	c.Assert(instances[2], gc.IsNil)

	ids = []instance.Id{instance.Id("invalid")}
	instances, err = s.env.Instances(ids)
	c.Assert(err, gc.Equals, environs.ErrNoInstances)
	c.Assert(instances, gc.HasLen, 1)
	c.Assert(instances[0], gc.IsNil)
}
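A caller-side sketch of the contract this test pins down, assuming only what the assertions above establish: the result slice is index-aligned with ids, misses are nil, and the sentinel errors environs.ErrNoInstances and environs.ErrPartialInstances describe the overall outcome. reportInstances and logger are illustrative names, not part of the suite.
func reportInstances(env environs.Environ, ids []instance.Id) {
	instances, err := env.Instances(ids)
	switch err {
	case nil:
		// Every requested id was found.
	case environs.ErrPartialInstances:
		// Misses keep their position in the result as nil entries.
		for i, inst := range instances {
			if inst == nil {
				logger.Debugf("instance %q not found", ids[i])
			}
		}
	case environs.ErrNoInstances:
		logger.Debugf("none of the requested instances exist")
	default:
		logger.Errorf("cannot list instances: %v", err)
	}
}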
Example #2
func (suite *StateSuite) TestRemoveStateInstancesPartial(c *gc.C) {
	storage := suite.newStorage(c)
	state := common.BootstrapState{
		StateInstances: []instance.Id{
			instance.Id("a"),
			instance.Id("b"),
			instance.Id("c"),
		},
	}
	err := common.SaveState(storage, &state)
	c.Assert(err, jc.ErrorIsNil)

	err = common.RemoveStateInstances(
		storage,
		state.StateInstances[0],
		instance.Id("not-there"),
		state.StateInstances[2],
	)
	c.Assert(err, jc.ErrorIsNil)

	storedState, err := common.LoadState(storage)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(storedState, gc.DeepEquals, &common.BootstrapState{
		StateInstances: []instance.Id{
			state.StateInstances[1],
		},
	})
}
Example #3
func (s *IPAddressSuite) TestAllocateTo(c *gc.C) {
	machine := s.createMachine(c)

	addr := network.NewScopedAddress("0.1.2.3", network.ScopePublic)
	ipAddr, err := s.State.AddIPAddress(addr, "foobar")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ipAddr.State(), gc.Equals, state.AddressStateUnknown)
	c.Assert(ipAddr.MachineId(), gc.Equals, "")
	c.Assert(ipAddr.InterfaceId(), gc.Equals, "")
	c.Assert(ipAddr.InstanceId(), gc.Equals, instance.UnknownId)

	err = ipAddr.AllocateTo(machine.Id(), "wobble")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ipAddr.State(), gc.Equals, state.AddressStateAllocated)
	c.Assert(ipAddr.MachineId(), gc.Equals, machine.Id())
	c.Assert(ipAddr.InterfaceId(), gc.Equals, "wobble")
	c.Assert(ipAddr.InstanceId(), gc.Equals, instance.Id("foo"))

	freshCopy, err := s.State.IPAddress("0.1.2.3")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(freshCopy.State(), gc.Equals, state.AddressStateAllocated)
	c.Assert(freshCopy.MachineId(), gc.Equals, machine.Id())
	c.Assert(freshCopy.InterfaceId(), gc.Equals, "wobble")
	c.Assert(freshCopy.InstanceId(), gc.Equals, instance.Id("foo"))

	// Allocating twice should fail.
	machine2 := s.createMachine(c)
	err = ipAddr.AllocateTo(machine2.Id(), "i")

	msg := fmt.Sprintf(
		`cannot allocate IP address "public:0.1.2.3" to machine %q, interface "i": `+
			`already allocated or unavailable`, machine2.Id())
	c.Assert(err, gc.ErrorMatches, msg)
}
Example #4
func (s *environInstanceSuite) TestInstancesFail(c *gc.C) {
	attrs := testing.Attrs{
		"name":     "testname",
		"region":   "testregion",
		"endpoint": "https://0.1.2.3:2000/api/2.0/",
		"username": mock.TestUser,
		"password": mock.TestPassword,
	}
	baseConfig := newConfig(c, validAttrs().Merge(attrs))

	newClientFunc := newClient
	s.PatchValue(&newClient, func(cfg *environConfig) (*environClient, error) {
		cli, err := newClientFunc(cfg)
		if cli != nil {
			cli.conn.ConnectTimeout(10 * time.Millisecond)
		}
		return cli, err
	})

	environ := s.createEnviron(c, baseConfig)

	instances, err := environ.AllInstances()
	c.Assert(instances, gc.IsNil)
	c.Assert(err, gc.NotNil)

	instances, err = environ.Instances([]instance.Id{instance.Id("123"), instance.Id("321")})
	c.Assert(instances, gc.IsNil)
	c.Assert(err, gc.NotNil)
}
Example #5
func (s *LxcSuite) TestDestroyContainerRemovesAutostartLink(c *gc.C) {
	manager := s.makeManager(c, "test")
	instance := containertesting.CreateContainer(c, manager, "1/lxc/0")
	err := manager.DestroyContainer(instance.Id())
	c.Assert(err, jc.ErrorIsNil)
	autostartLink := lxc.RestartSymlink(string(instance.Id()))
	c.Assert(autostartLink, jc.SymlinkDoesNotExist)
}
Example #6
func (s *instanceSuite) TestAllInstances(c *gc.C) {
	s.sender = s.getInstancesSender()
	instances, err := s.env.AllInstances()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instances, gc.HasLen, 2)
	c.Assert(instances[0].Id(), gc.Equals, instance.Id("machine-0"))
	c.Assert(instances[1].Id(), gc.Equals, instance.Id("machine-1"))
}
Example #7
func (*utilSuite) TestGetSystemIdValues(c *gc.C) {
	instanceId1 := instance.Id("/MAAS/api/1.0/nodes/system_id1/")
	instanceId2 := instance.Id("/MAAS/api/1.0/nodes/system_id2/")
	instanceIds := []instance.Id{instanceId1, instanceId2}

	values := getSystemIdValues("id", instanceIds)

	c.Check(values["id"], gc.DeepEquals, []string{"system_id1", "system_id2"})
}
Example #8
func (s *ebsVolumeSuite) createVolumes(vs storage.VolumeSource, instanceId string) ([]storage.Volume, error) {
	if instanceId == "" {
		instanceId = s.srv.ec2srv.NewInstances(1, "m1.medium", imageId, ec2test.Running, nil)[0]
	}
	volume0 := names.NewVolumeTag("0")
	volume1 := names.NewVolumeTag("1")
	volume2 := names.NewVolumeTag("2")
	params := []storage.VolumeParams{{
		Tag:      volume0,
		Size:     10 * 1000,
		Provider: ec2.EBS_ProviderType,
		Attributes: map[string]interface{}{
			"persistent":  true,
			"volume-type": "io1",
			"iops":        100,
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
		ResourceTags: map[string]string{
			tags.JujuEnv: s.TestConfig["uuid"].(string),
		},
	}, {
		Tag:      volume1,
		Size:     20 * 1000,
		Provider: ec2.EBS_ProviderType,
		Attributes: map[string]interface{}{
			"persistent": true,
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
		ResourceTags: map[string]string{
			tags.JujuEnv: "something-else",
		},
	}, {
		Tag:      volume2,
		Size:     30 * 1000,
		Provider: ec2.EBS_ProviderType,
		ResourceTags: map[string]string{
			"abc": "123",
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
	}}
	vols, _, err := vs.CreateVolumes(params)
	return vols, err
}
Example #9
// setUpManual adds "manually provisioned" machines to state:
// one manager machine, and one non-manager.
func (s *destroyModelSuite) setUpManual(c *gc.C) (m0, m1 *state.Machine) {
	m0, err := s.State.AddMachine("precise", state.JobManageModel)
	c.Assert(err, jc.ErrorIsNil)
	err = m0.SetProvisioned(instance.Id("manual:0"), "manual:0:fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	m1, err = s.State.AddMachine("precise", state.JobHostUnits)
	c.Assert(err, jc.ErrorIsNil)
	err = m1.SetProvisioned(instance.Id("manual:1"), "manual:1:fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)
	return m0, m1
}
Example #10
func (s *environAvailzonesSuite) TestInstancesReturnPartialInstances(c *gc.C) {
	client := vsphere.ExposeEnvFakeClient(s.Env)
	client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties)
	vmName1 := common.MachineFullName(s.Env, "1")
	vmName2 := common.MachineFullName(s.Env, "2")
	s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName1, Rp: "rp1"}, vsphere.InstRp{Inst: "Some inst", Rp: "rp2"})

	_, err := s.Env.Instances([]instance.Id{instance.Id(vmName1), instance.Id(vmName2)})

	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
}
Example #11
func (s *KVMSuite) TestDestroyContainer(c *gc.C) {
	instance := containertesting.CreateContainer(c, s.manager, "1/lxc/0")

	err := s.manager.DestroyContainer(instance.Id())
	c.Assert(err, gc.IsNil)

	name := string(instance.Id())
	// Check that the container directory is no longer in the container dir,
	c.Assert(filepath.Join(s.ContainerDir, name), jc.DoesNotExist)
	// but is instead in the removed container dir.
	c.Assert(filepath.Join(s.RemovedDir, name), jc.IsDirectory)
}
Example #12
func (s *environAvailzonesSuite) TestInstances(c *gc.C) {
	client := vsphere.ExposeEnvFakeClient(s.Env)
	client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties)
	vmName1 := common.MachineFullName(s.Env, "1")
	vmName2 := common.MachineFullName(s.Env, "2")
	s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName1, Rp: "rp1"}, vsphere.InstRp{Inst: vmName2, Rp: "rp2"})

	instances, err := s.Env.Instances([]instance.Id{instance.Id(vmName1), instance.Id(vmName2)})

	c.Assert(err, jc.ErrorIsNil)
	c.Assert(len(instances), gc.Equals, 2)
	c.Assert(string(instances[0].Id()), gc.Equals, vmName1)
	c.Assert(string(instances[1].Id()), gc.Equals, vmName2)
}
Example #13
// getInstance returns an up-to-date version of the instance with the given
// name.
func (env *azureEnviron) getInstance(hostedService *gwacl.HostedService, roleName string) (instance.Instance, error) {
	if n := len(hostedService.Deployments); n != 1 {
		return nil, fmt.Errorf("expected one deployment for %q, got %d", hostedService.ServiceName, n)
	}
	deployment := &hostedService.Deployments[0]

	var maskStateServerPorts bool
	var instanceId instance.Id
	switch deployment.Name {
	case deploymentNameV1(hostedService.ServiceName):
		// Old style instance.
		instanceId = instance.Id(hostedService.ServiceName)
		if n := len(deployment.RoleList); n != 1 {
			return nil, fmt.Errorf("expected one role for %q, got %d", deployment.Name, n)
		}
		roleName = deployment.RoleList[0].RoleName
		// In the old implementation of the Azure provider,
		// all machines opened the state and API server ports.
		maskStateServerPorts = true

	case deploymentNameV2(hostedService.ServiceName):
		instanceId = instance.Id(fmt.Sprintf("%s-%s", hostedService.ServiceName, roleName))
		// Newly created state server machines are put into
		// the cloud service with the stateServerLabel label.
		if decoded, err := base64.StdEncoding.DecodeString(hostedService.Label); err == nil {
			maskStateServerPorts = string(decoded) == stateServerLabel
		}
	}

	var roleInstance *gwacl.RoleInstance
	for i := range deployment.RoleInstanceList {
		// Index into the slice rather than taking the address of the
		// loop variable, which is reused on every iteration.
		if deployment.RoleInstanceList[i].RoleName == roleName {
			roleInstance = &deployment.RoleInstanceList[i]
			break
		}
	}

	instance := &azureInstance{
		environ:              env,
		hostedService:        &hostedService.HostedServiceDescriptor,
		instanceId:           instanceId,
		deploymentName:       deployment.Name,
		roleName:             roleName,
		roleInstance:         roleInstance,
		maskStateServerPorts: maskStateServerPorts,
	}
	return instance, nil
}
Example #14
func (s *LxcSuite) TestCreateContainerWithBlockStorage(c *gc.C) {
	err := os.Remove(s.RestartDir)
	c.Assert(err, jc.ErrorIsNil)

	manager := s.makeManager(c, "test")
	machineConfig, err := containertesting.MockMachineConfig("1/lxc/0")
	c.Assert(err, jc.ErrorIsNil)
	storageConfig := &container.StorageConfig{AllowMount: true}
	networkConfig := container.BridgeNetworkConfig("nic42", 4321, nil)
	instance := containertesting.CreateContainerWithMachineAndNetworkAndStorageConfig(c, manager, machineConfig, networkConfig, storageConfig)
	name := string(instance.Id())
	autostartLink := lxc.RestartSymlink(name)
	config, err := ioutil.ReadFile(lxc.ContainerConfigFilename(name))
	c.Assert(err, jc.ErrorIsNil)
	expected := fmt.Sprintf(`
# network config
# interface "eth0"
lxc.network.type = veth
lxc.network.link = nic42
lxc.network.flags = up
lxc.network.mtu = 4321

lxc.start.auto = 1
lxc.mount.entry = %s var/log/juju none defaults,bind 0 0

lxc.aa_profile = lxc-container-default-with-mounting
lxc.cgroup.devices.allow = b 7:* rwm
lxc.cgroup.devices.allow = c 10:237 rwm
`, s.logDir)
	c.Assert(string(config), gc.Equals, expected)
	c.Assert(autostartLink, jc.DoesNotExist)
}
Example #15
func (s *clientSuite) TestClientAddMachinesWithInstanceIdSomeErrors(c *gc.C) {
	apiParams := make([]params.AddMachineParams, 3)
	addrs := network.NewAddresses("1.2.3.4")
	hc := instance.MustParseHardware("mem=4G")
	for i := 0; i < 3; i++ {
		apiParams[i] = params.AddMachineParams{
			Jobs:                    []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
			InstanceId:              instance.Id(fmt.Sprintf("1234-%d", i)),
			Nonce:                   "foo",
			HardwareCharacteristics: hc,
			Addrs:                   params.FromNetworkAddresses(addrs...),
		}
	}
	// This will cause the last add-machine to fail.
	apiParams[2].Nonce = ""
	machines, err := s.APIState.Client().AddMachines(apiParams)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(len(machines), gc.Equals, 3)
	for i, machineResult := range machines {
		if i == 2 {
			c.Assert(machineResult.Error, gc.NotNil)
			c.Assert(machineResult.Error, gc.ErrorMatches, "cannot add a new machine: cannot add a machine with an instance id and no nonce")
		} else {
			c.Assert(machineResult.Machine, gc.DeepEquals, strconv.Itoa(i))
			s.checkMachine(c, machineResult.Machine, series.LatestLts(), apiParams[i].Constraints.String())
			instanceId := fmt.Sprintf("1234-%d", i)
			s.checkInstance(c, machineResult.Machine, instanceId, "foo", hc, addrs)
		}
	}
}
Example #16
func (*utilSuite) TestExtractSystemId(c *gc.C) {
	instanceId := instance.Id("/MAAS/api/1.0/nodes/system_id/")

	systemId := extractSystemId(instanceId)

	c.Check(systemId, gc.Equals, "system_id")
}
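Taken together, this test and TestGetSystemIdValues in Example #7 pin down the MAAS id scheme: the instance id is the node URI, and the system id is its last path element. A sketch consistent with both tests, though not necessarily the real implementation (assumes strings and net/url are imported):
// extractSystemId returns the trailing path element of a MAAS node URI,
// e.g. "/MAAS/api/1.0/nodes/system_id/" -> "system_id".
func extractSystemId(instId instance.Id) string {
	trimmed := strings.TrimSuffix(string(instId), "/")
	return trimmed[strings.LastIndex(trimmed, "/")+1:]
}

// getSystemIdValues collects the system ids of instanceIds under key.
func getSystemIdValues(key string, instanceIds []instance.Id) url.Values {
	values := url.Values{}
	for _, id := range instanceIds {
		values.Add(key, extractSystemId(id))
	}
	return values
}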
Example #17
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	s.PatchValue(&gatherTime, 30*time.Millisecond)
	testGetter := new(testInstanceGetter)

	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	aggregator := newAggregator(testGetter)

	replyChan := make(chan instanceInfoReply)
	req := instanceInfoReq{
		reply:  replyChan,
		instId: instance.Id("foo"),
	}
	aggregator.reqc <- req
	reply := <-replyChan
	c.Assert(reply.err, gc.IsNil)

	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		info, err := aggregator.instanceInfo(id)
		c.Check(err, gc.IsNil)
		c.Check(info.status, gc.Equals, expectStatus)
		wg.Done()
	}

	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")
	wg.Wait()

	c.Assert(testGetter.ids, gc.HasLen, 2)
}
Example #18
func (suite *maas2EnvironSuite) TestStartInstanceParams(c *gc.C) {
	var env *maasEnviron
	suite.injectController(&fakeController{
		allocateMachineArgsCheck: func(args gomaasapi.AllocateMachineArgs) {
			c.Assert(args, gc.DeepEquals, gomaasapi.AllocateMachineArgs{
				AgentName: env.ecfg().maasAgentName(),
				Zone:      "foo",
				MinMemory: 8192,
			})
		},
		allocateMachine: newFakeMachine("Bruce Sterling", arch.HostArch(), ""),
		allocateMachineMatches: gomaasapi.ConstraintMatches{
			Storage: map[string][]gomaasapi.BlockDevice{},
		},
		zones: []gomaasapi.Zone{&fakeZone{name: "foo"}},
	})
	suite.setupFakeTools(c)
	env = suite.makeEnviron(c, nil)
	params := environs.StartInstanceParams{
		Placement:   "zone=foo",
		Constraints: constraints.MustParse("mem=8G"),
	}
	result, err := testing.StartInstanceWithParams(env, "1", params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result.Instance.Id(), gc.Equals, instance.Id("Bruce Sterling"))
}
Example #19
func (mst *mockState) setUpState() {
	mst.mu.Lock()
	defer mst.mu.Unlock()

	ips := []struct {
		value      string
		uuid       string
		life       state.Life
		subnetId   string
		instanceId string
		macaddr    string
	}{
		{"0.1.2.3", "00000000-1111-2222-3333-0123456789ab", state.Alive, "a", "a3", "fff3"},
		{"0.1.2.4", "00000000-1111-2222-4444-0123456789ab", state.Alive, "b", "b4", "fff4"},
		{"0.1.2.5", "00000000-1111-2222-5555-0123456789ab", state.Alive, "b", "b5", "fff5"},
		{"0.1.2.6", "00000000-1111-2222-6666-0123456789ab", state.Dead, "c", "c6", "fff6"},
		{"0.1.2.7", "00000000-1111-2222-7777-0123456789ab", state.Dead, "c", "c7", "fff7"},
	}
	for _, ip := range ips {
		mst.ipAddresses[ip.value] = &mockIPAddress{
			stub:       mst.stub,
			st:         mst,
			value:      ip.value,
			tag:        names.NewIPAddressTag(ip.uuid),
			life:       ip.life,
			subnetId:   ip.subnetId,
			instanceId: instance.Id(ip.instanceId),
			addr:       network.NewAddress(ip.value),
			macaddr:    ip.macaddr,
		}
	}
}
Example #20
func (s *storageProvisionerSuite) TestUpdateEnvironConfig(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) {
		c.Assert(envConfig, gc.NotNil)
		c.Assert(sourceConfig, gc.NotNil)
		c.Assert(envConfig.AllAttrs()["foo"], gc.Equals, "bar")
		return nil, errors.New("zinga")
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	newConfig, err := args.environ.cfg.Apply(map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	args.environ.watcher.changes <- struct{}{}
	args.environ.setConfig(newConfig)
	args.environ.watcher.changes <- struct{}{}
	args.volumes.volumesWatcher.changes <- []string{"1", "2"}

	err = worker.Wait()
	c.Assert(err, gc.ErrorMatches, `processing pending volumes: creating volumes: getting volume source: getting storage source "dummy": zinga`)
}
Example #21
func (s *machineSuite) TestSinglePollWhenInstanceInfoUnimplemented(c *gc.C) {
	s.PatchValue(&ShortPoll, 1*time.Millisecond)
	s.PatchValue(&LongPoll, 1*time.Millisecond)
	count := int32(0)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		c.Check(id, gc.Equals, instance.Id("i1234"))
		atomic.AddInt32(&count, 1)
		err := &params.Error{
			Code:    params.CodeNotImplemented,
			Message: "instance address not implemented",
		}
		return instanceInfo{}, err
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: "i1234",
		refresh:    func() error { return nil },
		life:       params.Alive,
	}
	died := make(chan machine)

	go runMachine(context, m, nil, died)

	time.Sleep(coretesting.ShortWait)
	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killAllErr, gc.Equals, nil)
	c.Assert(atomic.LoadInt32(&count), gc.Equals, int32(1))
}
Example #22
func (s *LxcSuite) TestCreateContainerEvents(c *gc.C) {
	manager := s.makeManager(c, "test")
	instance := containertesting.CreateContainer(c, manager, "1")
	id := string(instance.Id())
	s.AssertEvent(c, <-s.events, mock.Created, id)
	s.AssertEvent(c, <-s.events, mock.Started, id)
}
Example #23
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	// This check in particular quite often leads to tests hanging
	// indefinitely on i386.
	coretesting.SkipIfI386(c, "lp:1425569")

	var event mock.Event
	s.State.StartSync()
	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Created)
		argsSet := set.NewStrings(event.TemplateArgs...)
		c.Assert(argsSet.Contains("imageURL"), jc.IsTrue)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting the mock container to get created")
	}

	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Started)
		err := machine.Refresh()
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting the mock container to start")
	}

	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
Example #24
func (env *joyentEnviron) Instances(ids []instance.Id) ([]instance.Instance, error) {
	if len(ids) == 0 {
		return nil, nil
	}

	logger.Debugf("Looking for instances %q", ids)

	instances := make([]instance.Instance, len(ids))
	found := 0

	allInstances, err := env.AllInstances()
	if err != nil {
		return nil, err
	}

	for i, id := range ids {
		for _, inst := range allInstances {
			if inst.Id() == id {
				instances[i] = inst
				found++
				break
			}
		}
	}

	logger.Debugf("Found %d instances %q", found, instances)

	if found == 0 {
		return nil, environs.ErrNoInstances
	} else if found < len(ids) {
		return instances, environs.ErrPartialInstances
	}

	return instances, nil
}
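A design note on the lookup above: the nested scan is O(len(ids) x len(allInstances)). An equivalent map-based variant keeps the same index-aligned result; shown only as a sketch, assuming provider ids are unique:
	byId := make(map[instance.Id]instance.Instance, len(allInstances))
	for _, inst := range allInstances {
		byId[inst.Id()] = inst
	}
	for i, id := range ids {
		if inst, ok := byId[id]; ok {
			instances[i] = inst
			found++
		}
	}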
Example #25
func (s *BootstrapSuite) TestCannotRecordStartedInstance(c *gc.C) {
	innerStorage := newStorage(s, c)
	stor := &mockStorage{Storage: innerStorage}

	startInstance := func(
		_ string, _ constraints.Value, _ []string, _ tools.List, _ *cloudinit.MachineConfig,
	) (
		instance.Instance, *instance.HardwareCharacteristics, []network.Info, error,
	) {
		stor.putErr = fmt.Errorf("suddenly a wild blah")
		return &mockInstance{id: "i-blah"}, nil, nil, nil
	}

	var stopped []instance.Id
	stopInstances := func(ids []instance.Id) error {
		stopped = append(stopped, ids...)
		return nil
	}

	env := &mockEnviron{
		storage:       stor,
		startInstance: startInstance,
		stopInstances: stopInstances,
		config:        configGetter(c),
	}

	ctx := coretesting.Context(c)
	err := common.Bootstrap(ctx, env, environs.BootstrapParams{})
	c.Assert(err, gc.ErrorMatches, "cannot save state: suddenly a wild blah")
	c.Assert(stopped, gc.HasLen, 1)
	c.Assert(stopped[0], gc.Equals, instance.Id("i-blah"))
}
Example #26
func (s *DestroySuite) TestSuccess(c *gc.C) {
	s.PatchValue(&jujuversion.Current, testing.FakeVersionNumber)
	stor := newStorage(s, c)
	err := stor.Put("somewhere", strings.NewReader("stuff"), 5)
	c.Assert(err, jc.ErrorIsNil)

	env := &mockEnviron{
		storage: stor,
		allInstances: func() ([]instance.Instance, error) {
			return []instance.Instance{
				&mockInstance{id: "one"},
			}, nil
		},
		stopInstances: func(ids []instance.Id) error {
			c.Assert(ids, gc.HasLen, 1)
			c.Assert(ids[0], gc.Equals, instance.Id("one"))
			return nil
		},
		config: configGetter(c),
	}
	err = common.Destroy(env)
	c.Assert(err, jc.ErrorIsNil)

	// common.Destroy doesn't touch provider/object storage anymore.
	r, err := stor.Get("somewhere")
	c.Assert(err, jc.ErrorIsNil)
	r.Close()
}
Example #27
// countPolls sets up a machine loop with the given
// addresses and status to be returned from getInstanceInfo,
// waits for coretesting.ShortWait, and returns the
// number of times the instance is polled.
func countPolls(c *gc.C, addrs []network.Address, instId, instStatus string, machineStatus params.Status) int {
	count := int32(0)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		c.Check(string(id), gc.Equals, instId)
		atomic.AddInt32(&count, 1)
		if addrs == nil {
			return instanceInfo{}, fmt.Errorf("no instance addresses available")
		}
		return instanceInfo{addrs, instStatus}, nil
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: instance.Id(instId),
		refresh:    func() error { return nil },
		addresses:  addrs,
		life:       params.Alive,
		status:     machineStatus,
	}
	died := make(chan machine)

	go runMachine(context, m, nil, died)

	time.Sleep(coretesting.ShortWait)
	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killAllErr, gc.Equals, nil)
	return int(atomic.LoadInt32(&count))
}
Example #28
func restoreBootstrapMachine(st api.Connection, backupFile string, agentConf agentConfig) (addr string, err error) {
	client := st.Client()
	addr, err = client.PublicAddress("0")
	if err != nil {
		return "", errors.Annotate(err, "cannot get public address of bootstrap machine")
	}
	paddr, err := client.PrivateAddress("0")
	if err != nil {
		return "", errors.Annotate(err, "cannot get private address of bootstrap machine")
	}
	status, err := client.Status(nil)
	if err != nil {
		return "", errors.Annotate(err, "cannot get environment status")
	}
	info, ok := status.Machines["0"]
	if !ok {
		return "", fmt.Errorf("cannot find bootstrap machine in status")
	}
	newInstId := instance.Id(info.InstanceId)

	progress("copying backup file to bootstrap host")
	if err := sendViaScp(backupFile, addr, "~/juju-backup.tgz"); err != nil {
		return "", errors.Annotate(err, "cannot copy backup file to bootstrap instance")
	}
	progress("updating bootstrap machine")
	if err := runViaSsh(addr, updateBootstrapMachineScript(newInstId, agentConf, addr, paddr)); err != nil {
		return "", errors.Annotate(err, "update script failed")
	}
	return addr, nil
}
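sendViaScp and runViaSsh are restore helpers whose implementations are not shown here. A minimal sketch of what they might look like, assuming plain OpenSSH tooling, an ubuntu login user, and juju/utils' ShQuote for shell quoting (the real helpers may differ):
func sendViaScp(file, host, destFile string) error {
	// Copy the backup archive to the remote host.
	return exec.Command("scp", file, "ubuntu@"+host+":"+destFile).Run()
}

func runViaSsh(addr, script string) error {
	// Run the script remotely with root privileges, surfacing its output.
	cmd := exec.Command("ssh", "ubuntu@"+addr, "sudo -n bash -c "+utils.ShQuote(script))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}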
Example #29
func (s *cinderVolumeSourceSuite) TestAttachVolumes(c *gc.C) {
	mockAdapter := &mockAdapter{
		attachVolume: func(serverId, volId, mountPoint string) (*nova.VolumeAttachment, error) {
			c.Check(volId, gc.Equals, mockVolId)
			c.Check(serverId, gc.Equals, mockServerId)
			return &nova.VolumeAttachment{
				Id:       volId,
				VolumeId: volId,
				ServerId: serverId,
				Device:   "/dev/sda",
			}, nil
		},
	}

	volSource := openstack.NewCinderVolumeSource(mockAdapter)
	results, err := volSource.AttachVolumes([]storage.VolumeAttachmentParams{{
		Volume:   mockVolumeTag,
		VolumeId: mockVolId,
		AttachmentParams: storage.AttachmentParams{
			Provider:   openstack.CinderProviderType,
			Machine:    mockMachineTag,
			InstanceId: instance.Id(mockServerId),
		}},
	})
	c.Assert(err, jc.ErrorIsNil)
	c.Check(results, jc.DeepEquals, []storage.AttachVolumesResult{{
		VolumeAttachment: &storage.VolumeAttachment{
			mockVolumeTag,
			mockMachineTag,
			storage.VolumeAttachmentInfo{
				DeviceName: "sda",
			},
		},
	}})
}
Example #30
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	instanceIdSet := make(set.Strings)
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		instanceIds, err := state.ServiceInstances(st, unit.ApplicationName())
		if err != nil {
			return nil, err
		}
		for _, instanceId := range instanceIds {
			instanceIdSet.Add(string(instanceId))
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
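A hypothetical usage sketch (illustrative only; the callers are not shown here): the sorted, de-duplicated result is convenient as a placement hint, for example as a distribution group when provisioning a new machine.
	ids, err := commonServiceInstances(st, machine)
	if err != nil {
		return errors.Annotate(err, "cannot compute distribution group")
	}
	logger.Debugf("distribute new instances away from %v", ids)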