Example #1
// newRole creates a gwacl.Role object (an Azure Virtual Machine) which uses
// the given Virtual Hard Drive.
// The VM will have:
// - an 'ubuntu' user defined with an unguessable (randomly generated) password
// - its ssh port (TCP 22) open
// - its state port (mongoDB, TCP) open
// - its API port (TCP) open
func (env *azureEnviron) newRole(vhd *gwacl.OSVirtualHardDisk, userData string, roleHostname string) *gwacl.Role {
	// TODO: Derive the role size from the constraints.
	// ExtraSmall|Small|Medium|Large|ExtraLarge
	roleSize := "Small"
	// Create a Linux provisioning configuration with the given hostname,
	// an 'ubuntu' user with a randomly generated password, and SSH
	// password authentication disabled.
	hostname := roleHostname
	username := "ubuntu"
	password := gwacl.MakeRandomPassword()
	linuxConfigurationSet := gwacl.NewLinuxProvisioningConfigurationSet(hostname, username, password, userData, "true")
	config := env.Config()
	// Generate a Network Configuration with the initially required ports
	// open.
	networkConfigurationSet := gwacl.NewNetworkConfigurationSet([]gwacl.InputEndpoint{
		{
			LocalPort: 22,
			Name:      "sshport",
			Port:      22,
			Protocol:  "TCP",
		},
		// TODO: Ought to have this only for state servers.
		{
			LocalPort: config.StatePort(),
			Name:      "stateport",
			Port:      config.StatePort(),
			Protocol:  "TCP",
		},
		// TODO: Ought to have this only for API servers.
		{
			LocalPort: config.APIPort(),
			Name:      "apiport",
			Port:      config.APIPort(),
			Protocol:  "TCP",
		},
	}, nil)
	roleName := gwacl.MakeRandomRoleName("juju")
	// The ordering of these configuration sets is significant for the tests.
	return gwacl.NewRole(
		roleSize, roleName,
		[]gwacl.ConfigurationSet{*linuxConfigurationSet, *networkConfigurationSet},
		[]gwacl.OSVirtualHardDisk{*vhd})
}
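The role size above is hard-coded, as the TODO notes. Below is a minimal sketch of deriving a size name from a memory constraint instead; the helper name and the thresholds are illustrative assumptions, not part of the provider.

// Illustrative sketch only: map a memory amount (in MB) onto the Azure role
// sizes listed in the TODO above. The thresholds are assumptions.
func roleSizeForMem(memMB uint64) string {
	switch {
	case memMB <= 768:
		return "ExtraSmall"
	case memMB <= 1792:
		return "Small"
	case memMB <= 3584:
		return "Medium"
	case memMB <= 7168:
		return "Large"
	default:
		return "ExtraLarge"
	}
}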
Example #2
// getInitialEndpoints returns a slice of the endpoints every instance should have open
// (ssh port, etc).
func (env *azureEnviron) getInitialEndpoints() []gwacl.InputEndpoint {
	config := env.Config()
	return []gwacl.InputEndpoint{
		{
			LocalPort: 22,
			Name:      "sshport",
			Port:      22,
			Protocol:  "tcp",
		},
		// TODO: Ought to have this only for state servers.
		{
			LocalPort: config.StatePort(),
			Name:      "stateport",
			Port:      config.StatePort(),
			Protocol:  "tcp",
		},
		// TODO: Ought to have this only for API servers.
		{
			LocalPort: config.APIPort(),
			Name:      "apiport",
			Port:      config.APIPort(),
			Protocol:  "tcp",
		}}
}
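A minimal usage sketch, assuming getInitialEndpoints is defined on the same azureEnviron as in Example #1: the helper can replace the inline endpoint list when building the network configuration set, using the same gwacl.NewNetworkConfigurationSet call shown there.

// Sketch: build the network configuration from the shared helper instead of
// an inline literal.
endpoints := env.getInitialEndpoints()
networkConfigurationSet := gwacl.NewNetworkConfigurationSet(endpoints, nil)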
Example #3
func (suite *EnvironSuite) TestStateInfo(c *C) {
	env := suite.makeEnviron()
	hostname := "test"
	input := `{"system_id": "system_id", "hostname": "` + hostname + `"}`
	node := suite.testMAASObject.TestServer.NewNode(input)
	testInstance := &maasInstance{&node, suite.environ}
	err := environs.SaveState(
		env.Storage(),
		&environs.BootstrapState{StateInstances: []instance.Id{testInstance.Id()}})
	c.Assert(err, IsNil)

	stateInfo, apiInfo, err := env.StateInfo()
	c.Assert(err, IsNil)

	config := env.Config()
	statePortSuffix := fmt.Sprintf(":%d", config.StatePort())
	apiPortSuffix := fmt.Sprintf(":%d", config.APIPort())
	c.Assert(stateInfo.Addrs, DeepEquals, []string{hostname + statePortSuffix})
	c.Assert(apiInfo.Addrs, DeepEquals, []string{hostname + apiPortSuffix})
}
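For concreteness, assuming juju's usual defaults of 37017 for the state port and 17070 for the API port, the assertions above compare against addresses of this shape:

// Illustrative values only; the real ports come from env.Config(), and the
// default port numbers are an assumption.
// stateInfo.Addrs == []string{"test:37017"}
// apiInfo.Addrs   == []string{"test:17070"}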
Example #4
func (*EnvironSuite) TestStateInfo(c *C) {
	instanceID := "my-instance"
	patchWithServiceListResponse(c, []gwacl.HostedServiceDescriptor{{
		ServiceName: instanceID,
	}})
	env := makeEnviron(c)
	cleanup := setDummyStorage(c, env)
	defer cleanup()
	err := environs.SaveState(
		env.Storage(),
		&environs.BootstrapState{StateInstances: []instance.Id{instance.Id(instanceID)}})
	c.Assert(err, IsNil)

	stateInfo, apiInfo, err := env.StateInfo()
	c.Assert(err, IsNil)

	config := env.Config()
	dnsName := "my-instance." + AZURE_DOMAIN_NAME
	stateServerAddr := fmt.Sprintf("%s:%d", dnsName, config.StatePort())
	apiServerAddr := fmt.Sprintf("%s:%d", dnsName, config.APIPort())
	c.Check(stateInfo.Addrs, DeepEquals, []string{stateServerAddr})
	c.Check(apiInfo.Addrs, DeepEquals, []string{apiServerAddr})
}
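The Azure variant builds the DNS name from the hosted service name plus AZURE_DOMAIN_NAME. Assuming that constant is the public "cloudapp.net" suffix, the expected addresses take this shape:

// Illustrative only; AZURE_DOMAIN_NAME == "cloudapp.net" is an assumption.
// stateInfo.Addrs == []string{"my-instance.cloudapp.net:<StatePort>"}
// apiInfo.Addrs   == []string{"my-instance.cloudapp.net:<APIPort>"}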
Example #5
// internalStartInstance is the internal version of StartInstance, used both
// by Bootstrap and by StartInstance itself.
// machineConfig will be filled out with further details, but should contain
// MachineID, MachineNonce, StateInfo, and APIInfo.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (e *environ) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	arches := possibleTools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series[0],
		Arches:      arches,
		Constraints: cons,
	})
	if err != nil {
		return nil, nil, err
	}
	tools, err := possibleTools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, nil, fmt.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	machineConfig.Tools = tools[0]

	if err := environs.FinishMachineConfig(machineConfig, e.Config(), cons); err != nil {
		return nil, nil, err
	}
	userData, err := environs.ComposeUserData(machineConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make user data: %v", err)
	}
	log.Debugf("environs/openstack: openstack user data; %d bytes", len(userData))
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, nil, fmt.Errorf("cannot allocate a public IP as needed: %v", err)
		} else {
			publicIP = fip
			log.Infof("environs/openstack: allocated public IP %s", publicIP.IP)
		}
	}
	config := e.Config()
	groups, err := e.setUpGroups(machineConfig.MachineId, config.StatePort(), config.APIPort())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	var groupNames = make([]nova.SecurityGroupName, len(groups))
	for i, g := range groups {
		groupNames[i] = nova.SecurityGroupName{g.Name}
	}

	var server *nova.Entity
	for a := shortAttempt.Start(); a.Next(); {
		server, err = e.nova().RunServer(nova.RunServerOpts{
			Name:               e.machineFullName(machineConfig.MachineId),
			FlavorId:           spec.InstanceType.Id,
			ImageId:            spec.Image.Id,
			UserData:           userData,
			SecurityGroupNames: groupNames,
		})
		if err == nil || !gooseerrors.IsNotFound(err) {
			break
		}
	}
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instance: %v", err)
	}
	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot get started instance: %v", err)
	}
	inst := &openstackInstance{
		e:            e,
		ServerDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	log.Infof("environs/openstack: started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				log.Debugf("environs/openstack: failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, nil, fmt.Errorf("cannot assign public address %s to instance %q: %v", publicIP.IP, inst.Id(), err)
		}
		log.Infof("environs/openstack: assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return inst, inst.hardwareCharacteristics(), nil
}
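The retry loop above relies on a package-level shortAttempt strategy that is not shown in this example. A plausible definition, assuming juju's utils.AttemptStrategy type, is sketched below; the concrete durations are assumptions.

// Assumed definition, for illustration only.
var shortAttempt = utils.AttemptStrategy{
	Total: 5 * time.Second,        // give up after roughly this long
	Delay: 200 * time.Millisecond, // pause between attempts
}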
Example #6
// internalStartInstance is the internal version of StartInstance, used both
// by Bootstrap and by StartInstance itself.
// TODO(bug 1199847): Some of this work can be shared between providers.
func (e *environ) internalStartInstance(cons constraints.Value, possibleTools tools.List, machineConfig *cloudinit.MachineConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {
	series := possibleTools.Series()
	if len(series) != 1 {
		panic(fmt.Errorf("should have gotten tools for one series, got %v", series))
	}
	arches := possibleTools.Arches()
	storage := ebsStorage
	baseURLs, err := e.getImageBaseURLs()
	if err != nil {
		return nil, nil, err
	}
	spec, err := findInstanceSpec(baseURLs, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series[0],
		Arches:      arches,
		Constraints: cons,
		Storage:     &storage,
	})
	if err != nil {
		return nil, nil, err
	}
	tools, err := possibleTools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, nil, fmt.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	machineConfig.Tools = tools[0]
	if err := environs.FinishMachineConfig(machineConfig, e.Config(), cons); err != nil {
		return nil, nil, err
	}

	userData, err := environs.ComposeUserData(machineConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make user data: %v", err)
	}
	log.Debugf("environs/ec2: ec2 user data; %d bytes", len(userData))
	config := e.Config()
	groups, err := e.setUpGroups(machineConfig.MachineId, config.StatePort(), config.APIPort())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	var instances *ec2.RunInstancesResp

	for a := shortAttempt.Start(); a.Next(); {
		instances, err = e.ec2().RunInstances(&ec2.RunInstances{
			ImageId:        spec.Image.Id,
			MinCount:       1,
			MaxCount:       1,
			UserData:       userData,
			InstanceType:   spec.InstanceType.Name,
			SecurityGroups: groups,
		})
		if err == nil || ec2ErrCode(err) != "InvalidGroup.NotFound" {
			break
		}
	}
	if err != nil {
		return nil, nil, fmt.Errorf("cannot run instances: %v", err)
	}
	if len(instances.Instances) != 1 {
		return nil, nil, fmt.Errorf("expected 1 started instance, got %d", len(instances.Instances))
	}
	inst := &ec2Instance{
		e:        e,
		Instance: &instances.Instances[0],
		arch:     &spec.Image.Arch,
		instType: &spec.InstanceType,
	}
	log.Infof("environs/ec2: started instance %q", inst.Id())
	return inst, inst.hardwareCharacteristics(), nil
}
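The EC2 loop retries only while RunInstances fails with "InvalidGroup.NotFound", which covers the window in which a freshly created security group is not yet visible to the API. A sketch of an ec2ErrCode helper, assuming goamz's *ec2.Error type with a Code field, could look like this:

// Sketch only; the provider's actual helper may differ.
func ec2ErrCode(err error) string {
	ec2err, ok := err.(*ec2.Error)
	if !ok {
		return ""
	}
	return ec2err.Code
}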