func checkName(name string) { c, err := helpers.LoadCredentials() if err != nil { log.Fatalf("Error: %v", err) } ac := storage.NewAccountsClient(c["subscriptionID"]) spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) if err != nil { log.Fatalf("Error: %v", err) } ac.Authorizer = spt ac.Sender = autorest.CreateSender( autorest.WithLogging(log.New(os.Stdout, "sdk-example: ", log.LstdFlags))) ac.RequestInspector = withInspection() ac.ResponseInspector = byInspecting() cna, err := ac.CheckNameAvailability( storage.AccountCheckNameAvailabilityParameters{ Name: to.StringPtr(name), Type: to.StringPtr("Microsoft.Storage/storageAccounts")}) if err != nil { log.Fatalf("Error: %v", err) } else { if to.Bool(cna.NameAvailable) { fmt.Printf("The name '%s' is available\n", name) } else { fmt.Printf("The name '%s' is unavailable because %s\n", name, to.String(cna.Message)) } } }
func createAccount(resourceGroup, name string) { c, err := helpers.LoadCredentials() if err != nil { log.Fatalf("Error: %v", err) } ac := storage.NewAccountsClient(c["subscriptionID"]) spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) if err != nil { log.Fatalf("Error: %v", err) } ac.Authorizer = spt cna, err := ac.CheckNameAvailability( storage.AccountCheckNameAvailabilityParameters{ Name: to.StringPtr(name), Type: to.StringPtr("Microsoft.Storage/storageAccounts")}) if err != nil { log.Fatalf("Error: %v", err) return } if !to.Bool(cna.NameAvailable) { fmt.Printf("%s is unavailable -- try again\n", name) return } fmt.Printf("%s is available\n\n", name) ac.Sender = autorest.CreateSender(withWatcher()) ac.PollingMode = autorest.PollUntilAttempts ac.PollingAttempts = 5 cp := storage.AccountCreateParameters{} cp.Location = to.StringPtr("westus") cp.Properties = &storage.AccountPropertiesCreateParameters{AccountType: storage.StandardLRS} sa, err := ac.Create(resourceGroup, name, cp) if err != nil { if sa.Response.StatusCode != http.StatusAccepted { fmt.Printf("Creation of %s.%s failed with err -- %v\n", resourceGroup, name, err) return } fmt.Printf("Create initiated for %s.%s -- poll %s to check status\n", resourceGroup, name, sa.GetPollingLocation()) return } fmt.Printf("Successfully created %s.%s\n\n", resourceGroup, name) ac.Sender = nil r, err := ac.Delete(resourceGroup, name) if err != nil { fmt.Printf("Delete of %s.%s failed with status %s\n...%v\n", resourceGroup, name, r.Status, err) return } fmt.Printf("Deletion of %s.%s succeeded -- %s\n", resourceGroup, name, r.Status) }
// makeVirtualMachine returns a minimal VirtualMachine value with the
// given name and a "Successful" provisioning state.
func makeVirtualMachine(name string) compute.VirtualMachine {
	properties := compute.VirtualMachineProperties{
		ProvisioningState: to.StringPtr("Successful"),
	}
	vm := compute.VirtualMachine{
		Name:       to.StringPtr(name),
		Properties: &properties,
	}
	return vm
}
func (v *azureVolumeSource) attachVolume( vm *compute.VirtualMachine, p storage.VolumeAttachmentParams, ) (_ *storage.VolumeAttachment, updated bool, _ error) { dataDisksRoot := dataDiskVhdRoot(v.env.config.location, v.env.config.storageAccount) dataDiskName := p.VolumeId vhdURI := dataDisksRoot + dataDiskName + vhdExtension var dataDisks []compute.DataDisk if vm.Properties.StorageProfile.DataDisks != nil { dataDisks = *vm.Properties.StorageProfile.DataDisks } for _, disk := range dataDisks { if to.String(disk.Name) != p.VolumeId { continue } if to.String(disk.Vhd.URI) != vhdURI { continue } // Disk is already attached. volumeAttachment := &storage.VolumeAttachment{ p.Volume, p.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(to.Int(disk.Lun)), }, } return volumeAttachment, false, nil } lun, err := nextAvailableLUN(vm) if err != nil { return nil, false, errors.Annotate(err, "choosing LUN") } dataDisk := compute.DataDisk{ Lun: to.IntPtr(lun), Name: to.StringPtr(dataDiskName), Vhd: &compute.VirtualHardDisk{to.StringPtr(vhdURI)}, Caching: compute.ReadWrite, CreateOption: compute.Attach, } dataDisks = append(dataDisks, dataDisk) vm.Properties.StorageProfile.DataDisks = &dataDisks volumeAttachment := storage.VolumeAttachment{ p.Volume, p.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(lun), }, } return &volumeAttachment, true, nil }
// createStorageAccount finds an available storage account name (using the
// supplied generator), creates the account in the given resource group and
// location with the given type and tags, and returns the account name and
// its primary key. Up to maxAttempts candidate names are tried; if none
// is available an error is returned.
func createStorageAccount(
	client storage.AccountsClient,
	accountType storage.AccountType,
	resourceGroup string,
	location string,
	tags map[string]string,
	accountNameGenerator func() string,
) (string, string, error) {
	logger.Debugf("creating storage account (finding available name)")
	const maxAttempts = 10
	for remaining := maxAttempts; remaining > 0; remaining-- {
		accountName := accountNameGenerator()
		logger.Debugf("- checking storage account name %q", accountName)
		result, err := client.CheckNameAvailability(
			storage.AccountCheckNameAvailabilityParameters{
				Name: to.StringPtr(accountName),
				// Azure is a little inconsistent with when Type is
				// required. It's required here.
				Type: to.StringPtr("Microsoft.Storage/storageAccounts"),
			},
		)
		if err != nil {
			return "", "", errors.Annotate(err, "checking account name availability")
		}
		if !to.Bool(result.NameAvailable) {
			// Name taken or rejected: log the reason and try the
			// next generated candidate.
			logger.Debugf(
				"%q is not available (%v): %v",
				accountName, result.Reason, result.Message,
			)
			continue
		}
		createParams := storage.AccountCreateParameters{
			Location: to.StringPtr(location),
			Tags:     toTagsPtr(tags),
			Properties: &storage.AccountPropertiesCreateParameters{
				AccountType: accountType,
			},
		}
		logger.Debugf("- creating %q storage account %q", accountType, accountName)
		// TODO(axw) account creation can fail if the account name is
		// available, but contains profanity. We should retry a set
		// number of times even if creating fails.
		if _, err := client.Create(resourceGroup, accountName, createParams); err != nil {
			return "", "", errors.Trace(err)
		}
		logger.Debugf("- listing storage account keys")
		listKeysResult, err := client.ListKeys(resourceGroup, accountName)
		if err != nil {
			return "", "", errors.Annotate(err, "listing storage account keys")
		}
		// Key1 is the account's primary access key.
		return accountName, to.String(listKeysResult.Key1), nil
	}
	return "", "", errors.New("could not find available storage account name")
}
// createVolume updates the provided VirtualMachine's StorageProfile with the // parameters for creating a new data disk. We don't actually interact with // the Azure API until after all changes to the VirtualMachine are made. func (v *azureVolumeSource) createVolume( vm *compute.VirtualMachine, p storage.VolumeParams, ) (*storage.Volume, *storage.VolumeAttachment, error) { lun, err := nextAvailableLUN(vm) if err != nil { return nil, nil, errors.Annotate(err, "choosing LUN") } dataDisksRoot := dataDiskVhdRoot(v.env.config.location, v.env.config.storageAccount) dataDiskName := p.Tag.String() vhdURI := dataDisksRoot + dataDiskName + vhdExtension sizeInGib := mibToGib(p.Size) dataDisk := compute.DataDisk{ Lun: to.IntPtr(lun), DiskSizeGB: to.IntPtr(int(sizeInGib)), Name: to.StringPtr(dataDiskName), Vhd: &compute.VirtualHardDisk{to.StringPtr(vhdURI)}, Caching: compute.ReadWrite, CreateOption: compute.Empty, } var dataDisks []compute.DataDisk if vm.Properties.StorageProfile.DataDisks != nil { dataDisks = *vm.Properties.StorageProfile.DataDisks } dataDisks = append(dataDisks, dataDisk) vm.Properties.StorageProfile.DataDisks = &dataDisks // Data disks associate VHDs to machines. In Juju's storage model, // the VHD is the volume and the disk is the volume attachment. volume := storage.Volume{ p.Tag, storage.VolumeInfo{ VolumeId: dataDiskName, Size: gibToMib(sizeInGib), // We don't currently support persistent volumes in // Azure, as it requires removal of "comp=media" when // deleting VMs, complicating cleanup. Persistent: true, }, } volumeAttachment := storage.VolumeAttachment{ p.Tag, p.Attachment.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(lun), }, } return &volume, &volumeAttachment, nil }
func makePublicIPAddress(pipName, vmName, ipAddress string) network.PublicIPAddress { tags := map[string]*string{"juju-machine-name": &vmName} pip := network.PublicIPAddress{ Name: to.StringPtr(pipName), Tags: &tags, Properties: &network.PublicIPAddressPropertiesFormat{}, } if ipAddress != "" { pip.Properties.IPAddress = to.StringPtr(ipAddress) } return pip }
// makeSecurityRule returns an inbound TCP allow rule with priority 200
// for the given destination address prefix and port range.
func makeSecurityRule(name, ipAddress, ports string) network.SecurityRule {
	properties := network.SecurityRulePropertiesFormat{
		Protocol:                 network.SecurityRuleProtocolTCP,
		DestinationAddressPrefix: to.StringPtr(ipAddress),
		DestinationPortRange:     to.StringPtr(ports),
		Access:                   network.Allow,
		Priority:                 to.IntPtr(200),
		Direction:                network.Inbound,
	}
	return network.SecurityRule{
		Name:       to.StringPtr(name),
		Properties: &properties,
	}
}
// createStorageAccount creates a StandardLRS storage account named after
// the resource group, in the group's location, but only if the name is
// reported as available.
//
// NOTE(review): when the name is NOT available this function returns nil
// without creating anything — presumably because the account already
// exists, but callers cannot distinguish that case from success. Confirm
// this is intended.
func createStorageAccount(
	group resources.ResourceGroup,
	arm arm.Client) error {
	ac := arm.StorageAccounts()
	cna, err := ac.CheckNameAvailability(
		storage.AccountCheckNameAvailabilityParameters{
			Name: group.Name,
			// Type is required by this API.
			Type: to.StringPtr("Microsoft.Storage/storageAccounts")})
	if err != nil {
		return err
	}
	if to.Bool(cna.NameAvailable) {
		name := *group.Name
		props := storage.AccountPropertiesCreateParameters{AccountType: storage.StandardLRS}
		// Resource group name and account name are deliberately the same.
		_, err = ac.Create(name, name, storage.AccountCreateParameters{
			Location:   group.Location,
			Properties: &props,
		})
		if err != nil {
			return fmt.Errorf("Failed to create storage account '%s' in location '%s': '%s'\n", name, *group.Location, err.Error())
		}
	}
	return nil
}
func createInternalVirtualNetwork( client network.ManagementClient, controllerResourceGroup string, location string, tags map[string]string, ) (*network.VirtualNetwork, error) { addressPrefixes := make([]string, 256) for i := range addressPrefixes { addressPrefixes[i] = fmt.Sprintf("10.%d.0.0/16", i) } virtualNetworkParams := network.VirtualNetwork{ Location: to.StringPtr(location), Tags: toTagsPtr(tags), Properties: &network.VirtualNetworkPropertiesFormat{ AddressSpace: &network.AddressSpace{&addressPrefixes}, }, } logger.Debugf("creating virtual network %q", internalNetworkName) vnetClient := network.VirtualNetworksClient{client} vnet, err := vnetClient.CreateOrUpdate( controllerResourceGroup, internalNetworkName, virtualNetworkParams, ) if err != nil { return nil, errors.Annotatef(err, "creating virtual network %q", internalNetworkName) } return &vnet, nil }
// createVirtualMachine creates a Standard_A0 Windows Server 2012 R2 VM in
// the given resource group, joined to the given availability set and
// network interface, with the VM agent provisioned.
func createVirtualMachine(
	group resources.ResourceGroup,
	vmName, adminName, adminPassword string,
	availSet compute.AvailabilitySet,
	networkInterface network.Interface,
	arm arm.Client) error {
	vmc := arm.VirtualMachines()
	netRefs := make([]compute.NetworkInterfaceReference, 1, 1)
	netRefs[0] = compute.NetworkInterfaceReference{ID: networkInterface.ID}
	groupName := *group.Name
	// NOTE(review): the OS disk VHD is placed in a storage account named
	// after the resource group — assumes such an account already exists;
	// confirm against the caller.
	accountName := groupName
	vmParams := compute.VirtualMachine{
		Location: group.Location,
		Properties: &compute.VirtualMachineProperties{
			AvailabilitySet: &compute.SubResource{ID: availSet.ID},
			HardwareProfile: &compute.HardwareProfile{VMSize: compute.StandardA0},
			NetworkProfile:  &compute.NetworkProfile{NetworkInterfaces: &netRefs},
			StorageProfile: &compute.StorageProfile{
				// Latest Windows Server 2012 R2 Datacenter image.
				ImageReference: &compute.ImageReference{
					Publisher: to.StringPtr("MicrosoftWindowsServer"),
					Offer:     to.StringPtr("WindowsServer"),
					Sku:       to.StringPtr("2012-R2-Datacenter"),
					Version:   to.StringPtr("latest"),
				},
				OsDisk: &compute.OSDisk{
					Name:         to.StringPtr("mytestod1"),
					CreateOption: compute.FromImage,
					Vhd: &compute.VirtualHardDisk{
						URI: to.StringPtr("http://" + accountName + ".blob.core.windows.net/vhds/mytestod1.vhd"),
					},
				},
			},
			OsProfile: &compute.OSProfile{
				AdminUsername:        to.StringPtr(adminName),
				AdminPassword:        to.StringPtr(adminPassword),
				ComputerName:         to.StringPtr(vmName),
				WindowsConfiguration: &compute.WindowsConfiguration{ProvisionVMAgent: to.BoolPtr(true)},
			},
		},
	}
	if _, err := vmc.CreateOrUpdate(groupName, vmName, vmParams); err != nil {
		return fmt.Errorf("Failed to create virtual machine '%s' in location '%s': '%s'\n", vmName, *group.Location, err.Error())
	}
	return nil
}
func makeIPConfiguration(privateIPAddress string) network.InterfaceIPConfiguration { ipConfiguration := network.InterfaceIPConfiguration{ Properties: &network.InterfaceIPConfigurationPropertiesFormat{}, } if privateIPAddress != "" { ipConfiguration.Properties.PrivateIPAddress = to.StringPtr(privateIPAddress) } return ipConfiguration }
// initResourceGroup creates and initialises a resource group for this
// environment. The resource group will have a storage account and a
// subnet associated with it (but not necessarily contained within:
// see subnet creation).
func (env *azureEnviron) initResourceGroup() (*config.Config, error) {
	location := env.config.location
	tags, _ := env.config.ResourceTags()
	resourceGroupsClient := resources.GroupsClient{env.resources}

	logger.Debugf("creating resource group %q", env.resourceGroup)
	_, err := resourceGroupsClient.CreateOrUpdate(env.resourceGroup, resources.Group{
		Location: to.StringPtr(location),
		Tags:     toTagsPtr(tags),
	})
	if err != nil {
		return nil, errors.Annotate(err, "creating resource group")
	}

	var vnetPtr *network.VirtualNetwork
	if env.resourceGroup == env.controllerResourceGroup {
		// Create an internal network for all VMs to connect to.
		vnetPtr, err = createInternalVirtualNetwork(
			env.network, env.controllerResourceGroup, location, tags,
		)
		if err != nil {
			return nil, errors.Annotate(err, "creating virtual network")
		}
	} else {
		// We're creating a hosted environment, so we need to fetch
		// the virtual network to create a subnet below.
		vnetClient := network.VirtualNetworksClient{env.network}
		vnet, err := vnetClient.Get(env.controllerResourceGroup, internalNetworkName)
		if err != nil {
			return nil, errors.Annotate(err, "getting virtual network")
		}
		vnetPtr = &vnet
	}

	_, err = createInternalSubnet(
		env.network, env.resourceGroup, env.controllerResourceGroup,
		vnetPtr, location, tags,
	)
	if err != nil {
		return nil, errors.Annotate(err, "creating subnet")
	}

	// Create a storage account for the resource group.
	storageAccountsClient := storage.AccountsClient{env.storage}
	storageAccountName, storageAccountKey, err := createStorageAccount(
		storageAccountsClient, env.config.storageAccountType,
		env.resourceGroup, location, tags,
		env.provider.config.StorageAccountNameGenerator,
	)
	if err != nil {
		return nil, errors.Annotate(err, "creating storage account")
	}
	// Record the storage account details in the model configuration so
	// subsequent operations can locate the account and its key.
	return env.config.Config.Apply(map[string]interface{}{
		configAttrStorageAccount:    storageAccountName,
		configAttrStorageAccountKey: storageAccountKey,
	})
}
// createVMExtension creates a CustomScript VM extension for the given VM
// which will execute the CustomData on the machine as a script.
func createVMExtension(
	vmExtensionClient compute.VirtualMachineExtensionsClient,
	os jujuos.OSType, resourceGroup, vmName, location string,
	vmTags map[string]string,
) error {
	// Select the OS-specific CustomScript publisher/type/version and the
	// command that will run the rendered CustomData.
	var commandToExecute, extensionPublisher, extensionType, extensionVersion string

	switch os {
	case jujuos.Windows:
		commandToExecute = windowsExecuteCustomScriptCommand
		extensionPublisher = windowsCustomScriptPublisher
		extensionType = windowsCustomScriptType
		extensionVersion = windowsCustomScriptVersion
	case jujuos.CentOS:
		commandToExecute = linuxExecuteCustomScriptCommand
		extensionPublisher = linuxCustomScriptPublisher
		extensionType = linuxCustomScriptType
		extensionVersion = linuxCustomScriptVersion
	default:
		// Ubuntu renders CustomData as cloud-config, and interprets
		// it with cloud-init. Windows and CentOS do not use cloud-init
		// on Azure.
		return errors.NotSupportedf("CustomScript extension for OS %q", os)
	}

	extensionSettings := map[string]*string{
		"commandToExecute": to.StringPtr(commandToExecute),
	}
	extension := compute.VirtualMachineExtension{
		Location: to.StringPtr(location),
		Tags:     toTagsPtr(vmTags),
		Properties: &compute.VirtualMachineExtensionProperties{
			Publisher:               to.StringPtr(extensionPublisher),
			Type:                    to.StringPtr(extensionType),
			TypeHandlerVersion:      to.StringPtr(extensionVersion),
			AutoUpgradeMinorVersion: to.BoolPtr(true),
			Settings:                &extensionSettings,
		},
	}
	_, err := vmExtensionClient.CreateOrUpdate(
		resourceGroup, vmName, extensionName, extension,
	)
	return err
}
func makeNetworkInterface(nicName, vmName string, ipConfigurations ...network.InterfaceIPConfiguration) network.Interface { tags := map[string]*string{"juju-machine-name": &vmName} return network.Interface{ Name: to.StringPtr(nicName), Tags: &tags, Properties: &network.InterfacePropertiesFormat{ IPConfigurations: &ipConfigurations, }, } }
func main() { name := "storage-account-name" c, err := helpers.LoadCredentials() if err != nil { log.Fatalf("Error: %v", err) } sid := c["subscriptionID"] tid := c["tenantID"] cid := c["clientID"] secret := c["clientSecret"] spt, err := azure.NewServicePrincipalToken(cid, secret, tid, azure.AzureResourceManagerScope) if err != nil { log.Fatalf("Error: %v", err) } arm := arm.NewClient(sid, spt) arm.RequestInspector = helpers.WithInspection() arm.ResponseInspector = helpers.ByInspecting() ac := arm.StorageAccounts() cna, err := ac.CheckNameAvailability( storage.AccountCheckNameAvailabilityParameters{ Name: to.StringPtr(name), Type: to.StringPtr("Microsoft.Storage/storageAccounts")}) if err != nil { log.Fatalf("Error: %v", err) } else { if to.Bool(cna.NameAvailable) { fmt.Printf("The name '%s' is available\n", name) } else { fmt.Printf("The name '%s' is unavailable because %s\n", name, to.String(cna.Message)) } } }
// createNetworkInterface creates a dynamically-allocated public IP address
// ("ip"+suffix) and a network interface ("nic"+suffix) bound to that
// address and the supplied subnet, in the group's location. Failures are
// wrapped in descriptive errors via the named results.
func createNetworkInterface(
	suffix string,
	group resources.ResourceGroup,
	subnet network.Subnet,
	arm arm.Client) (networkInterface network.Interface, err error) {
	pipc := arm.PublicIPAddresses()
	nicc := arm.NetworkInterfaces()
	groupName := *group.Name
	ipName := "ip" + suffix
	nicName := "nic" + suffix
	pipResult, err := pipc.CreateOrUpdate(
		groupName,
		ipName,
		network.PublicIPAddress{
			Location: group.Location,
			Properties: &network.PublicIPAddressPropertiesFormat{
				PublicIPAllocationMethod: network.Dynamic,
			},
		})
	if err != nil {
		err = fmt.Errorf("Failed to create public ip address '%s' in location '%s': '%s'\n", ipName, *group.Location, err.Error())
		return
	}
	// Bind the NIC's single IP configuration to the new public IP and
	// the supplied subnet.
	nicProps := network.InterfaceIPConfigurationPropertiesFormat{
		PublicIPAddress: &pipResult,
		Subnet:          &subnet}
	ipConfigs := make([]network.InterfaceIPConfiguration, 1, 1)
	ipConfigs[0] = network.InterfaceIPConfiguration{
		Name:       to.StringPtr(nicName + "Config"),
		Properties: &nicProps,
	}
	props := network.InterfacePropertiesFormat{IPConfigurations: &ipConfigs}
	networkInterface, err = nicc.CreateOrUpdate(
		groupName,
		nicName,
		network.Interface{
			Location:   group.Location,
			Properties: &props,
		})
	if err != nil {
		err = fmt.Errorf("Failed to create network interface '%s' in location '%s': '%s'\n", nicName, *group.Location, err.Error())
	}
	return
}
// initResourceGroup creates and initialises a resource group for this
// environment. The resource group will have a storage account and a
// subnet associated with it (but not necessarily contained within:
// see subnet creation).
func (env *azureEnviron) initResourceGroup() (*config.Config, error) {
	location := env.config.location
	// Tag all created resources with the model and controller identities.
	tags := tags.ResourceTags(
		names.NewModelTag(env.config.Config.UUID()),
		names.NewModelTag(env.config.Config.ControllerUUID()),
		env.config,
	)
	resourceGroupsClient := resources.GroupsClient{env.resources}

	logger.Debugf("creating resource group %q", env.resourceGroup)
	_, err := resourceGroupsClient.CreateOrUpdate(env.resourceGroup, resources.Group{
		Location: to.StringPtr(location),
		Tags:     toTagsPtr(tags),
	})
	if err != nil {
		return nil, errors.Annotate(err, "creating resource group")
	}

	// Create an internal network for all VMs in the
	// resource group to connect to.
	vnetPtr, err := createInternalVirtualNetwork(
		env.network, env.resourceGroup, location, tags,
	)
	if err != nil {
		return nil, errors.Annotate(err, "creating virtual network")
	}

	_, err = createInternalSubnet(
		env.network, env.resourceGroup, vnetPtr,
		location, tags,
	)
	if err != nil {
		return nil, errors.Annotate(err, "creating subnet")
	}

	// Create a storage account for the resource group.
	storageAccountsClient := storage.AccountsClient{env.storage}
	storageAccountName, storageAccountKey, err := createStorageAccount(
		storageAccountsClient, env.config.storageAccountType,
		env.resourceGroup, location, tags,
		env.provider.config.StorageAccountNameGenerator,
	)
	if err != nil {
		return nil, errors.Annotate(err, "creating storage account")
	}
	// Record the storage account details in the model configuration so
	// subsequent operations can locate the account and its key.
	return env.config.Config.Apply(map[string]interface{}{
		configAttrStorageAccount:    storageAccountName,
		configAttrStorageAccountKey: storageAccountKey,
	})
}
// assertStartInstanceRequests validates the sequence of HTTP requests
// captured while starting an instance (method order and marshalled
// bodies) and returns them keyed by purpose so callers can make further
// assertions.
func (s *environSuite) assertStartInstanceRequests(c *gc.C) startInstanceRequests {
	// Clear the fields that don't get sent in the request.
	s.publicIPAddress.ID = nil
	s.publicIPAddress.Name = nil
	s.publicIPAddress.Properties.IPAddress = nil
	s.newNetworkInterface.ID = nil
	s.newNetworkInterface.Name = nil
	(*s.newNetworkInterface.Properties.IPConfigurations)[0].ID = nil
	s.jujuAvailabilitySet.ID = nil
	s.jujuAvailabilitySet.Name = nil
	s.virtualMachine.ID = nil
	s.virtualMachine.Name = nil
	s.virtualMachine.Properties.ProvisioningState = nil

	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 8)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // vmSizes
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // juju-testenv-model-deadbeef-0bad-400d-8000-4b1d0d06f00d
	c.Assert(s.requests[2].Method, gc.Equals, "GET") // skus
	c.Assert(s.requests[3].Method, gc.Equals, "PUT")
	assertRequestBody(c, s.requests[3], s.publicIPAddress)
	c.Assert(s.requests[4].Method, gc.Equals, "GET") // NICs
	c.Assert(s.requests[5].Method, gc.Equals, "PUT")
	assertRequestBody(c, s.requests[5], s.newNetworkInterface)
	c.Assert(s.requests[6].Method, gc.Equals, "PUT")
	assertRequestBody(c, s.requests[6], s.jujuAvailabilitySet)
	c.Assert(s.requests[7].Method, gc.Equals, "PUT")

	// CustomData is non-deterministic, so don't compare it.
	// TODO(axw) shouldn't CustomData be deterministic? Look into this.
	var virtualMachine compute.VirtualMachine
	unmarshalRequestBody(c, s.requests[7], &virtualMachine)
	c.Assert(to.String(virtualMachine.Properties.OsProfile.CustomData), gc.Not(gc.HasLen), 0)
	// Replace the non-deterministic CustomData with a sentinel so the
	// rest of the VM body can be compared deeply.
	virtualMachine.Properties.OsProfile.CustomData = to.StringPtr("<juju-goes-here>")
	c.Assert(&virtualMachine, jc.DeepEquals, s.virtualMachine)

	return startInstanceRequests{
		vmSizes:          s.requests[0],
		subnet:           s.requests[1],
		skus:             s.requests[2],
		publicIPAddress:  s.requests[3],
		nics:             s.requests[4],
		networkInterface: s.requests[5],
		availabilitySet:  s.requests[6],
		virtualMachine:   s.requests[7],
	}
}
// newOSProfile creates the OS profile for the named VM from the instance
// configuration, returning the profile together with the OS type derived
// from the configured series.
func newOSProfile(vmName string, instanceConfig *instancecfg.InstanceConfig) (*compute.OSProfile, os.OSType, error) {
	logger.Debugf("creating OS profile for %q", vmName)

	// CustomData carries the rendered user-data payload.
	customData, err := providerinit.ComposeUserData(instanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, os.Unknown, errors.Annotate(err, "composing user data")
	}

	osProfile := &compute.OSProfile{
		ComputerName: to.StringPtr(vmName),
		CustomData:   to.StringPtr(string(customData)),
	}

	seriesOS, err := jujuseries.GetOSFromSeries(instanceConfig.Series)
	if err != nil {
		return nil, os.Unknown, errors.Trace(err)
	}
	switch seriesOS {
	case os.Ubuntu, os.CentOS, os.Arch:
		// SSH keys are handled by custom data, but must also be
		// specified in order to forego providing a password, and
		// disable password authentication.
		publicKeys := []compute.SSHPublicKey{{
			Path:    to.StringPtr("/home/ubuntu/.ssh/authorized_keys"),
			KeyData: to.StringPtr(instanceConfig.AuthorizedKeys),
		}}
		osProfile.AdminUsername = to.StringPtr("ubuntu")
		osProfile.LinuxConfiguration = &compute.LinuxConfiguration{
			DisablePasswordAuthentication: to.BoolPtr(true),
			SSH: &compute.SSHConfiguration{PublicKeys: &publicKeys},
		}
	case os.Windows:
		osProfile.AdminUsername = to.StringPtr("JujuAdministrator")
		// A password is required by Azure, but we will never use it.
		// We generate something sufficiently long and random that it
		// should be infeasible to guess.
		osProfile.AdminPassword = to.StringPtr(randomAdminPassword())
		osProfile.WindowsConfiguration = &compute.WindowsConfiguration{
			ProvisionVMAgent:       to.BoolPtr(true),
			EnableAutomaticUpdates: to.BoolPtr(true),
			// TODO(?) add WinRM configuration here.
		}
	default:
		return nil, os.Unknown, errors.NotSupportedf("%s", seriesOS)
	}
	return osProfile, seriesOS, nil
}
// createNetwork creates a virtual network named "<group>vnet"
// (10.0.0.0/16) containing a single subnet "<group>subnet" (10.0.0.0/24)
// in the group's location, and returns the created subnet.
func createNetwork(
	group resources.ResourceGroup,
	arm arm.Client) (snetResult network.Subnet, err error) {
	vnetc := arm.VirtualNetworks()
	snetc := arm.Subnets()
	name := *group.Name
	vnet := name + "vnet"
	subnet := name + "subnet"
	snet := network.Subnet{
		Name:       &subnet,
		Properties: &network.SubnetPropertiesFormat{AddressPrefix: to.StringPtr("10.0.0.0/24")}}
	snets := make([]network.Subnet, 1, 1)
	snets[0] = snet
	addrPrefixes := make([]string, 1, 1)
	addrPrefixes[0] = "10.0.0.0/16"
	address := network.AddressSpace{AddressPrefixes: &addrPrefixes}
	nwkProps := network.VirtualNetworkPropertiesFormat{AddressSpace: &address, Subnets: &snets}
	_, err = vnetc.CreateOrUpdate(name, vnet, network.VirtualNetwork{Location: group.Location, Properties: &nwkProps})
	if err != nil {
		err = fmt.Errorf("Failed to create virtual network '%s' in location '%s': '%s'\n", vnet, *group.Location, err.Error())
		return
	}
	// The subnet is also created via its own client so its result value
	// can be returned to the caller.
	snetResult, err = snetc.CreateOrUpdate(name, vnet, subnet, snet)
	if err != nil {
		err = fmt.Errorf("Failed to create subnet '%s' in location '%s': '%s'\n", subnet, *group.Location, err.Error())
	}
	return
}
// newStorageProfile creates the storage profile for a virtual machine, // based on the series and chosen instance spec. func newStorageProfile( vmName string, series string, instanceSpec *instances.InstanceSpec, storageEndpoint, storageAccountName string, ) (*compute.StorageProfile, error) { logger.Debugf("creating storage profile for %q", vmName) urnParts := strings.SplitN(instanceSpec.Image.Id, ":", 4) if len(urnParts) != 4 { return nil, errors.Errorf("invalid image ID %q", instanceSpec.Image.Id) } publisher := urnParts[0] offer := urnParts[1] sku := urnParts[2] version := urnParts[3] osDisksRoot := osDiskVhdRoot(storageEndpoint, storageAccountName) osDiskName := vmName osDisk := &compute.OSDisk{ Name: to.StringPtr(osDiskName), CreateOption: compute.FromImage, Caching: compute.ReadWrite, Vhd: &compute.VirtualHardDisk{ URI: to.StringPtr( osDisksRoot + osDiskName + vhdExtension, ), }, } return &compute.StorageProfile{ ImageReference: &compute.ImageReference{ Publisher: to.StringPtr(publisher), Offer: to.StringPtr(offer), Sku: to.StringPtr(sku), Version: to.StringPtr(version), }, OsDisk: osDisk, }, nil }
// createAvailabilitySet creates the availability set for a machine to use
// if it doesn't already exist, and returns the availability set's ID. The
// algorithm used for choosing the availability set is:
//  - if there is a distribution group, use the same availability set as
//    the instances in that group. Instances in the group may be in
//    different availability sets (when multiple services colocated on a
//    machine), so we pick one arbitrarily
//  - if there is no distribution group, create an availability name with
//    a name based on the value of the tags.JujuUnitsDeployed tag in vmTags,
//    if it exists
//  - if there are no units assigned to the machine, then use the "juju"
//    availability set
func createAvailabilitySet(
	client compute.AvailabilitySetsClient,
	vmName, resourceGroup, location string,
	vmTags, envTags map[string]string,
	distributionGroupFunc func() ([]instance.Id, error),
	instancesFunc func([]instance.Id) ([]instance.Instance, error),
) (string, error) {
	logger.Debugf("selecting availability set for %q", vmName)

	// First we check if there's a distribution group, and if so,
	// use the availability set of the first instance we find in it.
	var instanceIds []instance.Id
	if distributionGroupFunc != nil {
		var err error
		instanceIds, err = distributionGroupFunc()
		if err != nil {
			return "", errors.Annotate(
				err, "querying distribution group",
			)
		}
	}
	instances, err := instancesFunc(instanceIds)
	switch err {
	case nil, environs.ErrPartialInstances, environs.ErrNoInstances:
		// Partial or empty results are fine here; we only need to
		// find one instance that already has an availability set.
	default:
		return "", errors.Annotate(
			err, "querying distribution group instances",
		)
	}
	for _, instance := range instances {
		if instance == nil {
			continue
		}
		instance := instance.(*azureInstance)
		availabilitySetSubResource := instance.Properties.AvailabilitySet
		if availabilitySetSubResource == nil || availabilitySetSubResource.ID == nil {
			continue
		}
		logger.Debugf("- selecting availability set of %q", instance.Name)
		return to.String(availabilitySetSubResource.ID), nil
	}

	// We'll have to create an availability set. Use the name of one of the
	// services assigned to the machine.
	availabilitySetName := "juju"
	if unitNames, ok := vmTags[tags.JujuUnitsDeployed]; ok {
		for _, unitName := range strings.Fields(unitNames) {
			if !names.IsValidUnit(unitName) {
				continue
			}
			serviceName, err := names.UnitService(unitName)
			if err != nil {
				return "", errors.Annotate(
					err, "getting service name",
				)
			}
			availabilitySetName = serviceName
			break
		}
	}

	logger.Debugf("- creating availability set %q", availabilitySetName)
	availabilitySet, err := client.CreateOrUpdate(
		resourceGroup, availabilitySetName, compute.AvailabilitySet{
			Location: to.StringPtr(location),
			// NOTE(axw) we do *not* want to use vmTags here,
			// because an availability set is shared by machines.
			Tags: toTagsPtr(envTags),
		},
	)
	if err != nil {
		return "", errors.Annotatef(
			err, "creating availability set %q", availabilitySetName,
		)
	}
	return to.String(availabilitySet.ID), nil
}
// createVirtualMachine creates a virtual machine and related resources.
//
// All resources created are tagged with the specified "vmTags", so if
// this function fails then all resources can be deleted by tag.
func createVirtualMachine(
	resourceGroup, location, vmName string,
	vmTags, envTags map[string]string,
	instanceSpec *instances.InstanceSpec,
	instanceConfig *instancecfg.InstanceConfig,
	distributionGroupFunc func() ([]instance.Id, error),
	instancesFunc func([]instance.Id) ([]instance.Instance, error),
	apiPort *int,
	internalNetworkSubnet *network.Subnet,
	nsgID, storageEndpoint, storageAccountName string,
	networkClient network.ManagementClient,
	vmClient compute.VirtualMachinesClient,
	availabilitySetClient compute.AvailabilitySetsClient,
	vmExtensionClient compute.VirtualMachineExtensionsClient,
) (compute.VirtualMachine, error) {

	// Build each profile (storage, OS, network) and select the
	// availability set before issuing the VM CreateOrUpdate call.
	storageProfile, err := newStorageProfile(
		vmName, instanceConfig.Series, instanceSpec,
		storageEndpoint, storageAccountName,
	)
	if err != nil {
		return compute.VirtualMachine{}, errors.Annotate(err, "creating storage profile")
	}

	osProfile, seriesOS, err := newOSProfile(vmName, instanceConfig)
	if err != nil {
		return compute.VirtualMachine{}, errors.Annotate(err, "creating OS profile")
	}

	networkProfile, err := newNetworkProfile(
		networkClient, vmName, apiPort,
		internalNetworkSubnet, nsgID,
		resourceGroup, location, vmTags,
	)
	if err != nil {
		return compute.VirtualMachine{}, errors.Annotate(err, "creating network profile")
	}

	availabilitySetId, err := createAvailabilitySet(
		availabilitySetClient,
		vmName, resourceGroup, location,
		vmTags, envTags,
		distributionGroupFunc, instancesFunc,
	)
	if err != nil {
		return compute.VirtualMachine{}, errors.Annotate(err, "creating availability set")
	}

	vmArgs := compute.VirtualMachine{
		Location: to.StringPtr(location),
		Tags:     toTagsPtr(vmTags),
		Properties: &compute.VirtualMachineProperties{
			HardwareProfile: &compute.HardwareProfile{
				VMSize: compute.VirtualMachineSizeTypes(
					instanceSpec.InstanceType.Name,
				),
			},
			StorageProfile: storageProfile,
			OsProfile:      osProfile,
			NetworkProfile: networkProfile,
			AvailabilitySet: &compute.SubResource{
				ID: to.StringPtr(availabilitySetId),
			},
		},
	}
	vm, err := vmClient.CreateOrUpdate(resourceGroup, vmName, vmArgs)
	if err != nil {
		return compute.VirtualMachine{}, errors.Annotate(err, "creating virtual machine")
	}

	// On Windows and CentOS, we must add the CustomScript VM
	// extension to run the CustomData script.
	switch seriesOS {
	case os.Windows, os.CentOS:
		if err := createVMExtension(
			vmExtensionClient, seriesOS,
			resourceGroup, vmName, location, vmTags,
		); err != nil {
			return compute.VirtualMachine{}, errors.Annotate(
				err, "creating virtual machine extension",
			)
		}
	}
	return vm, nil
}
func main() { if len(os.Args) == 2 && os.Args[1] == "--help" { fmt.Println("usage: deploy [parameter-file-name [template-file-name]]") return } deploymentName := "simplelinux" groupName := "templatetests" groupLocation := "West US" arm, err := helpers.AuthenticateForARM() if err != nil { fmt.Printf("Failed to authenticate: '%s'\n", err.Error()) return } arm.RequestInspector = helpers.WithInspection() arm.ResponseInspector = helpers.ByInspecting() _, err = createResourceGroup(groupName, groupLocation, arm) if err != nil { fmt.Printf("Failed to create resource group '%s': '%s'\n", groupName, err.Error()) return } var parameterLink *string var parameters map[string]interface{} var templateLink *string if len(os.Args) >= 2 { pl := os.Args[1] parameterLink = &pl } if len(os.Args) >= 3 { tl := os.Args[2] templateLink = &tl } if parameterLink != nil { parameters, err = helpers.ReadMap(*parameterLink) if err != nil { fmt.Printf("Failed to read parameter file '%s': '%s'\n", *parameterLink, err.Error()) return } if p, ok := parameters["parameters"]; ok { parameters = p.(map[string]interface{}) } } else { parameters = map[string]interface{}{ "adminUsername": makeStringParameterValue("tmpltest"), "adminPassword": makeStringParameterValue("<<PLEASE EDIT>>"), "dnsLabelPrefix": makeStringParameterValue("<<MUST BE UNIQUE>>"), "ubuntuOSVersion": makeStringParameterValue("14.04.2-LTS"), } } var deploymentProps resources.DeploymentProperties if templateLink != nil { template, err := helpers.ReadMap(*templateLink) if err != nil { fmt.Printf("Failed to read template file '%s': '%s'\n", *templateLink, err.Error()) return } deploymentProps = resources.DeploymentProperties{ Template: &template, Parameters: ¶meters, Mode: resources.Incremental, } } else { deploymentProps = resources.DeploymentProperties{ TemplateLink: &resources.TemplateLink{ URI: to.StringPtr("https://raw.githubusercontent.com/NiklasGustafsson/azure-go-samples/master/arm/templates/deploy-template/template01.json"), 
ContentVersion: to.StringPtr("1.0.0.0"), }, Parameters: ¶meters, Mode: resources.Incremental, } } deployment, err := arm.Deployments().CreateOrUpdate(groupName, deploymentName, resources.Deployment{Properties: &deploymentProps}) if err != nil { if aerr, ok := err.(autorest.Error); ok { fmt.Printf("Failed to create resource deployment details: '%s'\n", aerr.Message()) } else { fmt.Printf("Failed to create resource deployment: '%s'\n", err.Error()) } return } fmt.Printf("Created resource deployment '%s'\n", *deployment.Name) }
// TestDetachVolumes checks that DetachVolumes strips only the requested,
// currently-attached data disks from a VM's storage profile, tolerates
// detaching volumes that are not attached, and reports an error for an
// attachment that names a non-existent machine.
func (s *storageSuite) TestDetachVolumes(c *gc.C) {
	// machine-0 has three data disks: volume-0, volume-1 and volume-2.
	machine0DataDisks := []compute.DataDisk{{
		Lun:  to.IntPtr(0),
		Name: to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.core.windows.net/datavhds/volume-0.vhd",
				fakeStorageAccount,
			)),
		},
	}, {
		Lun:  to.IntPtr(1),
		Name: to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.core.windows.net/datavhds/volume-1.vhd",
				fakeStorageAccount,
			)),
		},
	}, {
		Lun:  to.IntPtr(2),
		Name: to.StringPtr("volume-2"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.core.windows.net/datavhds/volume-2.vhd",
				fakeStorageAccount,
			)),
		},
	}}

	makeParams := func(volume, machine string) storage.VolumeAttachmentParams {
		return storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider:   "azure",
				Machine:    names.NewMachineTag(machine),
				InstanceId: instance.Id("machine-" + machine),
			},
			Volume:   names.NewVolumeTag(volume),
			VolumeId: "volume-" + volume,
		}
	}
	// Detach volume-1 from machine-0 twice (the duplicate should be
	// harmless), detach volume-42 which is not attached to machine-1,
	// and detach volume-2 from machine-42, which does not exist.
	params := []storage.VolumeAttachmentParams{
		makeParams("1", "0"),
		makeParams("1", "0"),
		makeParams("42", "1"),
		makeParams("2", "42"),
	}

	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine0DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}}

	// There should be a couple of API calls to list instances,
	// and one update per modified instance (only machine-0 changes).
	nics := []network.Interface{
		makeNetworkInterface("nic-0", "machine-0"),
		makeNetworkInterface("nic-1", "machine-1"),
	}
	nicsSender := azuretesting.NewSenderWithValue(network.InterfaceListResult{
		Value: &nics,
	})
	nicsSender.PathPattern = `.*/Microsoft\.Network/networkInterfaces`
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		nicsSender,
		virtualMachinesSender,
		updateVirtualMachine0Sender,
	}

	results, err := volumeSource.DetachVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0], jc.ErrorIsNil)
	c.Check(results[1], jc.ErrorIsNil)
	c.Check(results[2], jc.ErrorIsNil)
	c.Check(results[3], gc.ErrorMatches, "instance machine-42 not found")

	// Validate HTTP request bodies: machine-0's PUT should retain only
	// volume-0 and volume-2 after volume-1 is detached.
	c.Assert(s.requests, gc.HasLen, 3)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list NICs
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0
	machine0DataDisks = []compute.DataDisk{
		machine0DataDisks[0],
		machine0DataDisks[2],
	}
	virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks
	assertRequestBody(c, s.requests[2], &virtualMachines[0])
}
// allInstances returns all of the instances in the given resource group,
// and optionally ensures that each instance's addresses are up-to-date.
func (env *azureEnviron) allInstances(
	resourceGroup string,
	refreshAddresses bool,
) ([]instance.Instance, error) {
	// Snapshot the API clients while holding the lock; the network
	// calls below are made without holding env.mu.
	env.mu.Lock()
	vmClient := compute.VirtualMachinesClient{env.compute}
	nicClient := network.InterfacesClient{env.network}
	pipClient := network.PublicIPAddressesClient{env.network}
	env.mu.Unlock()

	// Due to how deleting instances works, we have to get creative about
	// listing instances. We list NICs and return an instance for each
	// unique value of the jujuMachineNameTag tag.
	//
	// The machine provisioner will call AllInstances so it can delete
	// unknown instances. StopInstances must delete VMs before NICs and
	// public IPs, because a VM cannot have less than 1 NIC. Thus, we can
	// potentially delete a VM but then fail to delete its NIC.
	nicsResult, err := nicClient.List(resourceGroup)
	if err != nil {
		if nicsResult.Response.Response != nil && nicsResult.StatusCode == http.StatusNotFound {
			// This will occur if the resource group does not
			// exist, e.g. in a fresh hosted environment.
			return nil, nil
		}
		return nil, errors.Trace(err)
	}
	// No NICs means no machines at all (see rationale above).
	if nicsResult.Value == nil || len(*nicsResult.Value) == 0 {
		return nil, nil
	}

	// Create an azureInstance for each VM.
	result, err := vmClient.List(resourceGroup)
	if err != nil {
		return nil, errors.Annotate(err, "listing virtual machines")
	}
	vmNames := make(set.Strings)
	var azureInstances []*azureInstance
	if result.Value != nil {
		azureInstances = make([]*azureInstance, len(*result.Value))
		for i, vm := range *result.Value {
			inst := &azureInstance{vm, env, nil, nil}
			azureInstances[i] = inst
			vmNames.Add(to.String(vm.Name))
		}
	}

	// Create additional azureInstances for NICs without machines. See
	// comments above for rationale. This needs to happen before calling
	// setInstanceAddresses, so we still associate the NICs/PIPs.
	for _, nic := range *nicsResult.Value {
		vmName, ok := toTags(nic.Tags)[jujuMachineNameTag]
		if !ok || vmNames.Contains(vmName) {
			continue
		}
		// Synthesize a placeholder VM record for the orphaned NIC so
		// callers can observe (and clean up) the half-deleted machine.
		vm := compute.VirtualMachine{
			Name: to.StringPtr(vmName),
			Properties: &compute.VirtualMachineProperties{
				ProvisioningState: to.StringPtr("Partially Deleted"),
			},
		}
		inst := &azureInstance{vm, env, nil, nil}
		azureInstances = append(azureInstances, inst)
		vmNames.Add(to.String(vm.Name))
	}

	if len(azureInstances) > 0 && refreshAddresses {
		if err := setInstanceAddresses(
			pipClient, resourceGroup, azureInstances, nicsResult,
		); err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Widen []*azureInstance to []instance.Instance for the return type.
	instances := make([]instance.Instance, len(azureInstances))
	for i, inst := range azureInstances {
		instances[i] = inst
	}
	return instances, nil
}
// TestInstancePorts checks that Ports reports the inbound Allow security
// rules belonging to this machine, translating protocol and port range,
// while ignoring rules for other machines, Deny rules, Outbound rules,
// and rules with priorities in the internal (reserved) range.
func (s *instanceSuite) TestInstancePorts(c *gc.C) {
	inst := s.getInstance(c)
	nsgSender := networkSecurityGroupSender([]network.SecurityRule{{
		// UDP on all ports: expected to surface as 0-65535/udp.
		Name: to.StringPtr("machine-0-xyzzy"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolUDP,
			DestinationPortRange: to.StringPtr("*"),
			Access:               network.Allow,
			Priority:             to.IntPtr(200),
			Direction:            network.Inbound,
		},
	}, {
		// TCP with an explicit port range.
		Name: to.StringPtr("machine-0-tcpcp"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolTCP,
			DestinationPortRange: to.StringPtr("1000-2000"),
			Access:               network.Allow,
			Priority:             to.IntPtr(201),
			Direction:            network.Inbound,
		},
	}, {
		// "*" protocol: expected to surface as both tcp and udp.
		Name: to.StringPtr("machine-0-http"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolAsterisk,
			DestinationPortRange: to.StringPtr("80"),
			Access:               network.Allow,
			Priority:             to.IntPtr(202),
			Direction:            network.Inbound,
		},
	}, {
		// Ignored: belongs to a different machine ("machine-00").
		Name: to.StringPtr("machine-00-ignored"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolTCP,
			DestinationPortRange: to.StringPtr("80"),
			Access:               network.Allow,
			Priority:             to.IntPtr(202),
			Direction:            network.Inbound,
		},
	}, {
		// Ignored: Deny rule.
		Name: to.StringPtr("machine-0-ignored"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolTCP,
			DestinationPortRange: to.StringPtr("80"),
			Access:               network.Deny,
			Priority:             to.IntPtr(202),
			Direction:            network.Inbound,
		},
	}, {
		// Ignored: Outbound rule.
		Name: to.StringPtr("machine-0-ignored"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolTCP,
			DestinationPortRange: to.StringPtr("80"),
			Access:               network.Allow,
			Priority:             to.IntPtr(202),
			Direction:            network.Outbound,
		},
	}, {
		// Ignored: priority below the exposed range.
		Name: to.StringPtr("machine-0-ignored"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolTCP,
			DestinationPortRange: to.StringPtr("80"),
			Access:               network.Allow,
			Priority:             to.IntPtr(199), // internal range
			Direction:            network.Inbound,
		},
	}})
	s.sender = azuretesting.Senders{nsgSender}

	ports, err := inst.Ports("0")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ports, jc.DeepEquals, []jujunetwork.PortRange{{
		FromPort: 0,
		ToPort:   65535,
		Protocol: "udp",
	}, {
		FromPort: 1000,
		ToPort:   2000,
		Protocol: "tcp",
	}, {
		FromPort: 80,
		ToPort:   80,
		Protocol: "tcp",
	}, {
		FromPort: 80,
		ToPort:   80,
		Protocol: "udp",
	}})
}
// TestCreateVolumes checks that CreateVolumes attaches new data disks to
// the correct VMs at the first free LUNs, sizes them in whole GiB, and
// reports per-volume errors for a missing machine and for a machine with
// no free LUNs.
func (s *storageSuite) TestCreateVolumes(c *gc.C) {
	// machine-1 has a single data disk with LUN 0.
	machine1DataDisks := []compute.DataDisk{{Lun: to.IntPtr(0)}}
	// machine-2 has 32 data disks; no LUNs free.
	machine2DataDisks := make([]compute.DataDisk, 32)
	for i := range machine2DataDisks {
		machine2DataDisks[i].Lun = to.IntPtr(i)
	}

	// volume-0 and volume-2 are attached to machine-0
	// volume-1 is attached to machine-1
	// volume-3 is attached to machine-42, but machine-42 is missing
	// volume-42 is attached to machine-2, but machine-2 has no free LUNs
	makeVolumeParams := func(volume, machine string, size uint64) storage.VolumeParams {
		return storage.VolumeParams{
			Tag:      names.NewVolumeTag(volume),
			Size:     size,
			Provider: "azure",
			Attachment: &storage.VolumeAttachmentParams{
				AttachmentParams: storage.AttachmentParams{
					Provider:   "azure",
					Machine:    names.NewMachineTag(machine),
					InstanceId: instance.Id("machine-" + machine),
				},
				Volume: names.NewVolumeTag(volume),
			},
		}
	}
	// Sizes are in MiB; disk sizes in the request bodies below are
	// rounded up to whole GiB (1025 MiB -> 2 GiB).
	params := []storage.VolumeParams{
		makeVolumeParams("0", "0", 1),
		makeVolumeParams("1", "1", 1025),
		makeVolumeParams("2", "0", 1024),
		makeVolumeParams("3", "42", 40),
		makeVolumeParams("42", "2", 50),
	}

	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine1DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-2"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine2DataDisks},
		},
	}}

	// There should be a couple of API calls to list instances,
	// and one update per modified instance (machine-0 and machine-1).
	nics := []network.Interface{
		makeNetworkInterface("nic-0", "machine-0"),
		makeNetworkInterface("nic-1", "machine-1"),
		makeNetworkInterface("nic-2", "machine-2"),
	}
	nicsSender := azuretesting.NewSenderWithValue(network.InterfaceListResult{
		Value: &nics,
	})
	nicsSender.PathPattern = `.*/Microsoft\.Network/networkInterfaces`
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	updateVirtualMachine1Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine1Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-1`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		nicsSender,
		virtualMachinesSender,
		updateVirtualMachine0Sender,
		updateVirtualMachine1Sender,
	}

	results, err := volumeSource.CreateVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0].Error, jc.ErrorIsNil)
	c.Check(results[1].Error, jc.ErrorIsNil)
	c.Check(results[2].Error, jc.ErrorIsNil)
	c.Check(results[3].Error, gc.ErrorMatches, "instance machine-42 not found")
	c.Check(results[4].Error, gc.ErrorMatches, "choosing LUN: all LUNs are in use")

	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 4)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list NICs
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0
	c.Assert(s.requests[3].Method, gc.Equals, "PUT") // update machine-1

	// machine-0 gains volume-0 and volume-2 at LUNs 0 and 1.
	machine0DataDisks := []compute.DataDisk{{
		Lun:        to.IntPtr(0),
		DiskSizeGB: to.IntPtr(1),
		Name:       to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.core.windows.net/datavhds/volume-0.vhd",
			fakeStorageAccount,
		))},
		Caching:      compute.ReadWrite,
		CreateOption: compute.Empty,
	}, {
		Lun:        to.IntPtr(1),
		DiskSizeGB: to.IntPtr(1),
		Name:       to.StringPtr("volume-2"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.core.windows.net/datavhds/volume-2.vhd",
			fakeStorageAccount,
		))},
		Caching:      compute.ReadWrite,
		CreateOption: compute.Empty,
	}}
	virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks
	assertRequestBody(c, s.requests[2], &virtualMachines[0])

	// machine-1 gains volume-1 at the next free LUN (1), rounded up
	// from 1025 MiB to 2 GiB.
	machine1DataDisks = append(machine1DataDisks, compute.DataDisk{
		Lun:        to.IntPtr(1),
		DiskSizeGB: to.IntPtr(2),
		Name:       to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.core.windows.net/datavhds/volume-1.vhd",
			fakeStorageAccount,
		))},
		Caching:      compute.ReadWrite,
		CreateOption: compute.Empty,
	})
	assertRequestBody(c, s.requests[3], &virtualMachines[1])
}
// TestInstanceOpenPortsAlreadyOpen checks that OpenPorts skips creating a
// security rule for a port range that an existing rule already covers,
// and only adds rules for the ranges that are not yet open.
func (s *instanceSuite) TestInstanceOpenPortsAlreadyOpen(c *gc.C) {
	internalSubnetId := path.Join(
		"/subscriptions", fakeSubscriptionId,
		"resourceGroups/arbitrary/providers/Microsoft.Network/virtualnetworks/juju-internal/subnets",
		"juju-testenv-environment-"+testing.EnvironmentTag.Id(),
	)
	ipConfiguration := network.InterfaceIPConfiguration{
		Properties: &network.InterfaceIPConfigurationPropertiesFormat{
			PrivateIPAddress: to.StringPtr("10.0.0.4"),
			Subnet: &network.SubResource{
				ID: to.StringPtr(internalSubnetId),
			},
		},
	}
	s.networkInterfaces = []network.Interface{
		makeNetworkInterface("nic-0", "machine-0", ipConfiguration),
	}
	inst := s.getInstance(c)
	okSender := mocks.NewSender()
	okSender.EmitContent("{}")
	// The security group already holds a "*"-protocol rule for port
	// 1000, which covers the 1000/tcp request below.
	nsgSender := networkSecurityGroupSender([]network.SecurityRule{{
		Name: to.StringPtr("machine-0-tcp-1000"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.SecurityRuleProtocolAsterisk,
			DestinationPortRange: to.StringPtr("1000"),
			Access:               network.Allow,
			Priority:             to.IntPtr(202),
			Direction:            network.Inbound,
		},
	}})
	s.sender = azuretesting.Senders{nsgSender, okSender, okSender}

	err := inst.OpenPorts("0", []jujunetwork.PortRange{{
		Protocol: "tcp",
		FromPort: 1000,
		ToPort:   1000,
	}, {
		Protocol: "udp",
		FromPort: 1000,
		ToPort:   2000,
	}})
	c.Assert(err, jc.ErrorIsNil)

	// Only one PUT is expected: the tcp range was already open, so just
	// the udp range results in a new security rule.
	c.Assert(s.requests, gc.HasLen, 2)
	c.Assert(s.requests[0].Method, gc.Equals, "GET")
	c.Assert(s.requests[0].URL.Path, gc.Equals, internalSecurityGroupPath)
	c.Assert(s.requests[1].Method, gc.Equals, "PUT")
	c.Assert(s.requests[1].URL.Path, gc.Equals, securityRulePath("machine-0-udp-1000-2000"))
	assertRequestBody(c, s.requests[1], &network.SecurityRule{
		Properties: &network.SecurityRulePropertiesFormat{
			Description:              to.StringPtr("1000-2000/udp"),
			Protocol:                 network.SecurityRuleProtocolUDP,
			SourcePortRange:          to.StringPtr("*"),
			SourceAddressPrefix:      to.StringPtr("*"),
			DestinationPortRange:     to.StringPtr("1000-2000"),
			DestinationAddressPrefix: to.StringPtr("10.0.0.4"),
			Access:                   network.Allow,
			Priority:                 to.IntPtr(200),
			Direction:                network.Inbound,
		},
	})
}