func getTestLoadBalancer(services ...api.Service) network.LoadBalancer { rules := []network.LoadBalancingRule{} probes := []network.Probe{} for _, service := range services { for _, port := range service.Spec.Ports { ruleName := getRuleName(&service, port) rules = append(rules, network.LoadBalancingRule{ Name: to.StringPtr(ruleName), Properties: &network.LoadBalancingRulePropertiesFormat{ FrontendPort: to.Int32Ptr(port.Port), BackendPort: to.Int32Ptr(port.Port), }, }) probes = append(probes, network.Probe{ Name: to.StringPtr(ruleName), Properties: &network.ProbePropertiesFormat{ Port: to.Int32Ptr(port.NodePort), }, }) } } lb := network.LoadBalancer{ Properties: &network.LoadBalancerPropertiesFormat{ LoadBalancingRules: &rules, Probes: &probes, }, } return lb }
// createVolume updates the provided VirtualMachine's StorageProfile with the // parameters for creating a new data disk. We don't actually interact with // the Azure API until after all changes to the VirtualMachine are made. func (v *azureVolumeSource) createVolume( vm *compute.VirtualMachine, p storage.VolumeParams, storageAccount *armstorage.Account, ) (*storage.Volume, *storage.VolumeAttachment, error) { lun, err := nextAvailableLUN(vm) if err != nil { return nil, nil, errors.Annotate(err, "choosing LUN") } dataDisksRoot := dataDiskVhdRoot(storageAccount) dataDiskName := p.Tag.String() vhdURI := dataDisksRoot + dataDiskName + vhdExtension sizeInGib := mibToGib(p.Size) dataDisk := compute.DataDisk{ Lun: to.Int32Ptr(lun), DiskSizeGB: to.Int32Ptr(int32(sizeInGib)), Name: to.StringPtr(dataDiskName), Vhd: &compute.VirtualHardDisk{to.StringPtr(vhdURI)}, Caching: compute.ReadWrite, CreateOption: compute.Empty, } var dataDisks []compute.DataDisk if vm.Properties.StorageProfile.DataDisks != nil { dataDisks = *vm.Properties.StorageProfile.DataDisks } dataDisks = append(dataDisks, dataDisk) vm.Properties.StorageProfile.DataDisks = &dataDisks // Data disks associate VHDs to machines. In Juju's storage model, // the VHD is the volume and the disk is the volume attachment. volume := storage.Volume{ p.Tag, storage.VolumeInfo{ VolumeId: dataDiskName, Size: gibToMib(sizeInGib), // We don't currently support persistent volumes in // Azure, as it requires removal of "comp=media" when // deleting VMs, complicating cleanup. Persistent: true, }, } volumeAttachment := storage.VolumeAttachment{ p.Tag, p.Attachment.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(lun), }, } return &volume, &volumeAttachment, nil }
// getSecurityRules creates network security group rules based on driver // configuration such as SSH port, docker port and swarm port. func (d *Driver) getSecurityRules(extraPorts []string) (*[]network.SecurityRule, error) { mkRule := func(priority int, name, description, srcPort, dstPort string, proto network.SecurityRuleProtocol) network.SecurityRule { return network.SecurityRule{ Name: to.StringPtr(name), Properties: &network.SecurityRulePropertiesFormat{ Description: to.StringPtr(description), SourceAddressPrefix: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr("*"), SourcePortRange: to.StringPtr(srcPort), DestinationPortRange: to.StringPtr(dstPort), Access: network.Allow, Direction: network.Inbound, Protocol: proto, Priority: to.Int32Ptr(int32(priority)), }, } } log.Debugf("Docker port is configured as %d", d.DockerPort) // Base ports to be opened for any machine rl := []network.SecurityRule{ mkRule(100, "SSHAllowAny", "Allow ssh from public Internet", "*", fmt.Sprintf("%d", d.BaseDriver.SSHPort), network.TCP), mkRule(300, "DockerAllowAny", "Allow docker engine access (TLS-protected)", "*", fmt.Sprintf("%d", d.DockerPort), network.TCP), } // Open swarm port if configured if d.BaseDriver.SwarmMaster { swarmHost := d.BaseDriver.SwarmHost log.Debugf("Swarm host is configured as %q", swarmHost) u, err := url.Parse(swarmHost) if err != nil { return nil, fmt.Errorf("Cannot parse URL %q: %v", swarmHost, err) } _, swarmPort, err := net.SplitHostPort(u.Host) if err != nil { return nil, fmt.Errorf("Could not parse swarm port in %q: %v", u.Host, err) } rl = append(rl, mkRule(500, "DockerSwarmAllowAny", "Allow swarm manager access (TLS-protected)", "*", swarmPort, network.TCP)) } else { log.Debug("Swarm host is not configured.") } // extra port numbers requested by user basePri := 1000 for i, p := range extraPorts { port, protocol := driverutil.SplitPortProto(p) proto, err := parseSecurityRuleProtocol(protocol) if err != nil { return nil, fmt.Errorf("cannot 
parse security rule protocol: %v", err) } log.Debugf("User-requested port to be opened on NSG: %v/%s", port, proto) r := mkRule(basePri+i, fmt.Sprintf("Port%s%sAllowAny", port, proto), "User requested port to be accessible from Internet via docker-machine", "*", port, proto) rl = append(rl, r) } log.Debugf("Total NSG rules: %d", len(rl)) return &rl, nil }
func (s *TemplateBuilder) SetOSDiskSizeGB(diskSizeGB int32) error { resource, err := s.getResourceByType(resourceVirtualMachine) if err != nil { return err } profile := resource.Properties.StorageProfile profile.OsDisk.DiskSizeGB = to.Int32Ptr(diskSizeGB) return nil }
// makeSecurityRule returns an inbound TCP allow rule with the given name,
// permitting traffic to ipAddress on the given port range at priority 200.
func makeSecurityRule(name, ipAddress, ports string) network.SecurityRule {
	props := network.SecurityRulePropertiesFormat{
		Protocol:                 network.TCP,
		DestinationAddressPrefix: to.StringPtr(ipAddress),
		DestinationPortRange:     to.StringPtr(ports),
		Access:                   network.Allow,
		Priority:                 to.Int32Ptr(200),
		Direction:                network.Inbound,
	}
	return network.SecurityRule{
		Name:       to.StringPtr(name),
		Properties: &props,
	}
}
func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) { rules := []network.SecurityRule{} var i int32 for i = loadBalancerMinimumPriority; i < loadBalancerMaximumPriority; i++ { rules = append(rules, network.SecurityRule{ Properties: &network.SecurityRulePropertiesFormat{ Priority: to.Int32Ptr(i), }, }) } _, err := getNextAvailablePriority(rules) if err == nil { t.Error("Expectected an error. There are no priority levels left.") } }
// newStorageProfile creates the storage profile for a virtual machine, // based on the series and chosen instance spec. func newStorageProfile( vmName string, storageAccountName string, instanceSpec *instances.InstanceSpec, ) (*compute.StorageProfile, error) { logger.Debugf("creating storage profile for %q", vmName) urnParts := strings.SplitN(instanceSpec.Image.Id, ":", 4) if len(urnParts) != 4 { return nil, errors.Errorf("invalid image ID %q", instanceSpec.Image.Id) } publisher := urnParts[0] offer := urnParts[1] sku := urnParts[2] version := urnParts[3] osDisksRoot := fmt.Sprintf( `reference(resourceId('Microsoft.Storage/storageAccounts', '%s'), '%s').primaryEndpoints.blob`, storageAccountName, storage.APIVersion, ) osDiskName := vmName osDiskURI := fmt.Sprintf( `[concat(%s, '%s/%s%s')]`, osDisksRoot, osDiskVHDContainer, osDiskName, vhdExtension, ) osDiskSizeGB := mibToGB(instanceSpec.InstanceType.RootDisk) osDisk := &compute.OSDisk{ Name: to.StringPtr(osDiskName), CreateOption: compute.FromImage, Caching: compute.ReadWrite, Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(osDiskURI)}, DiskSizeGB: to.Int32Ptr(int32(osDiskSizeGB)), } return &compute.StorageProfile{ ImageReference: &compute.ImageReference{ Publisher: to.StringPtr(publisher), Offer: to.StringPtr(offer), Sku: to.StringPtr(sku), Version: to.StringPtr(version), }, OsDisk: osDisk, }, nil }
func TestSecurityRulePriorityPicksNextAvailablePriority(t *testing.T) { rules := []network.SecurityRule{} var expectedPriority int32 = loadBalancerMinimumPriority + 50 var i int32 for i = loadBalancerMinimumPriority; i < expectedPriority; i++ { rules = append(rules, network.SecurityRule{ Properties: &network.SecurityRulePropertiesFormat{ Priority: to.Int32Ptr(i), }, }) } priority, err := getNextAvailablePriority(rules) if err != nil { t.Errorf("Unexpectected error: %q", err) } if priority != expectedPriority { t.Errorf("Expected priority %d. Got priority %d.", expectedPriority, priority) } }
// TestAttachVolumes exercises AttachVolumes across five cases: two
// attachments to a machine with free LUNs, one to a machine that already
// has the volume attached, one to a missing machine, and one to a machine
// with all 32 LUNs in use. It then validates the HTTP requests sent.
func (s *storageSuite) TestAttachVolumes(c *gc.C) {
	// machine-1 has a single data disk with LUN 0.
	machine1DataDisks := []compute.DataDisk{{
		Lun:  to.Int32Ptr(0),
		Name: to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd",
				storageAccountName,
			)),
		},
	}}
	// machine-2 has 32 data disks; no LUNs free.
	machine2DataDisks := make([]compute.DataDisk, 32)
	for i := range machine2DataDisks {
		machine2DataDisks[i].Lun = to.Int32Ptr(int32(i))
		machine2DataDisks[i].Name = to.StringPtr(fmt.Sprintf("volume-%d", i))
		machine2DataDisks[i].Vhd = &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-%d.vhd",
				storageAccountName, i,
			)),
		}
	}

	// volume-0 and volume-2 are attached to machine-0
	// volume-1 is attached to machine-1
	// volume-3 is attached to machine-42, but machine-42 is missing
	// volume-42 is attached to machine-2, but machine-2 has no free LUNs
	makeParams := func(volume, machine string, size uint64) storage.VolumeAttachmentParams {
		return storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider:   "azure",
				Machine:    names.NewMachineTag(machine),
				InstanceId: instance.Id("machine-" + machine),
			},
			Volume:   names.NewVolumeTag(volume),
			VolumeId: "volume-" + volume,
		}
	}
	params := []storage.VolumeAttachmentParams{
		makeParams("0", "0", 1),
		makeParams("1", "1", 1025),
		makeParams("2", "0", 1024),
		makeParams("3", "42", 40),
		makeParams("42", "2", 50),
	}

	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine1DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-2"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine2DataDisks},
		},
	}}

	// There should be one API call to list VMs, and one update per modified instance.
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		virtualMachinesSender,
		s.accountSender(),
		updateVirtualMachine0Sender,
	}

	results, err := volumeSource.AttachVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0].Error, jc.ErrorIsNil)
	c.Check(results[1].Error, jc.ErrorIsNil)
	c.Check(results[2].Error, jc.ErrorIsNil)
	c.Check(results[3].Error, gc.ErrorMatches, "instance machine-42 not found")
	c.Check(results[4].Error, gc.ErrorMatches, "choosing LUN: all LUNs are in use")

	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 3)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0

	// machine-0 should have been updated with volume-0 (LUN 0) and
	// volume-2 (LUN 1), both attached to existing VHDs.
	machine0DataDisks := []compute.DataDisk{{
		Lun:  to.Int32Ptr(0),
		Name: to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd",
			storageAccountName,
		))},
		Caching:      compute.ReadWrite,
		CreateOption: compute.Attach,
	}, {
		Lun:  to.Int32Ptr(1),
		Name: to.StringPtr("volume-2"),
		Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf(
			"https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd",
			storageAccountName,
		))},
		Caching:      compute.ReadWrite,
		CreateOption: compute.Attach,
	}}
	virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks
	assertRequestBody(c, s.requests[2], &virtualMachines[0])
}
// assertStartInstanceRequests reconstructs the ARM deployment template we
// expect StartInstance to send (NSG, virtual network, storage account,
// optional availability set, public IP, NIC, VM, and optional VM
// extension), then compares it against the deployment request actually
// recorded in requests. It returns the interesting requests for further
// inspection by the caller.
func (s *environSuite) assertStartInstanceRequests(
	c *gc.C,
	requests []*http.Request,
	args assertStartInstanceRequestsParams,
) startInstanceRequests {
	nsgId := `[resourceId('Microsoft.Network/networkSecurityGroups', 'juju-internal-nsg')]`
	securityRules := []network.SecurityRule{{
		Name: to.StringPtr("SSHInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description:              to.StringPtr("Allow SSH access to all machines"),
			Protocol:                 network.TCP,
			SourceAddressPrefix:      to.StringPtr("*"),
			SourcePortRange:          to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("*"),
			DestinationPortRange:     to.StringPtr("22"),
			Access:                   network.Allow,
			Priority:                 to.Int32Ptr(100),
			Direction:                network.Inbound,
		},
	}, {
		Name: to.StringPtr("JujuAPIInbound"),
		Properties: &network.SecurityRulePropertiesFormat{
			Description:              to.StringPtr("Allow API connections to controller machines"),
			Protocol:                 network.TCP,
			SourceAddressPrefix:      to.StringPtr("*"),
			SourcePortRange:          to.StringPtr("*"),
			DestinationAddressPrefix: to.StringPtr("192.168.16.0/20"),
			DestinationPortRange:     to.StringPtr("17777"),
			Access:                   network.Allow,
			Priority:                 to.Int32Ptr(101),
			Direction:                network.Inbound,
		},
	}}
	subnets := []network.Subnet{{
		Name: to.StringPtr("juju-internal-subnet"),
		Properties: &network.SubnetPropertiesFormat{
			AddressPrefix: to.StringPtr("192.168.0.0/20"),
			NetworkSecurityGroup: &network.SecurityGroup{
				ID: to.StringPtr(nsgId),
			},
		},
	}, {
		Name: to.StringPtr("juju-controller-subnet"),
		Properties: &network.SubnetPropertiesFormat{
			AddressPrefix: to.StringPtr("192.168.16.0/20"),
			NetworkSecurityGroup: &network.SecurityGroup{
				ID: to.StringPtr(nsgId),
			},
		},
	}}

	// Controller machines go on the controller subnet; everything
	// else on the internal subnet.
	subnetName := "juju-internal-subnet"
	privateIPAddress := "192.168.0.4"
	if args.availabilitySetName == "juju-controller" {
		subnetName = "juju-controller-subnet"
		privateIPAddress = "192.168.16.4"
	}
	subnetId := fmt.Sprintf(
		`[concat(resourceId('Microsoft.Network/virtualNetworks', 'juju-internal-network'), '/subnets/%s')]`,
		subnetName,
	)

	publicIPAddressId := `[resourceId('Microsoft.Network/publicIPAddresses', 'machine-0-public-ip')]`

	ipConfigurations := []network.InterfaceIPConfiguration{{
		Name: to.StringPtr("primary"),
		Properties: &network.InterfaceIPConfigurationPropertiesFormat{
			Primary:                   to.BoolPtr(true),
			PrivateIPAddress:          to.StringPtr(privateIPAddress),
			PrivateIPAllocationMethod: network.Static,
			Subnet: &network.Subnet{ID: to.StringPtr(subnetId)},
			PublicIPAddress: &network.PublicIPAddress{
				ID: to.StringPtr(publicIPAddressId),
			},
		},
	}}

	nicId := `[resourceId('Microsoft.Network/networkInterfaces', 'machine-0-primary')]`
	nics := []compute.NetworkInterfaceReference{{
		ID: to.StringPtr(nicId),
		Properties: &compute.NetworkInterfaceReferenceProperties{
			Primary: to.BoolPtr(true),
		},
	}}
	vmDependsOn := []string{
		nicId,
		`[resourceId('Microsoft.Storage/storageAccounts', '` + storageAccountName + `')]`,
	}

	addressPrefixes := []string{"192.168.0.0/20", "192.168.16.0/20"}
	templateResources := []armtemplates.Resource{{
		APIVersion: network.APIVersion,
		Type:       "Microsoft.Network/networkSecurityGroups",
		Name:       "juju-internal-nsg",
		Location:   "westus",
		Tags:       to.StringMap(s.envTags),
		Properties: &network.SecurityGroupPropertiesFormat{
			SecurityRules: &securityRules,
		},
	}, {
		APIVersion: network.APIVersion,
		Type:       "Microsoft.Network/virtualNetworks",
		Name:       "juju-internal-network",
		Location:   "westus",
		Tags:       to.StringMap(s.envTags),
		Properties: &network.VirtualNetworkPropertiesFormat{
			AddressSpace: &network.AddressSpace{&addressPrefixes},
			Subnets:      &subnets,
		},
		DependsOn: []string{nsgId},
	}, {
		APIVersion: storage.APIVersion,
		Type:       "Microsoft.Storage/storageAccounts",
		Name:       storageAccountName,
		Location:   "westus",
		Tags:       to.StringMap(s.envTags),
		StorageSku: &storage.Sku{
			Name: storage.SkuName("Standard_LRS"),
		},
	}}

	var availabilitySetSubResource *compute.SubResource
	if args.availabilitySetName != "" {
		availabilitySetId := fmt.Sprintf(
			`[resourceId('Microsoft.Compute/availabilitySets','%s')]`,
			args.availabilitySetName,
		)
		templateResources = append(templateResources, armtemplates.Resource{
			APIVersion: compute.APIVersion,
			Type:       "Microsoft.Compute/availabilitySets",
			Name:       args.availabilitySetName,
			Location:   "westus",
			Tags:       to.StringMap(s.envTags),
		})
		availabilitySetSubResource = &compute.SubResource{
			ID: to.StringPtr(availabilitySetId),
		}
		// The VM must be created after its availability set.
		vmDependsOn = append([]string{availabilitySetId}, vmDependsOn...)
	}

	templateResources = append(templateResources, []armtemplates.Resource{{
		APIVersion: network.APIVersion,
		Type:       "Microsoft.Network/publicIPAddresses",
		Name:       "machine-0-public-ip",
		Location:   "westus",
		Tags:       to.StringMap(s.vmTags),
		Properties: &network.PublicIPAddressPropertiesFormat{
			PublicIPAllocationMethod: network.Dynamic,
		},
	}, {
		APIVersion: network.APIVersion,
		Type:       "Microsoft.Network/networkInterfaces",
		Name:       "machine-0-primary",
		Location:   "westus",
		Tags:       to.StringMap(s.vmTags),
		Properties: &network.InterfacePropertiesFormat{
			IPConfigurations: &ipConfigurations,
		},
		DependsOn: []string{
			publicIPAddressId,
			`[resourceId('Microsoft.Network/virtualNetworks', 'juju-internal-network')]`,
		},
	}, {
		APIVersion: compute.APIVersion,
		Type:       "Microsoft.Compute/virtualMachines",
		Name:       "machine-0",
		Location:   "westus",
		Tags:       to.StringMap(s.vmTags),
		Properties: &compute.VirtualMachineProperties{
			HardwareProfile: &compute.HardwareProfile{
				VMSize: "Standard_D1",
			},
			StorageProfile: &compute.StorageProfile{
				ImageReference: args.imageReference,
				OsDisk: &compute.OSDisk{
					Name:         to.StringPtr("machine-0"),
					CreateOption: compute.FromImage,
					Caching:      compute.ReadWrite,
					Vhd: &compute.VirtualHardDisk{
						URI: to.StringPtr(fmt.Sprintf(
							`[concat(reference(resourceId('Microsoft.Storage/storageAccounts', '%s'), '%s').primaryEndpoints.blob, 'osvhds/machine-0.vhd')]`,
							storageAccountName, storage.APIVersion,
						)),
					},
					DiskSizeGB: to.Int32Ptr(int32(args.diskSizeGB)),
				},
			},
			OsProfile:       args.osProfile,
			NetworkProfile:  &compute.NetworkProfile{&nics},
			AvailabilitySet: availabilitySetSubResource,
		},
		DependsOn: vmDependsOn,
	}}...)

	if args.vmExtension != nil {
		templateResources = append(templateResources, armtemplates.Resource{
			APIVersion: compute.APIVersion,
			Type:       "Microsoft.Compute/virtualMachines/extensions",
			Name:       "machine-0/JujuCustomScriptExtension",
			Location:   "westus",
			Tags:       to.StringMap(s.vmTags),
			Properties: args.vmExtension,
			DependsOn:  []string{"Microsoft.Compute/virtualMachines/machine-0"},
		})
	}
	templateMap := map[string]interface{}{
		"$schema":        "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
		"contentVersion": "1.0.0.0",
		"resources":      templateResources,
	}
	deployment := &resources.Deployment{
		&resources.DeploymentProperties{
			Template: &templateMap,
			Mode:     resources.Incremental,
		},
	}

	// Validate HTTP request bodies.
	var startInstanceRequests startInstanceRequests
	if args.vmExtension != nil {
		// It must be Windows or CentOS, so
		// there should be no image query.
		c.Assert(requests, gc.HasLen, numExpectedStartInstanceRequests-1)
		c.Assert(requests[0].Method, gc.Equals, "GET") // vmSizes
		c.Assert(requests[1].Method, gc.Equals, "PUT") // create deployment
		startInstanceRequests.vmSizes = requests[0]
		startInstanceRequests.deployment = requests[1]
	} else {
		c.Assert(requests, gc.HasLen, numExpectedStartInstanceRequests)
		c.Assert(requests[0].Method, gc.Equals, "GET") // vmSizes
		c.Assert(requests[1].Method, gc.Equals, "GET") // skus
		c.Assert(requests[2].Method, gc.Equals, "PUT") // create deployment
		startInstanceRequests.vmSizes = requests[0]
		startInstanceRequests.skus = requests[1]
		startInstanceRequests.deployment = requests[2]
	}

	// Marshal/unmarshal the deployment we expect, so it's in map form.
	var expected resources.Deployment
	data, err := json.Marshal(&deployment)
	c.Assert(err, jc.ErrorIsNil)
	err = json.Unmarshal(data, &expected)
	c.Assert(err, jc.ErrorIsNil)

	// Check that we send what we expect. CustomData is non-deterministic,
	// so don't compare it.
	// TODO(axw) shouldn't CustomData be deterministic? Look into this.
	var actual resources.Deployment
	unmarshalRequestBody(c, startInstanceRequests.deployment, &actual)
	c.Assert(actual.Properties, gc.NotNil)
	c.Assert(actual.Properties.Template, gc.NotNil)
	resources := (*actual.Properties.Template)["resources"].([]interface{})
	c.Assert(resources, gc.HasLen, len(templateResources))

	// The VM resource is last unless a VM extension follows it.
	vmResourceIndex := len(resources) - 1
	if args.vmExtension != nil {
		vmResourceIndex--
	}
	vmResource := resources[vmResourceIndex].(map[string]interface{})
	vmResourceProperties := vmResource["properties"].(map[string]interface{})
	// Neutralise the non-deterministic customData before comparing.
	osProfile := vmResourceProperties["osProfile"].(map[string]interface{})
	osProfile["customData"] = "<juju-goes-here>"
	c.Assert(actual, jc.DeepEquals, expected)
	return startInstanceRequests
}
// SetUpTest prepares the common fixture for every environSuite test:
// a mock storage client, recorded HTTP senders/requests, an
// auto-advancing retry clock, and canned Azure responses (resource
// group, VM sizes, storage account + keys, and Ubuntu server SKUs).
func (s *environSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.storageClient = azuretesting.MockStorageClient{}
	s.sender = nil
	s.requests = nil
	s.retryClock = mockClock{Clock: gitjujutesting.NewClock(time.Time{})}

	s.provider = newProvider(c, azure.ProviderConfig{
		Sender:           azuretesting.NewSerialSender(&s.sender),
		RequestInspector: azuretesting.RequestRecorder(&s.requests),
		NewStorageClient: s.storageClient.NewClient,
		RetryClock: &gitjujutesting.AutoAdvancingClock{
			&s.retryClock, s.retryClock.Advance,
		},
		RandomWindowsAdminPassword:        func() string { return "sorandom" },
		InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal,
	})

	s.controllerUUID = testing.ControllerTag.Id()
	s.envTags = map[string]*string{
		"juju-model-uuid":      to.StringPtr(testing.ModelTag.Id()),
		"juju-controller-uuid": to.StringPtr(s.controllerUUID),
	}
	s.vmTags = map[string]*string{
		"juju-model-uuid":      to.StringPtr(testing.ModelTag.Id()),
		"juju-controller-uuid": to.StringPtr(s.controllerUUID),
		"juju-machine-name":    to.StringPtr("machine-0"),
	}

	s.group = &resources.ResourceGroup{
		Location: to.StringPtr("westus"),
		Tags:     &s.envTags,
		Properties: &resources.ResourceGroupProperties{
			ProvisioningState: to.StringPtr("Succeeded"),
		},
	}

	vmSizes := []compute.VirtualMachineSize{{
		Name:                 to.StringPtr("Standard_D1"),
		NumberOfCores:        to.Int32Ptr(1),
		OsDiskSizeInMB:       to.Int32Ptr(1047552),
		ResourceDiskSizeInMB: to.Int32Ptr(51200),
		MemoryInMB:           to.Int32Ptr(3584),
		MaxDataDiskCount:     to.Int32Ptr(2),
	}}
	s.vmSizes = &compute.VirtualMachineSizeListResult{Value: &vmSizes}

	s.storageAccount = &storage.Account{
		Name: to.StringPtr("my-storage-account"),
		Type: to.StringPtr("Standard_LRS"),
		Tags: &s.envTags,
		Properties: &storage.AccountProperties{
			PrimaryEndpoints: &storage.Endpoints{
				Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", storageAccountName)),
			},
			ProvisioningState: "Succeeded",
		},
	}

	keys := []storage.AccountKey{{
		KeyName:     to.StringPtr("key-1-name"),
		Value:       to.StringPtr("key-1"),
		Permissions: storage.FULL,
	}}
	s.storageAccountKeys = &storage.AccountListKeysResult{
		Keys: &keys,
	}

	s.ubuntuServerSKUs = []compute.VirtualMachineImageResource{
		{Name: to.StringPtr("12.04-LTS")},
		{Name: to.StringPtr("12.10")},
		{Name: to.StringPtr("14.04-LTS")},
		{Name: to.StringPtr("15.04")},
		{Name: to.StringPtr("15.10")},
		{Name: to.StringPtr("16.04-LTS")},
	}

	s.deployment = nil
}
// TestDetachVolumes exercises DetachVolumes: detaching an attached volume
// (requested twice, so the second request must be a no-op), detaching a
// volume that is not attached, and detaching from a machine that does not
// exist. It then validates the HTTP requests sent.
func (s *storageSuite) TestDetachVolumes(c *gc.C) {
	// machine-0 has three data disks: volume-0, volume-1 and volume-2.
	machine0DataDisks := []compute.DataDisk{{
		Lun:  to.Int32Ptr(0),
		Name: to.StringPtr("volume-0"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd",
				storageAccountName,
			)),
		},
	}, {
		Lun:  to.Int32Ptr(1),
		Name: to.StringPtr("volume-1"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd",
				storageAccountName,
			)),
		},
	}, {
		Lun:  to.Int32Ptr(2),
		Name: to.StringPtr("volume-2"),
		Vhd: &compute.VirtualHardDisk{
			URI: to.StringPtr(fmt.Sprintf(
				"https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd",
				storageAccountName,
			)),
		},
	}}

	makeParams := func(volume, machine string) storage.VolumeAttachmentParams {
		return storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				Provider:   "azure",
				Machine:    names.NewMachineTag(machine),
				InstanceId: instance.Id("machine-" + machine),
			},
			Volume:   names.NewVolumeTag(volume),
			VolumeId: "volume-" + volume,
		}
	}
	params := []storage.VolumeAttachmentParams{
		makeParams("1", "0"),
		makeParams("1", "0"),
		makeParams("42", "1"),
		makeParams("2", "42"),
	}

	virtualMachines := []compute.VirtualMachine{{
		Name: to.StringPtr("machine-0"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{DataDisks: &machine0DataDisks},
		},
	}, {
		Name: to.StringPtr("machine-1"),
		Properties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{},
		},
	}}

	// There should be one API call to list VMs, and one update per modified instance.
	virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{
		Value: &virtualMachines,
	})
	virtualMachinesSender.PathPattern = `.*/Microsoft\.Compute/virtualMachines`
	updateVirtualMachine0Sender := azuretesting.NewSenderWithValue(&compute.VirtualMachine{})
	updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0`
	volumeSource := s.volumeSource(c)
	s.sender = azuretesting.Senders{
		virtualMachinesSender,
		s.accountSender(),
		updateVirtualMachine0Sender,
	}

	results, err := volumeSource.DetachVolumes(params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, len(params))
	c.Check(results[0], jc.ErrorIsNil)
	c.Check(results[1], jc.ErrorIsNil)
	c.Check(results[2], jc.ErrorIsNil)
	c.Check(results[3], gc.ErrorMatches, "instance machine-42 not found")

	// Validate HTTP request bodies.
	c.Assert(s.requests, gc.HasLen, 3)
	c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines
	c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts
	c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0

	// machine-0 should have been updated with volume-1 removed.
	machine0DataDisks = []compute.DataDisk{
		machine0DataDisks[0],
		machine0DataDisks[2],
	}
	virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks
	assertRequestBody(c, s.requests[2], &virtualMachines[0])
}
// TestInstanceOpenPortsAlreadyOpen verifies that OpenPorts is idempotent:
// when the NSG already contains a rule covering tcp/1000, only the
// udp/1000-2000 rule is created (one GET for the NSG, one PUT for the
// new rule).
func (s *instanceSuite) TestInstanceOpenPortsAlreadyOpen(c *gc.C) {
	internalSubnetId := path.Join(
		"/subscriptions", fakeSubscriptionId,
		"resourceGroups/juju-testenv-model-deadbeef-0bad-400d-8000-4b1d0d06f00d",
		"providers/Microsoft.Network/virtualnetworks/juju-internal-network/subnets/juju-internal-subnet",
	)
	ipConfiguration := network.InterfaceIPConfiguration{
		Properties: &network.InterfaceIPConfigurationPropertiesFormat{
			Primary:          to.BoolPtr(true),
			PrivateIPAddress: to.StringPtr("10.0.0.4"),
			Subnet: &network.Subnet{
				ID: to.StringPtr(internalSubnetId),
			},
		},
	}
	s.networkInterfaces = []network.Interface{
		makeNetworkInterface("nic-0", "machine-0", ipConfiguration),
	}

	inst := s.getInstance(c)
	okSender := mocks.NewSender()
	okSender.AppendResponse(mocks.NewResponseWithContent("{}"))
	// The NSG already allows any-protocol traffic to port 1000, so the
	// tcp/1000 request below must not create a new rule.
	nsgSender := networkSecurityGroupSender([]network.SecurityRule{{
		Name: to.StringPtr("machine-0-tcp-1000"),
		Properties: &network.SecurityRulePropertiesFormat{
			Protocol:             network.Asterisk,
			DestinationPortRange: to.StringPtr("1000"),
			Access:               network.Allow,
			Priority:             to.Int32Ptr(202),
			Direction:            network.Inbound,
		},
	}})
	s.sender = azuretesting.Senders{nsgSender, okSender, okSender}

	err := inst.OpenPorts("0", []jujunetwork.PortRange{{
		Protocol: "tcp", FromPort: 1000, ToPort: 1000,
	}, {
		Protocol: "udp", FromPort: 1000, ToPort: 2000,
	}})
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(s.requests, gc.HasLen, 2)
	c.Assert(s.requests[0].Method, gc.Equals, "GET")
	c.Assert(s.requests[0].URL.Path, gc.Equals, internalSecurityGroupPath)
	c.Assert(s.requests[1].Method, gc.Equals, "PUT")
	c.Assert(s.requests[1].URL.Path, gc.Equals, securityRulePath("machine-0-udp-1000-2000"))
	assertRequestBody(c, s.requests[1], &network.SecurityRule{
		Properties: &network.SecurityRulePropertiesFormat{
			Description:              to.StringPtr("1000-2000/udp"),
			Protocol:                 network.UDP,
			SourcePortRange:          to.StringPtr("*"),
			SourceAddressPrefix:      to.StringPtr("*"),
			DestinationPortRange:     to.StringPtr("1000-2000"),
			DestinationAddressPrefix: to.StringPtr("10.0.0.4"),
			Access:                   network.Allow,
			Priority:                 to.Int32Ptr(200),
			Direction:                network.Inbound,
		},
	})
}
// OpenPorts is specified in the Instance interface. func (inst *azureInstance) OpenPorts(machineId string, ports []jujunetwork.PortRange) error { nsgClient := network.SecurityGroupsClient{inst.env.network} securityRuleClient := network.SecurityRulesClient{inst.env.network} primaryNetworkAddress, err := inst.primaryNetworkAddress() if err != nil { return errors.Trace(err) } securityGroupName := internalSecurityGroupName var nsg network.SecurityGroup if err := inst.env.callAPI(func() (autorest.Response, error) { var err error nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName, "") return nsg.Response, err }); err != nil { return errors.Annotate(err, "querying network security group") } var securityRules []network.SecurityRule if nsg.Properties.SecurityRules != nil { securityRules = *nsg.Properties.SecurityRules } else { nsg.Properties.SecurityRules = &securityRules } // Create rules one at a time; this is necessary to avoid trampling // on changes made by the provisioner. We still record rules in the // NSG in memory, so we can easily tell which priorities are available. vmName := resourceName(names.NewMachineTag(machineId)) prefix := instanceNetworkSecurityRulePrefix(instance.Id(vmName)) for _, ports := range ports { ruleName := securityRuleName(prefix, ports) // Check if the rule already exists; OpenPorts must be idempotent. 
var found bool for _, rule := range securityRules { if to.String(rule.Name) == ruleName { found = true break } } if found { logger.Debugf("security rule %q already exists", ruleName) continue } logger.Debugf("creating security rule %q", ruleName) priority, err := nextSecurityRulePriority(nsg, securityRuleInternalMax+1, securityRuleMax) if err != nil { return errors.Annotatef(err, "getting security rule priority for %s", ports) } var protocol network.SecurityRuleProtocol switch ports.Protocol { case "tcp": protocol = network.TCP case "udp": protocol = network.UDP default: return errors.Errorf("invalid protocol %q", ports.Protocol) } var portRange string if ports.FromPort != ports.ToPort { portRange = fmt.Sprintf("%d-%d", ports.FromPort, ports.ToPort) } else { portRange = fmt.Sprint(ports.FromPort) } rule := network.SecurityRule{ Properties: &network.SecurityRulePropertiesFormat{ Description: to.StringPtr(ports.String()), Protocol: protocol, SourcePortRange: to.StringPtr("*"), DestinationPortRange: to.StringPtr(portRange), SourceAddressPrefix: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr(primaryNetworkAddress.Value), Access: network.Allow, Priority: to.Int32Ptr(priority), Direction: network.Inbound, }, } if err := inst.env.callAPI(func() (autorest.Response, error) { return securityRuleClient.CreateOrUpdate( inst.env.resourceGroup, securityGroupName, ruleName, rule, nil, // abort channel ) }); err != nil { return errors.Annotatef(err, "creating security rule for %s", ports) } securityRules = append(securityRules, rule) } return nil }
// reconcileLoadBalancer ensures the load balancer exists and the frontend ip
// config is set up. It also reconciles the Service's Ports with the
// LoadBalancer config: adding rules/probes for expected Ports and removing
// stale rules/probes. It returns the (possibly modified) LoadBalancer, a
// dirty flag indicating whether any change was made, and an error.
// Note: the LoadBalancer is mutated in memory only; the caller is
// responsible for pushing changes to Azure when the dirty flag is set.
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodeNames []string) (network.LoadBalancer, bool, error) {
	lbName := getLoadBalancerName(clusterName)
	serviceName := getServiceName(service)
	lbFrontendIPConfigName := getFrontendIPConfigName(service)
	lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
	lbBackendPoolName := getBackendPoolName(clusterName)
	lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)

	// An empty Ports list means the LB frontend/rules should be torn down.
	wantLb := len(service.Spec.Ports) > 0
	dirtyLb := false

	// Ensure LoadBalancer's Backend Pool Configuration.
	// Exactly one backend pool, named for the cluster, is expected.
	if wantLb {
		if lb.Properties.BackendAddressPools == nil ||
			len(*lb.Properties.BackendAddressPools) == 0 {
			lb.Properties.BackendAddressPools = &[]network.BackendAddressPool{
				{
					Name: to.StringPtr(lbBackendPoolName),
				},
			}
			glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding", serviceName, wantLb)
			dirtyLb = true
		} else if len(*lb.Properties.BackendAddressPools) != 1 ||
			!strings.EqualFold(*(*lb.Properties.BackendAddressPools)[0].Name, lbBackendPoolName) {
			return lb, false, fmt.Errorf("loadbalancer is misconfigured with a different backend pool")
		}
	}

	// Ensure LoadBalancer's Frontend IP Configurations:
	// drop this service's config when no ports are wanted, otherwise
	// make sure one exists pointing at the service's public IP.
	dirtyConfigs := false
	newConfigs := []network.FrontendIPConfiguration{}
	if lb.Properties.FrontendIPConfigurations != nil {
		newConfigs = *lb.Properties.FrontendIPConfigurations
	}
	if !wantLb {
		// Iterate backwards so in-place removal doesn't skip elements.
		for i := len(newConfigs) - 1; i >= 0; i-- {
			config := newConfigs[i]
			if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
				glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
				newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
				dirtyConfigs = true
			}
		}
	} else {
		foundConfig := false
		for _, config := range newConfigs {
			if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
				foundConfig = true
				break
			}
		}
		if !foundConfig {
			newConfigs = append(newConfigs,
				network.FrontendIPConfiguration{
					Name: to.StringPtr(lbFrontendIPConfigName),
					Properties: &network.FrontendIPConfigurationPropertiesFormat{
						PublicIPAddress: &network.PublicIPAddress{
							ID: pip.ID,
						},
					},
				})
			glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
			dirtyConfigs = true
		}
	}
	if dirtyConfigs {
		dirtyLb = true
		lb.Properties.FrontendIPConfigurations = &newConfigs
	}

	// Build the expected probes/rules, one pair per service port.
	expectedProbes := make([]network.Probe, len(service.Spec.Ports))
	expectedRules := make([]network.LoadBalancingRule, len(service.Spec.Ports))
	for i, port := range service.Spec.Ports {
		lbRuleName := getRuleName(service, port)

		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return lb, false, err
		}

		if serviceapi.NeedsHealthCheck(service) {
			// Health-check services probe an HTTP path on the node.
			podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)

			expectedProbes[i] = network.Probe{
				Name: &lbRuleName,
				Properties: &network.ProbePropertiesFormat{
					RequestPath:       to.StringPtr(podPresencePath),
					Protocol:          network.ProbeProtocolHTTP,
					Port:              to.Int32Ptr(podPresencePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			}
		} else {
			// Otherwise probe the service's NodePort directly.
			expectedProbes[i] = network.Probe{
				Name: &lbRuleName,
				Properties: &network.ProbePropertiesFormat{
					Protocol:          probeProto,
					Port:              to.Int32Ptr(port.NodePort),
					IntervalInSeconds: to.Int32Ptr(5),
					NumberOfProbes:    to.Int32Ptr(2),
				},
			}
		}

		expectedRules[i] = network.LoadBalancingRule{
			Name: &lbRuleName,
			Properties: &network.LoadBalancingRulePropertiesFormat{
				Protocol: transportProto,
				FrontendIPConfiguration: &network.SubResource{
					ID: to.StringPtr(lbFrontendIPConfigID),
				},
				BackendAddressPool: &network.SubResource{
					ID: to.StringPtr(lbBackendPoolID),
				},
				Probe: &network.SubResource{
					ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
				},
				FrontendPort:     to.Int32Ptr(port.Port),
				BackendPort:      to.Int32Ptr(port.Port),
				EnableFloatingIP: to.BoolPtr(true),
			},
		}
	}

	// Remove unwanted probes. Only probes owned by this service are
	// candidates for eviction; others are left untouched.
	dirtyProbes := false
	var updatedProbes []network.Probe
	if lb.Properties.Probes != nil {
		updatedProbes = *lb.Properties.Probes
	}
	for i := len(updatedProbes) - 1; i >= 0; i-- {
		existingProbe := updatedProbes[i]
		if serviceOwnsRule(service, *existingProbe.Name) {
			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
			keepProbe := false
			if findProbe(expectedProbes, existingProbe) {
				glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
				keepProbe = true
			}
			if !keepProbe {
				updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
				glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
				dirtyProbes = true
			}
		}
	}
	// Add missing, wanted probes.
	for _, expectedProbe := range expectedProbes {
		foundProbe := false
		if findProbe(updatedProbes, expectedProbe) {
			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
			foundProbe = true
		}
		if !foundProbe {
			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
			updatedProbes = append(updatedProbes, expectedProbe)
			dirtyProbes = true
		}
	}
	if dirtyProbes {
		dirtyLb = true
		lb.Properties.Probes = &updatedProbes
	}

	// Update rules, mirroring the probe logic above.
	dirtyRules := false
	var updatedRules []network.LoadBalancingRule
	if lb.Properties.LoadBalancingRules != nil {
		updatedRules = *lb.Properties.LoadBalancingRules
	}
	// Update rules: remove unwanted.
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if serviceOwnsRule(service, *existingRule.Name) {
			keepRule := false
			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			if findRule(expectedRules, existingRule) {
				glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtyRules = true
			}
		}
	}
	// Update rules: add needed.
	for _, expectedRule := range expectedRules {
		foundRule := false
		if findRule(updatedRules, expectedRule) {
			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if !foundRule {
			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
			updatedRules = append(updatedRules, expectedRule)
			dirtyRules = true
		}
	}
	if dirtyRules {
		dirtyLb = true
		lb.Properties.LoadBalancingRules = &updatedRules
	}
	return lb, dirtyLb, nil
}
// controller machines securityRuleInternalAPIInbound ) var ( sshSecurityRule = network.SecurityRule{ Name: to.StringPtr("SSHInbound"), Properties: &network.SecurityRulePropertiesFormat{ Description: to.StringPtr("Allow SSH access to all machines"), Protocol: network.TCP, SourceAddressPrefix: to.StringPtr("*"), SourcePortRange: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr("*"), DestinationPortRange: to.StringPtr("22"), Access: network.Allow, Priority: to.Int32Ptr(securityRuleInternalSSHInbound), Direction: network.Inbound, }, } apiSecurityRule = network.SecurityRule{ Name: to.StringPtr("JujuAPIInbound"), Properties: &network.SecurityRulePropertiesFormat{ Description: to.StringPtr("Allow API connections to controller machines"), Protocol: network.TCP, SourceAddressPrefix: to.StringPtr("*"), SourcePortRange: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr(controllerSubnetPrefix), // DestinationPortRange is set by createInternalNetworkSecurityGroup. Access: network.Allow, Priority: to.Int32Ptr(securityRuleInternalAPIInbound),
func (v *azureVolumeSource) attachVolume( vm *compute.VirtualMachine, p storage.VolumeAttachmentParams, storageAccount *armstorage.Account, ) (_ *storage.VolumeAttachment, updated bool, _ error) { storageAccount, err := v.env.getStorageAccount(false) if err != nil { return nil, false, errors.Trace(err) } dataDisksRoot := dataDiskVhdRoot(storageAccount) dataDiskName := p.VolumeId vhdURI := dataDisksRoot + dataDiskName + vhdExtension var dataDisks []compute.DataDisk if vm.Properties.StorageProfile.DataDisks != nil { dataDisks = *vm.Properties.StorageProfile.DataDisks } for _, disk := range dataDisks { if to.String(disk.Name) != p.VolumeId { continue } if to.String(disk.Vhd.URI) != vhdURI { continue } // Disk is already attached. volumeAttachment := &storage.VolumeAttachment{ p.Volume, p.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(to.Int32(disk.Lun)), }, } return volumeAttachment, false, nil } lun, err := nextAvailableLUN(vm) if err != nil { return nil, false, errors.Annotate(err, "choosing LUN") } dataDisk := compute.DataDisk{ Lun: to.Int32Ptr(lun), Name: to.StringPtr(dataDiskName), Vhd: &compute.VirtualHardDisk{to.StringPtr(vhdURI)}, Caching: compute.ReadWrite, CreateOption: compute.Attach, } dataDisks = append(dataDisks, dataDisk) vm.Properties.StorageProfile.DataDisks = &dataDisks volumeAttachment := storage.VolumeAttachment{ p.Volume, p.Machine, storage.VolumeAttachmentInfo{ BusAddress: diskBusAddress(lun), }, } return &volumeAttachment, true, nil }
// reconcileSecurityGroup reconciles the Network Security Group similar to how
// the LB is reconciled. This entails adding required, missing SecurityRules
// and removing stale rules. It returns the (in-memory) updated group, a dirty
// flag indicating whether any rule changed, and an error.
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service) (network.SecurityGroup, bool, error) {
	serviceName := getServiceName(service)
	wantLb := len(service.Spec.Ports) > 0

	// Determine the allowed source prefixes: "Internet" when the service
	// allows all sources, otherwise each configured source range.
	sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service)
	if err != nil {
		return sg, false, err
	}
	var sourceAddressPrefixes []string
	if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
		sourceAddressPrefixes = []string{"Internet"}
	} else {
		for _, ip := range sourceRanges {
			sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
		}
	}
	// One expected rule per (port, source prefix) combination.
	expectedSecurityRules := make([]network.SecurityRule, len(service.Spec.Ports)*len(sourceAddressPrefixes))

	for i, port := range service.Spec.Ports {
		securityRuleName := getRuleName(service, port)
		_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return sg, false, err
		}
		for j := range sourceAddressPrefixes {
			ix := i*len(sourceAddressPrefixes) + j
			// NOTE(review): all rules for the same port share one name,
			// so multiple source prefixes would yield duplicate rule
			// names here — confirm whether multi-source-range services
			// are handled correctly downstream.
			expectedSecurityRules[ix] = network.SecurityRule{
				Name: to.StringPtr(securityRuleName),
				Properties: &network.SecurityRulePropertiesFormat{
					Protocol:                 securityProto,
					SourcePortRange:          to.StringPtr("*"),
					DestinationPortRange:     to.StringPtr(strconv.Itoa(int(port.Port))),
					SourceAddressPrefix:      to.StringPtr(sourceAddressPrefixes[j]),
					DestinationAddressPrefix: to.StringPtr("*"),
					Access:                   network.Allow,
					Direction:                network.Inbound,
				},
			}
		}
	}

	// Update security rules.
	dirtySg := false
	var updatedRules []network.SecurityRule
	if sg.Properties.SecurityRules != nil {
		updatedRules = *sg.Properties.SecurityRules
	}
	// Update security rules: remove unwanted. Only rules owned by this
	// service are eviction candidates; iterate backwards for safe removal.
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if serviceOwnsRule(service, *existingRule.Name) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			keepRule := false
			if findSecurityRule(expectedSecurityRules, existingRule) {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtySg = true
			}
		}
	}
	// Update security rules: add needed. New rules get the next free
	// priority among the rules accumulated so far.
	for _, expectedRule := range expectedSecurityRules {
		foundRule := false
		if findSecurityRule(updatedRules, expectedRule) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if !foundRule {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)

			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
			if err != nil {
				return sg, false, err
			}

			expectedRule.Properties.Priority = to.Int32Ptr(nextAvailablePriority)
			updatedRules = append(updatedRules, expectedRule)
			dirtySg = true
		}
	}
	if dirtySg {
		sg.Properties.SecurityRules = &updatedRules
	}
	return sg, dirtySg, nil
}
func (s *instanceSuite) TestInstancePorts(c *gc.C) { inst := s.getInstance(c) nsgSender := networkSecurityGroupSender([]network.SecurityRule{{ Name: to.StringPtr("machine-0-xyzzy"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.UDP, DestinationPortRange: to.StringPtr("*"), Access: network.Allow, Priority: to.Int32Ptr(200), Direction: network.Inbound, }, }, { Name: to.StringPtr("machine-0-tcpcp"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.TCP, DestinationPortRange: to.StringPtr("1000-2000"), Access: network.Allow, Priority: to.Int32Ptr(201), Direction: network.Inbound, }, }, { Name: to.StringPtr("machine-0-http"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.Asterisk, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { Name: to.StringPtr("machine-00-ignored"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { Name: to.StringPtr("machine-0-ignored"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Deny, Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { Name: to.StringPtr("machine-0-ignored"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, Priority: to.Int32Ptr(202), Direction: network.Outbound, }, }, { Name: to.StringPtr("machine-0-ignored"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, Priority: to.Int32Ptr(199), // internal range Direction: network.Inbound, }, }}) s.sender = azuretesting.Senders{nsgSender} ports, err := inst.Ports("0") c.Assert(err, jc.ErrorIsNil) c.Assert(ports, 
jc.DeepEquals, []jujunetwork.PortRange{{ FromPort: 0, ToPort: 65535, Protocol: "udp", }, { FromPort: 1000, ToPort: 2000, Protocol: "tcp", }, { FromPort: 80, ToPort: 80, Protocol: "tcp", }, { FromPort: 80, ToPort: 80, Protocol: "udp", }}) }