Example #1
// getQuota achieves the aims of GetQuota().
func (p *openstackp) getQuota() (quota *Quota, err error) {
	// query our quota
	q, err := quotasets.Get(p.computeClient, os.Getenv("OS_TENANT_ID")).Extract()
	if err != nil {
		return
	}
	quota = &Quota{
		MaxRAM:       q.Ram,
		MaxCores:     q.Cores,
		MaxInstances: q.Instances,
	}

	// query all servers to figure out what we've used of our quota
	// (*** gophercloud currently doesn't implement getting this properly)
	pager := servers.List(p.computeClient, servers.ListOpts{})
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		serverList, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}

		for _, server := range serverList {
			quota.UsedInstances++
			f, found := p.fmap[server.Flavor["id"].(string)]
			if found { // should always be found...
				quota.UsedCores += f.Cores
				quota.UsedRAM += f.RAM
			}
		}

		return true, nil
	})

	return
}
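
Examples #1, #3 and #6 assume that p.computeClient is an already-authenticated gophercloud compute v2 ServiceClient; that setup is not shown on this page. Below is a minimal sketch of building such a client from the usual OS_* environment variables, assuming the pre-v2 gophercloud API used throughout these examples (the helper name is hypothetical):

package main

import (
	"log"
	"os"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

// newComputeClient is a hypothetical helper, not part of the examples on this
// page; it builds the compute (nova) v2 client that p.computeClient is
// assumed to be.
func newComputeClient() (*gophercloud.ServiceClient, error) {
	// read OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_ID etc. from the
	// environment
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		return nil, err
	}

	// authenticate against the identity service to get a provider client
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}

	// this is the client that servers.List, quotasets.Get and friends use
	return openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
		Region: os.Getenv("OS_REGION_NAME"),
	})
}

func main() {
	if _, err := newComputeClient(); err != nil {
		log.Fatal(err)
	}
}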
Example #2
func TestListServers(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	HandleServerListSuccessfully(t)

	pages := 0
	err := servers.List(client.ServiceClient(), servers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
		pages++

		actual, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}

		if len(actual) != 3 {
			t.Fatalf("Expected 3 servers, got %d", len(actual))
		}
		th.CheckDeepEquals(t, ServerHerp, actual[0])
		th.CheckDeepEquals(t, ServerDerp, actual[1])
		th.CheckDeepEquals(t, ServerMerp, actual[2])

		return true, nil
	})

	th.AssertNoErr(t, err)

	if pages != 1 {
		t.Errorf("Expected 1 page, saw %d", pages)
	}
}
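
The test depends on a HandleServerListSuccessfully fixture whose body is not shown here. As a rough, hypothetical stand-in, such a handler registers the /servers/detail route on the test mux and writes a JSON body with a top-level "servers" array, which is what servers.ExtractServers expects; it assumes the same imports as the test above, plus "fmt" and "net/http":

// handleServerListSketch is a hypothetical, trimmed-down stand-in for
// HandleServerListSuccessfully; the real fixture returns full server
// documents matching ServerHerp, ServerDerp and ServerMerp.
func handleServerListSketch(t *testing.T) {
	th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "GET")
		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)

		w.Header().Add("Content-Type", "application/json")
		// servers.ExtractServers unmarshals the top-level "servers" array
		fmt.Fprint(w, `{"servers": [{"id": "herp"}, {"id": "derp"}, {"id": "merp"}]}`)
	})
}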
Example #3
// tearDown achieves the aims of TearDown()
func (p *openstackp) tearDown(resources *Resources) (err error) {
	// throughout we'll ignore errors because we want to try and delete
	// as much as possible; we'll end up returning the last error we encountered

	// delete servers, except for ourselves
	pager := servers.List(p.computeClient, servers.ListOpts{})
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		serverList, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}

		for _, server := range serverList {
			if p.ownName != server.Name && strings.HasPrefix(server.Name, resources.ResourceName) {
				p.destroyServer(server.ID) // ignore errors, just try to delete others
			}
		}

		return true, nil
	})

	if p.ownName == "" {
		// delete router
		if id := resources.Details["router"]; id != "" {
			if subnetid := resources.Details["subnet"]; subnetid != "" {
				// remove the interface from our router first
				_, err = routers.RemoveInterface(p.networkClient, id, routers.RemoveInterfaceOpts{SubnetID: subnetid}).Extract()
			}
			err = routers.Delete(p.networkClient, id).ExtractErr()
		}

		// delete network (and its subnet)
		if id := resources.Details["network"]; id != "" {
			err = networks.Delete(p.networkClient, id).ExtractErr()
		}

		// delete secgroup
		if id := resources.Details["secgroup"]; id != "" {
			err = secgroups.Delete(p.computeClient, id).ExtractErr()
		}
	}

	// delete the keypair, unless we're running in OpenStack and the
	// securityGroup and keypair share the same resource name, indicating our
	// current server needs the same keypair we used to spawn our servers
	if id := resources.Details["keypair"]; id != "" {
		if p.ownName == "" || (p.securityGroup != "" && p.securityGroup != id) {
			err = keypairs.Delete(p.computeClient, id).ExtractErr()
			resources.PrivateKey = ""
		}
	}

	return
}
Example #4
func TestListAllServers(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	HandleServerListSuccessfully(t)

	allPages, err := servers.List(client.ServiceClient(), servers.ListOpts{}).AllPages()
	th.AssertNoErr(t, err)
	actual, err := servers.ExtractServers(allPages)
	th.AssertNoErr(t, err)
	th.CheckDeepEquals(t, ServerHerp, actual[0])
	th.CheckDeepEquals(t, ServerDerp, actual[1])
}
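
Compared with the EachPage callback used in Example #2, AllPages first gathers every page into memory and servers.ExtractServers is then called once on the combined result. A minimal sketch of the same pattern in application code, assuming a compute client built as in the earlier setup sketch and using the Name filter that servers.ListOpts provides (the helper name is hypothetical):

// listServersByName is a hypothetical helper, not taken from the examples
// above; it assumes a compute client built as in the earlier setup sketch.
// Assumed imports:
//   "github.com/gophercloud/gophercloud"
//   "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
func listServersByName(computeClient *gophercloud.ServiceClient, name string) ([]servers.Server, error) {
	// fetch every page in one go, then extract the combined list;
	// ListOpts{Name: ...} asks the compute API to filter by name server-side
	allPages, err := servers.List(computeClient, servers.ListOpts{Name: name}).AllPages()
	if err != nil {
		return nil, err
	}
	return servers.ExtractServers(allPages)
}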
Example #5
func TestServersList(t *testing.T) {
	client, err := clients.NewComputeV2Client()
	if err != nil {
		t.Fatalf("Unable to create a compute client: %v", err)
	}

	allPages, err := servers.List(client, servers.ListOpts{}).AllPages()
	if err != nil {
		t.Fatalf("Unable to retrieve servers: %v", err)
	}

	allServers, err := servers.ExtractServers(allPages)
	if err != nil {
		t.Fatalf("Unable to extract servers: %v", err)
	}

	for _, server := range allServers {
		PrintServer(t, &server)
	}
}
Example #6
// deploy achieves the aims of Deploy().
func (p *openstackp) deploy(resources *Resources, requiredPorts []int) (err error) {
	// the resource name can only contain letters, numbers, underscores,
	// spaces and hyphens
	if !openstackValidResourceNameRegexp.MatchString(resources.ResourceName) {
		err = Error{"openstack", "deploy", ErrBadResourceName}
		return
	}

	// get/create key pair
	kp, err := keypairs.Get(p.computeClient, resources.ResourceName).Extract()
	if err != nil {
		if _, notfound := err.(gophercloud.ErrDefault404); notfound {
			// create a new keypair; we can't just let OpenStack create one for
			// us because recent versions do not return a DER-encoded key,
			// which is what Go's built-in library expects.
			privateKey, errk := rsa.GenerateKey(rand.Reader, 1024)
			if errk != nil {
				err = errk
				return
			}
			privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
			privateKeyPEMBytes := pem.EncodeToMemory(privateKeyPEM)
			pub, errk := ssh.NewPublicKey(&privateKey.PublicKey)
			if errk != nil {
				err = errk
				return
			}
			publicKeyStr := ssh.MarshalAuthorizedKey(pub)

			kp, err = keypairs.Create(p.computeClient, keypairs.CreateOpts{Name: resources.ResourceName, PublicKey: string(publicKeyStr)}).Extract()
			if err != nil {
				return
			}

			resources.PrivateKey = string(privateKeyPEMBytes)
		} else {
			return
		}
	}
	resources.Details["keypair"] = kp.Name

	// based on hostname, see if we're currently running on an OpenStack
	// server, in which case we'll use this server's security group and network.
	hostname, err := os.Hostname()
	inCloud := false
	if err == nil {
		pager := servers.List(p.computeClient, servers.ListOpts{})
		err = pager.EachPage(func(page pagination.Page) (bool, error) {
			serverList, err := servers.ExtractServers(page)
			if err != nil {
				return false, err
			}

			for _, server := range serverList {
				if server.Name == hostname {
					p.ownName = hostname

					// get the first networkUUID we come across *** not sure
					// what the other possibilities are and what else we can do
					// instead
					for networkName := range server.Addresses {
						networkUUID, _ := networks.IDFromName(p.networkClient, networkName)
						if networkUUID != "" {
							p.networkName = networkName
							p.networkUUID = networkUUID
							break
						}
					}

					// get the first security group *** again, not sure how to
					// pick the "best" if more than one
					for _, smap := range server.SecurityGroups {
						if value, found := smap["name"]; found && value.(string) != "" {
							p.securityGroup = value.(string)
							break
						}
					}

					if p.networkUUID != "" && p.securityGroup != "" {
						inCloud = true
						return false, nil
					}
				}
			}

			return true, nil
		})
	}

	//*** if in the cloud, we should really create a security group that allows
	// the given ports, accessible only by things in the current security group

	// don't create any more resources if we're already running in OpenStack
	if inCloud {
		return
	}

	// get/create security group
	pager := secgroups.List(p.computeClient)
	var group *secgroups.SecurityGroup
	foundGroup := false
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		groupList, err := secgroups.ExtractSecurityGroups(page)
		if err != nil {
			return false, err
		}

		for _, g := range groupList {
			if g.Name == resources.ResourceName {
				group = &g
				foundGroup = true
				return false, nil
			}
		}

		return true, nil
	})
	if err != nil {
		return
	}
	if !foundGroup {
		// create a new security group with rules allowing the desired ports
		group, err = secgroups.Create(p.computeClient, secgroups.CreateOpts{Name: resources.ResourceName, Description: "access amongst wr-spawned nodes"}).Extract()
		if err != nil {
			return
		}

		//*** check if the rules are already there, in case we previously died
		// between previous line and this one
		for _, port := range requiredPorts {
			_, err = secgroups.CreateRule(p.computeClient, secgroups.CreateRuleOpts{
				ParentGroupID: group.ID,
				FromPort:      port,
				ToPort:        port,
				IPProtocol:    "TCP",
				CIDR:          "0.0.0.0/0", // FromGroupID: group.ID if we were creating a head node and then wanted a rule for all worker nodes...
			}).Extract()
			if err != nil {
				return
			}
		}

		// ICMP may help networking work as expected
		_, err = secgroups.CreateRule(p.computeClient, secgroups.CreateRuleOpts{
			ParentGroupID: group.ID,
			FromPort:      0,
			ToPort:        0, // *** results in a port of '0', which is not the same as "ALL ICMP" which then says "Any" in the web interface
			IPProtocol:    "ICMP",
			CIDR:          "0.0.0.0/0",
		}).Extract()
		if err != nil {
			return
		}
	}
	resources.Details["secgroup"] = group.ID
	p.securityGroup = resources.ResourceName

	// get/create network
	var network *networks.Network
	networkID, err := networks.IDFromName(p.networkClient, resources.ResourceName)
	if err != nil {
		if _, notfound := err.(gophercloud.ErrResourceNotFound); notfound {
			// create a network for ourselves
			network, err = networks.Create(p.networkClient, networks.CreateOpts{Name: resources.ResourceName, AdminStateUp: gophercloud.Enabled}).Extract()
			if err != nil {
				return
			}
			networkID = network.ID
		} else {
			return
		}
	} else {
		network, err = networks.Get(p.networkClient, networkID).Extract()
		if err != nil {
			return
		}
	}
	resources.Details["network"] = networkID
	p.networkName = resources.ResourceName
	p.networkUUID = networkID

	// get/create subnet
	var subnetID string
	if len(network.Subnets) == 1 {
		subnetID = network.Subnets[0]
		// *** check it's valid? could we end up with more than 1 subnet?
	} else {
		// add a big enough subnet
		var gip = new(string)
		*gip = "192.168.0.1"
		var subnet *subnets.Subnet
		subnet, err = subnets.Create(p.networkClient, subnets.CreateOpts{
			NetworkID:      networkID,
			CIDR:           "192.168.0.0/16",
			GatewayIP:      gip,
			DNSNameservers: dnsNameServers[:], // this is critical, or servers on new networks can't be ssh'd to for many minutes
			IPVersion:      4,
			Name:           resources.ResourceName,
		}).Extract()
		if err != nil {
			return
		}
		subnetID = subnet.ID
	}
	resources.Details["subnet"] = subnetID

	// get/create router
	var routerID string
	pager = routers.List(p.networkClient, routers.ListOpts{Name: resources.ResourceName})
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		routerList, err := routers.ExtractRouters(page)
		if err != nil {
			return false, err
		}
		routerID = routerList[0].ID
		// *** check it's valid? could we end up with more than 1 router?
		return false, nil
	})
	if err != nil {
		return
	}
	if routerID == "" {
		var router *routers.Router
		router, err = routers.Create(p.networkClient, routers.CreateOpts{
			Name:         resources.ResourceName,
			GatewayInfo:  &routers.GatewayInfo{NetworkID: p.externalNetworkID},
			AdminStateUp: gophercloud.Enabled,
		}).Extract()
		if err != nil {
			return
		}

		routerID = router.ID

		// add our subnet
		_, err = routers.AddInterface(p.networkClient, routerID, routers.AddInterfaceOpts{SubnetID: subnetID}).Extract()
		if err != nil {
			// if this fails, we'd be stuck with a useless router, so we try and
			// delete it
			routers.Delete(p.networkClient, router.ID)
			return
		}
	}
	resources.Details["router"] = routerID

	return
}