func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) {
	if s.server == nil {
		return
	}

	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)

	// We need the v2 compute client
	computeClient, err := config.computeV2Client()
	if err != nil {
		ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err))
		return
	}

	ui.Say(fmt.Sprintf("Terminating the source server: %s ...", s.server.ID))
	if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil {
		ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err))
		return
	}

	stateChange := StateChangeConf{
		Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED", "SHUTOFF", "STOPPED"},
		Refresh: ServerStateRefreshFunc(computeClient, s.server),
		Target:  []string{"DELETED"},
	}

	WaitForState(&stateChange)
}
// DeleteServer deletes an instance via its UUID.
// A fatal error will occur if the instance failed to be destroyed. This works
// best when using it as a deferred function.
func DeleteServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server) {
	err := servers.Delete(client, server.ID).ExtractErr()
	if err != nil {
		t.Fatalf("Unable to delete server %s: %s", server.ID, err)
	}

	t.Logf("Deleted server: %s", server.ID)
}
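// A minimal sketch of the deferred-cleanup pattern the doc comment above
// recommends. createTestServer is a hypothetical helper (not shown here) that
// returns a booted *servers.Server; client is assumed to be an
// already-authenticated compute v2 *gophercloud.ServiceClient.
func exerciseServer(t *testing.T, client *gophercloud.ServiceClient) {
	server, err := createTestServer(t, client)
	if err != nil {
		t.Fatalf("Unable to create server: %s", err)
	}
	// DeleteServer calls t.Fatalf on failure, so deferring it guarantees the
	// instance is cleaned up (or the test fails loudly) however we exit.
	defer DeleteServer(t, client, server)

	// ... exercise the server here ...
}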
func TestDeleteServer(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()
	HandleServerDeletionSuccessfully(t)

	res := servers.Delete(client.ServiceClient(), "asdfasdfasdf")
	th.AssertNoErr(t, res.Err)
}
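// The HandleServerDeletionSuccessfully fixture used above is not shown in
// this snippet; the following is a sketch of what such a fixture typically
// looks like with gophercloud's testhelper package (the real fixture may
// differ in detail): it registers a handler on the test mux that asserts the
// request is a DELETE with the fake token, then replies 204 No Content so
// servers.Delete reports success.
func HandleServerDeletionSuccessfully(t *testing.T) {
	th.Mux.HandleFunc("/servers/asdfasdfasdf", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "DELETE")
		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
		w.WriteHeader(http.StatusNoContent)
	})
}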
// destroyServer achieves the aims of DestroyServer()
func (p *openstackp) destroyServer(serverID string) (err error) {
	err = servers.Delete(p.computeClient, serverID).ExtractErr()
	if err != nil {
		return
	}

	// wait for it to really be deleted, or we won't be able to
	// delete the router and network later; the following returns
	// an error of "Resource not found" as soon as the server
	// is not there anymore; we don't care about any others
	servers.WaitForStatus(p.computeClient, serverID, "xxxx", 60)
	return
}
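// The "xxxx" status above is deliberately one the server can never reach:
// servers.WaitForStatus keeps polling until servers.Get fails with
// "Resource not found", which happens exactly when the instance is gone.
// Below is a more explicit sketch of the same wait, assuming gophercloud's
// ErrDefault404 is the error type returned on a 404 (adjust to whatever the
// client library version in use actually returns).
func waitForServerGone(client *gophercloud.ServiceClient, serverID string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		_, err := servers.Get(client, serverID).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				// the server no longer exists, which is what we were waiting for
				return nil
			}
			return err
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("timed out waiting for server %s to be deleted", serverID)
}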
func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	computeClient, err := config.computeV2Client(GetRegion(d))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack compute client: %s", err)
	}

	// Make sure all volumes are detached before deleting
	volumes := d.Get("volume")
	if volumeSet, ok := volumes.(*schema.Set); ok {
		volumeList := volumeSet.List()
		if len(volumeList) > 0 {
			log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", volumeList)
			if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil {
				return err
			} else {
				if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), volumeList); err != nil {
					return err
				}
			}
		}
	}

	if d.Get("stop_before_destroy").(bool) {
		err = startstop.Stop(computeClient, d.Id()).ExtractErr()
		if err != nil {
			log.Printf("[WARN] Error stopping OpenStack instance: %s", err)
		} else {
			stopStateConf := &resource.StateChangeConf{
				Pending:    []string{"ACTIVE"},
				Target:     []string{"SHUTOFF"},
				Refresh:    ServerV2StateRefreshFunc(computeClient, d.Id()),
				Timeout:    3 * time.Minute,
				Delay:      10 * time.Second,
				MinTimeout: 3 * time.Second,
			}
			log.Printf("[DEBUG] Waiting for instance (%s) to stop", d.Id())
			_, err = stopStateConf.WaitForState()
			if err != nil {
				log.Printf("[WARN] Error waiting for instance (%s) to stop: %s, proceeding to delete", d.Id(), err)
			}
		}
	}

	err = servers.Delete(computeClient, d.Id()).ExtractErr()
	if err != nil {
		return fmt.Errorf("Error deleting OpenStack server: %s", err)
	}

	// Wait for the instance to delete before moving on.
	log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id())

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"ACTIVE", "SHUTOFF"},
		Target:     []string{"DELETED"},
		Refresh:    ServerV2StateRefreshFunc(computeClient, d.Id()),
		Timeout:    30 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to delete: %s",
			d.Id(), err)
	}

	d.SetId("")
	return nil
}
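// ServerV2StateRefreshFunc is referenced above but not shown; the following is
// a sketch of the usual shape of such a refresh function, assuming
// gophercloud's ErrDefault404 is returned once the instance is gone (the real
// provider code may differ): it reports the instance's current status, and
// maps a 404 into the "DELETED" target state so the StateChangeConf waits
// above can complete.
func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		s, err := servers.Get(client, instanceID).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				// treat a missing instance as the terminal DELETED state
				return s, "DELETED", nil
			}
			return nil, "", err
		}
		return s, s.Status, nil
	}
}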
// spawn achieves the aims of Spawn()
func (p *openstackp) spawn(resources *Resources, osPrefix string, flavorID string, externalIP bool, postCreationScript []byte) (serverID string, serverIP string, adminPass string, err error) {
	// get available images, pick the one that matches desired OS
	// *** rackspace API lets you filter on eg. os_distro=ubuntu and os_version=12.04; can we do the same here?
	pager := images.ListDetail(p.computeClient, images.ListOpts{Status: "ACTIVE"})
	var imageID string
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		imageList, err := images.ExtractImages(page)
		if err != nil {
			return false, err
		}

		for _, i := range imageList {
			if i.Progress == 100 && strings.HasPrefix(i.Name, osPrefix) {
				imageID = i.ID
				return false, nil
			}
		}

		return true, nil
	})
	if err != nil {
		return
	}

	// create the server with a unique name
	server, err := servers.Create(p.computeClient, keypairs.CreateOptsExt{
		CreateOptsBuilder: servers.CreateOpts{
			Name:           uniqueResourceName(resources.ResourceName),
			FlavorRef:      flavorID,
			ImageRef:       imageID,
			SecurityGroups: []string{p.securityGroup},
			Networks:       []servers.Network{{UUID: p.networkUUID}},
			UserData:       postCreationScript,
			// Metadata map[string]string
		},
		KeyName: resources.ResourceName,
	}).Extract()
	if err != nil {
		return
	}

	// wait for it to come up; servers.WaitForStatus has a timeout, but it
	// doesn't always work, so we roll our own
	waitForActive := make(chan error)
	go func() {
		timeout := time.After(60 * time.Second)
		ticker := time.NewTicker(1 * time.Second)
		for {
			select {
			case <-ticker.C:
				current, err := servers.Get(p.computeClient, server.ID).Extract()
				if err != nil {
					ticker.Stop()
					waitForActive <- err
					return
				}
				if current.Status == "ACTIVE" {
					ticker.Stop()
					waitForActive <- nil
					return
				}
				if current.Status == "ERROR" {
					ticker.Stop()
					waitForActive <- errors.New("there was an error in bringing up the new server")
					return
				}
				continue
			case <-timeout:
				ticker.Stop()
				waitForActive <- errors.New("timed out waiting for server to become ACTIVE")
				return
			}
		}
	}()
	err = <-waitForActive
	if err != nil {
		// since we're going to return an error that we failed to spawn, try
		// and delete the bad server in case it is still there
		servers.Delete(p.computeClient, server.ID)
		return
	}
	// *** NB. it can still take some number of seconds before I can ssh to it

	serverID = server.ID
	adminPass = server.AdminPass

	// get the server's IP; if we error for any reason we'll delete the server
	// first, because without an IP it's useless
	if externalIP {
		// give it a floating ip
		var floatingIP string
		floatingIP, err = p.getAvailableFloatingIP()
		if err != nil {
			p.destroyServer(serverID)
			return
		}

		// associate floating ip with server *** we have a race condition
		// between finding/creating the free floating IP above and using it here
		err = floatingips.AssociateInstance(p.computeClient, serverID, floatingips.AssociateOpts{
			FloatingIP: floatingIP,
		}).ExtractErr()
		if err != nil {
			p.destroyServer(serverID)
			return
		}

		serverIP = floatingIP
	} else {
		// find its auto-assigned internal ip *** there must be a better way
		// of doing this...
		allNetworkAddressPages, serr := servers.ListAddressesByNetwork(p.computeClient, serverID, p.networkName).AllPages()
		if serr != nil {
			p.destroyServer(serverID)
			err = serr
			return
		}
		allNetworkAddresses, serr := servers.ExtractNetworkAddresses(allNetworkAddressPages)
		if serr != nil {
			p.destroyServer(serverID)
			err = serr
			return
		}

		for _, address := range allNetworkAddresses {
			if address.Version == 4 && strings.HasPrefix(address.Address, "192.168") {
				serverIP = address.Address
				break
			}
		}
	}

	return
}
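// A brief sketch of how spawn and destroyServer above pair up from a caller's
// point of view (runOnTemporaryServer is hypothetical, and the surrounding
// Resources setup is assumed): the returned serverID is what later teardown
// needs, so it is captured and the instance destroyed once the caller is done.
func (p *openstackp) runOnTemporaryServer(resources *Resources, osPrefix string, flavorID string) error {
	serverID, serverIP, _, err := p.spawn(resources, osPrefix, flavorID, false, nil)
	if err != nil {
		return err
	}
	// tear the instance down again whatever happens below
	defer p.destroyServer(serverID)

	log.Printf("spawned server %s with internal IP %s", serverID, serverIP)
	// ... connect to serverIP and do the actual work here ...
	return nil
}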