func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error {
	if s.mounts == nil {
		return nil
	}

	wrappedCommand := state.Get("wrappedCommand").(CommandWrapper)
	for len(s.mounts) > 0 {
		var path string
		lastIndex := len(s.mounts) - 1
		path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex]

		unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path))
		if err != nil {
			return fmt.Errorf("Error creating unmount command: %s", err)
		}

		stderr := new(bytes.Buffer)
		cmd := ShellCommand(unmountCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf(
				"Error unmounting device: %s\nStderr: %s", err, stderr.String())
		}
	}

	s.mounts = nil
	return nil
}
func (s *StepSecurityGroup) Cleanup(state multistep.StateBag) {
	if s.createdGroupId == "" {
		return
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Deleting temporary security group...")

	var err error
	for i := 0; i < 5; i++ {
		_, err = ec2conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{GroupId: &s.createdGroupId})
		if err == nil {
			break
		}

		log.Printf("Error deleting security group: %s", err)
		time.Sleep(5 * time.Second)
	}

	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error cleaning up security group. Please delete the group manually: %s", s.createdGroupId))
	}
}
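// The delete loop above retries a fixed number of attempts with a constant
// sleep between them. As a hedged illustration only (retryN is a hypothetical
// helper, not part of the original source), the same pattern can be factored
// out like this:
func retryN(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		log.Printf("Retryable error (attempt %d/%d): %s", i+1, attempts, err)
		time.Sleep(delay)
	}
	return err
}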
func (s *StepOutputDir) Run(state multistep.StateBag) multistep.StepAction {
	dir := state.Get("dir").(OutputDir)
	ui := state.Get("ui").(packer.Ui)

	exists, err := dir.DirExists()
	if err != nil {
		state.Put("error", err)
		return multistep.ActionHalt
	}

	if exists {
		if s.Force {
			ui.Say("Deleting previous output directory...")
			if err := dir.RemoveAll(); err != nil {
				state.Put("error", err)
				return multistep.ActionHalt
			}
		} else {
			state.Put("error", fmt.Errorf(
				"Output directory '%s' already exists.", dir.String()))
			return multistep.ActionHalt
		}
	}

	if err := dir.MkdirAll(); err != nil {
		state.Put("error", err)
		return multistep.ActionHalt
	}

	s.success = true
	return multistep.ActionContinue
}
// Cleanup destroys the GCE instance created during the image creation process.
func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
	if s.instanceName == "" {
		return
	}

	config := state.Get("config").(*Config)
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Deleting instance...")
	errCh, err := driver.DeleteInstance(config.Zone, s.instanceName)
	if err == nil {
		select {
		case err = <-errCh:
		case <-time.After(config.stateTimeout):
			err = errors.New("time out while waiting for instance to delete")
		}
	}

	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error deleting instance. Please delete it manually.\n\n"+
				"Name: %s\n"+
				"Error: %s", s.instanceName, err))
	}

	s.instanceName = ""
}
func (s *StepMountDvdDrive) Cleanup(state multistep.StateBag) {
	if s.path == "" {
		return
	}

	errorMsg := "Error unmounting dvd drive: %s"

	vmName := state.Get("vmName").(string)
	driver := state.Get("driver").(hypervcommon.Driver)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Unmounting dvd drive...")

	var blockBuffer bytes.Buffer
	blockBuffer.WriteString("Invoke-Command -scriptblock {Set-VMDvdDrive -VMName '")
	blockBuffer.WriteString(vmName)
	blockBuffer.WriteString("' -Path $null}")

	err := driver.HypervManage(blockBuffer.String())
	if err != nil {
		ui.Error(fmt.Sprintf(errorMsg, err))
	}
}
func (s *StepProvision) Run(state multistep.StateBag) multistep.StepAction {
	comm := s.Comm
	if comm == nil {
		comm = state.Get("communicator").(packer.Communicator)
	}
	hook := state.Get("hook").(packer.Hook)
	ui := state.Get("ui").(packer.Ui)

	// Run the provisioner in a goroutine so we can continually check
	// for cancellations...
	log.Println("Running the provision hook")
	errCh := make(chan error, 1)
	go func() {
		errCh <- hook.Run(packer.HookProvision, ui, comm, nil)
	}()

	for {
		select {
		case err := <-errCh:
			if err != nil {
				state.Put("error", err)
				return multistep.ActionHalt
			}

			return multistep.ActionContinue
		case <-time.After(1 * time.Second):
			if _, ok := state.GetOk(multistep.StateCancelled); ok {
				log.Println("Cancelling provisioning due to interrupt...")
				hook.Cancel()
				return multistep.ActionHalt
			}
		}
	}
}
func cancelCallback(state multistep.StateBag) bool {
	_, cancel := state.GetOk(multistep.StateCancelled)
	return cancel
}
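// cancelCallback is the sort of predicate a driver wait loop can poll between
// iterations. A minimal sketch, assuming a hypothetical waitUntil helper (the
// name and signature below are illustrative, not part of the original source):
func waitUntil(done func() bool, cancelled func(multistep.StateBag) bool, state multistep.StateBag) bool {
	for !done() {
		if cancelled(state) {
			return false // interrupted by the user
		}
		time.Sleep(1 * time.Second)
	}
	return true
}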
func (s *stepTakeSnapshot) Run(state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packer.Ui)
	c := state.Get("config").(*Config)

	ui.Say("Creating ProfitBricks snapshot...")

	profitbricks.SetAuth(c.PBUsername, c.PBPassword)

	dcId := state.Get("datacenter_id").(string)
	volumeId := state.Get("volume_id").(string)

	snapshot := profitbricks.CreateSnapshot(dcId, volumeId, c.SnapshotName)

	state.Put("snapshotname", c.SnapshotName)

	if snapshot.StatusCode > 299 {
		var restError RestError
		json.Unmarshal([]byte(snapshot.Response), &restError)
		if len(restError.Messages) > 0 {
			ui.Error(restError.Messages[0].Message)
		} else {
			ui.Error(snapshot.Response)
		}
		return multistep.ActionHalt
	}

	s.waitTillProvisioned(snapshot.Headers.Get("Location"), *c)

	return multistep.ActionContinue
}
func sshConfig(state multistep.StateBag) (*gossh.ClientConfig, error) {
	config := state.Get("config").(*Config)

	var privateKey string
	var auth []gossh.AuthMethod

	if config.Comm.SSHPassword != "" {
		auth = []gossh.AuthMethod{
			gossh.Password(config.Comm.SSHPassword),
			gossh.KeyboardInteractive(
				ssh.PasswordKeyboardInteractive(config.Comm.SSHPassword)),
		}
	}

	if config.Comm.SSHPrivateKey != "" {
		if priv, ok := state.GetOk("privateKey"); ok {
			privateKey = priv.(string)
		}

		signer, err := gossh.ParsePrivateKey([]byte(privateKey))
		if err != nil {
			return nil, fmt.Errorf("Error setting up SSH config: %s", err)
		}

		auth = append(auth, gossh.PublicKeys(signer))
	}

	return &gossh.ClientConfig{
		User: config.Comm.SSHUsername,
		Auth: auth,
	}, nil
}
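// A minimal usage sketch for the config built above, assuming the
// golang.org/x/crypto/ssh package (imported here as gossh) and a placeholder
// address; real builders take the host and port from earlier steps via the
// state bag. dialExample itself is hypothetical, not part of the original source.
func dialExample(state multistep.StateBag) (*gossh.Client, error) {
	cfg, err := sshConfig(state)
	if err != nil {
		return nil, err
	}
	// "127.0.0.1:22" is a placeholder endpoint for illustration only.
	return gossh.Dial("tcp", "127.0.0.1:22", cfg)
}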
func (s *stepStopInstance) Run(state multistep.StateBag) multistep.StepAction {
	ec2conn := state.Get("ec2").(*ec2.EC2)
	instance := state.Get("instance").(*ec2.Instance)
	ui := state.Get("ui").(packer.Ui)

	// Stop the instance so we can create an AMI from it
	ui.Say("Stopping the source instance...")
	_, err := ec2conn.StopInstances(instance.InstanceId)
	if err != nil {
		err := fmt.Errorf("Error stopping instance: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Wait for the instance to actually stop
	ui.Say("Waiting for the instance to stop...")
	stateChange := awscommon.StateChangeConf{
		Conn:      ec2conn,
		Pending:   []string{"running", "stopping"},
		Target:    "stopped",
		Refresh:   awscommon.InstanceStateRefreshFunc(ec2conn, instance),
		StepState: state,
	}
	_, err = awscommon.WaitForState(&stateChange)
	if err != nil {
		err := fmt.Errorf("Error waiting for instance to stop: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func (s *StepGetIPAddress) Run(state multistep.StateBag) multistep.StepAction {
	s.say("Getting the VM's IP address ...")

	var resourceGroupName = state.Get(constants.ArmResourceGroupName).(string)
	var ipAddressName = state.Get(constants.ArmPublicIPAddressName).(string)
	var nicName = state.Get(constants.ArmNicName).(string)

	s.say(fmt.Sprintf(" -> ResourceGroupName : '%s'", resourceGroupName))
	s.say(fmt.Sprintf(" -> PublicIPAddressName : '%s'", ipAddressName))
	s.say(fmt.Sprintf(" -> NicName : '%s'", nicName))
	s.say(fmt.Sprintf(" -> Network Connection : '%s'", EndpointCommunicationText[s.endpoint]))

	address, err := s.get(resourceGroupName, ipAddressName, nicName)
	if err != nil {
		state.Put(constants.Error, err)
		s.error(err)

		return multistep.ActionHalt
	}

	state.Put(constants.SSHHost, address)
	s.say(fmt.Sprintf(" -> IP Address : '%s'", address))

	return multistep.ActionContinue
}
func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
	// If we used an SSH private key file, do not go about deleting
	// keypairs
	if s.PrivateKeyFile != "" {
		return
	}

	// If no key name is set, then we never created it, so just return
	if s.keyName == "" {
		return
	}

	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)

	// We need the v2 compute client
	computeClient, err := config.computeV2Client()
	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
		return
	}

	ui.Say("Deleting temporary keypair...")
	err = keypairs.Delete(computeClient, s.keyName).ExtractErr()
	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
	}
}
// Run adds a virtual CD-ROM device to the VM and attaches the Parallels Tools ISO image.
// If the ISO image is not specified, then this step will be skipped.
func (s *StepAttachParallelsTools) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	// If we're not attaching Parallels Tools then just return
	if s.ParallelsToolsMode != ParallelsToolsModeAttach {
		log.Println("Not attaching Parallels Tools since we're uploading.")
		return multistep.ActionContinue
	}

	// Get the Parallels Tools path on the host machine
	parallelsToolsPath := state.Get("parallels_tools_path").(string)

	// Attach the Parallels Tools ISO to the VM
	ui.Say("Attaching Parallels Tools ISO to the new CD/DVD drive...")
	cdrom, err := driver.DeviceAddCDROM(vmName, parallelsToolsPath)
	if err != nil {
		err = fmt.Errorf("Error attaching Parallels Tools ISO: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// Track the device name so that we can delete it later
	s.cdromDevice = cdrom

	return multistep.ActionContinue
}
func (s *StepCopyFiles) Run(state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	mountPath := state.Get("mount_path").(string)
	ui := state.Get("ui").(packer.Ui)

	s.files = make([]string, 0, len(config.CopyFiles))
	if len(config.CopyFiles) > 0 {
		ui.Say("Copying files from host to chroot...")
		for _, path := range config.CopyFiles {
			ui.Message(path)
			chrootPath := filepath.Join(mountPath, path)
			log.Printf("Copying '%s' to '%s'", path, chrootPath)

			if err := s.copySingle(chrootPath, path); err != nil {
				err := fmt.Errorf("Error copying file: %s", err)
				state.Put("error", err)
				ui.Error(err.Error())
				return multistep.ActionHalt
			}

			s.files = append(s.files, chrootPath)
		}
	}

	state.Put("copy_files_cleanup", s)
	return multistep.ActionContinue
}
func (s *StepDownload) Run(state multistep.StateBag) multistep.StepAction {
	cache := state.Get("cache").(packer.Cache)
	ui := state.Get("ui").(packer.Ui)

	var checksum []byte
	if s.Checksum != "" {
		var err error
		checksum, err = hex.DecodeString(s.Checksum)
		if err != nil {
			state.Put("error", fmt.Errorf("Error parsing checksum: %s", err))
			return multistep.ActionHalt
		}
	}

	ui.Say(fmt.Sprintf("Downloading or copying %s", s.Description))

	var finalPath string
	for _, url := range s.Url {
		ui.Message(fmt.Sprintf("Downloading or copying: %s", url))

		targetPath := s.TargetPath
		if targetPath == "" {
			log.Printf("Acquiring lock to download: %s", url)
			targetPath = cache.Lock(url)
			defer cache.Unlock(url)
		}

		config := &DownloadConfig{
			Url:        url,
			TargetPath: targetPath,
			CopyFile:   false,
			Hash:       HashForType(s.ChecksumType),
			Checksum:   checksum,
		}

		path, err, retry := s.download(config, state)
		if err != nil {
			ui.Message(fmt.Sprintf("Error downloading: %s", err))
		}

		if !retry {
			return multistep.ActionHalt
		}

		if err == nil {
			finalPath = path
			break
		}
	}

	if finalPath == "" {
		err := fmt.Errorf("%s download failed.", s.Description)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	state.Put(s.ResultKey, finalPath)
	return multistep.ActionContinue
}
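// The decoded checksum above is handed to the downloader through DownloadConfig.
// As a hedged sketch of how such a checksum could be verified after the fact
// (verifyChecksum is hypothetical; it assumes HashForType returns a hash.Hash,
// or nil for an unknown type, as its use in DownloadConfig suggests):
func verifyChecksum(path, checksumType string, want []byte) (bool, error) {
	h := HashForType(checksumType)
	if h == nil {
		return false, fmt.Errorf("unsupported checksum type: %s", checksumType)
	}

	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	return bytes.Equal(h.Sum(nil), want), nil
}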
func (s *StepForwardSSH) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	guestPort := s.CommConfig.Port()
	sshHostPort := guestPort
	if !s.SkipNatMapping {
		log.Printf("Looking for available communicator (SSH, WinRM, etc) port between %d and %d",
			s.HostPortMin, s.HostPortMax)

		offset := 0
		portRange := int(s.HostPortMax - s.HostPortMin)
		if portRange > 0 {
			// Have to check if > 0 to avoid a panic
			offset = rand.Intn(portRange)
		}

		for {
			sshHostPort = offset + int(s.HostPortMin)
			if sshHostPort >= int(s.HostPortMax) {
				offset = 0
				sshHostPort = int(s.HostPortMin)
			}

			log.Printf("Trying port: %d", sshHostPort)
			l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", sshHostPort))
			if err == nil {
				defer l.Close()
				break
			}

			offset++
		}

		// Create a forwarded port mapping to the VM
		ui.Say(fmt.Sprintf("Creating forwarded port mapping for communicator (SSH, WinRM, etc) (host port %d)", sshHostPort))
		command := []string{
			"modifyvm", vmName,
			"--natpf1", "delete", "packercomm",
		}
		driver.VBoxManage(command...)

		command = []string{
			"modifyvm", vmName,
			"--natpf1",
			fmt.Sprintf("packercomm,tcp,127.0.0.1,%d,,%d", sshHostPort, guestPort),
		}
		if err := driver.VBoxManage(command...); err != nil {
			err := fmt.Errorf("Error creating port forwarding rule: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}
	}

	// Save the port we're using so that future steps can use it
	state.Put("sshHostPort", sshHostPort)

	return multistep.ActionContinue
}
func (s *StepCreateImage) Run(state multistep.StateBag) multistep.StepAction {
	client := state.Get(constants.RequestManager).(management.Client)
	ui := state.Get(constants.Ui).(packer.Ui)

	errorMsg := "Error Creating Azure Image: %s"

	ui.Say("Creating Azure Image. If Successful, This Will Remove the Temporary VM...")

	description := "packer made image"
	imageFamily := "PackerMade"

	if err := retry.ExecuteAsyncOperation(client, func() (management.OperationID, error) {
		return vmi.NewClient(client).Capture(s.TmpServiceName, s.TmpVmName, s.TmpVmName,
			s.UserImageName, s.UserImageLabel, vmi.OSStateGeneralized, vmi.CaptureParameters{
				Description:       description,
				ImageFamily:       imageFamily,
				RecommendedVMSize: s.RecommendedVMSize,
			})
	}); err != nil {
		err := fmt.Errorf(errorMsg, err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	// CaptureVMImage removes the VM
	state.Put(constants.ImageCreated, 1)
	state.Put(constants.VmExists, 0)

	return multistep.ActionContinue
}
func (s *StepRunSourceServer) Cleanup(state multistep.StateBag) {
	if s.server == nil {
		return
	}

	config := state.Get("config").(Config)
	ui := state.Get("ui").(packer.Ui)

	// We need the v2 compute client
	computeClient, err := config.computeV2Client()
	if err != nil {
		ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err))
		return
	}

	ui.Say("Terminating the source server...")
	if err := servers.Delete(computeClient, s.server.ID).ExtractErr(); err != nil {
		ui.Error(fmt.Sprintf("Error terminating server, may still be around: %s", err))
		return
	}

	stateChange := StateChangeConf{
		Pending: []string{"ACTIVE", "BUILD", "REBUILD", "SUSPENDED", "SHUTOFF", "STOPPED"},
		Refresh: ServerStateRefreshFunc(computeClient, s.server),
		Target:  []string{"DELETED"},
	}

	WaitForState(&stateChange)
}
func (s *stepRun) Cleanup(state multistep.StateBag) {
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)

	// If we started the machine... stop it.
	if s.vmxPath != "" {
		// If we started it less than 5 seconds ago... wait.
		sinceBootTime := time.Since(s.bootTime)
		waitBootTime := 5 * time.Second
		if sinceBootTime < waitBootTime {
			sleepTime := waitBootTime - sinceBootTime
			ui.Say(fmt.Sprintf("Waiting %s to give VMware time to clean up...", sleepTime.String()))
			time.Sleep(sleepTime)
		}

		// See if it is running
		running, _ := driver.IsRunning(s.vmxPath)
		if running {
			ui.Say("Stopping virtual machine...")
			if err := driver.Stop(s.vmxPath); err != nil {
				ui.Error(fmt.Sprintf("Error stopping VM: %s", err))
			}
		}

		if remoteDriver, ok := driver.(RemoteDriver); ok && s.registered {
			ui.Say("Unregistering virtual machine...")
			if err := remoteDriver.Unregister(s.vmxPath); err != nil {
				ui.Error(fmt.Sprintf("Error unregistering VM: %s", err))
			}

			s.registered = false
		}
	}
}
// Checks the configuration on the filesystem for syntax errors or
// non-existence.
func (*StepCheckConfigurationFile) Run(state multistep.StateBag) multistep.StepAction {
	log.Println("Checking configuration file...")

	configPath := state.Get("config_path").(string)
	var path string

	// Determine if we are dealing with a custom config path
	if configPath == "" {
		// Default to the home directory
		path = os.Getenv("HOME") + "/.gethubconfig"
	} else {
		// They've specified a custom config path
		log.Println("Environment specified config path", configPath)
		path = configPath + "/.gethubconfig"
	}

	// Is the config file even there?
	_, err := os.Stat(path)
	if err != nil {
		fmt.Println(RED + "It seems as though you haven't set up gethub. Please run `gethub authorize`" + CLEAR)
		return multistep.ActionHalt
	}

	// Read the file and see if all is well with a basic config
	c, err2 := config.ReadDefault(path)
	if err2 != nil {
		fmt.Println(RED + "Something seems to be wrong with your ~/.gethubconfig file. Please run `gethub authorize`" + CLEAR)
		return multistep.ActionHalt
	}

	checkPath, _ := c.String("gethub", "path")
	if checkPath == "" {
		fmt.Println(RED + "Something seems to be wrong with your ~/.gethubconfig file. Please run `gethub authorize`" + CLEAR)
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func (s *StepRegister) Cleanup(state multistep.StateBag) {
	if s.registeredPath == "" {
		return
	}

	driver := state.Get("driver").(vmwcommon.Driver)
	ui := state.Get("ui").(packer.Ui)

	if remoteDriver, ok := driver.(RemoteDriver); ok {
		if s.Format == "" {
			ui.Say("Unregistering virtual machine...")
			if err := remoteDriver.Unregister(s.registeredPath); err != nil {
				ui.Error(fmt.Sprintf("Error unregistering VM: %s", err))
			}

			s.registeredPath = ""
		} else {
			ui.Say("Destroying virtual machine...")
			if err := remoteDriver.Destroy(); err != nil {
				ui.Error(fmt.Sprintf("Error destroying VM: %s", err))
			}

			// Wait for the machine to actually destroy
			for {
				destroyed, _ := remoteDriver.IsDestroyed()
				if destroyed {
					break
				}
				time.Sleep(150 * time.Millisecond)
			}
		}
	}
}
func (s *StepUploadVersion) Run(state multistep.StateBag) multistep.StepAction {
	comm := state.Get("communicator").(packer.Communicator)
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)

	if s.Path == "" {
		log.Println("VBoxVersionFile is empty. Not uploading.")
		return multistep.ActionContinue
	}

	version, err := driver.Version()
	if err != nil {
		state.Put("error", fmt.Errorf("Error reading version for metadata upload: %s", err))
		return multistep.ActionHalt
	}

	ui.Say(fmt.Sprintf("Uploading VirtualBox version info (%s)", version))
	var data bytes.Buffer
	data.WriteString(version)
	if err := comm.Upload(s.Path, &data); err != nil {
		state.Put("error", fmt.Errorf("Error uploading VirtualBox version: %s", err))
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func (self *stepCreateInstance) Cleanup(state multistep.StateBag) {
	client := state.Get("client").(*SoftlayerClient)
	config := state.Get("config").(config)
	ui := state.Get("ui").(packer.Ui)

	if self.instanceId == "" {
		return
	}

	ui.Say("Waiting for the instance to have no active transactions before destroying it...")
	// We should wait until the instance is up and has no transactions,
	// since the destroy API call will fail if the instance still has
	// assigned transactions.
	err := client.waitForInstanceReady(self.instanceId, config.StateTimeout)
	if err != nil {
		log.Printf("Error waiting for instance: %v", err.Error())
		ui.Error(fmt.Sprintf("Error waiting for instance to become ACTIVE for instance (%s)", self.instanceId))
	}

	ui.Say("Destroying instance...")
	err = client.DestroyInstance(self.instanceId)
	if err != nil {
		log.Printf("Error destroying instance: %v", err.Error())
		ui.Error(fmt.Sprintf("Error cleaning up the instance. Please delete the instance (%s) manually", self.instanceId))
	}
}
// Deleting the instance does not remove the boot disk. This cleanup removes
// the disk.
func (s *StepTeardownInstance) Cleanup(state multistep.StateBag) {
	config := state.Get("config").(*Config)
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)

	ui.Say("Deleting disk...")
	errCh, err := driver.DeleteDisk(config.Zone, config.DiskName)
	if err == nil {
		select {
		case err = <-errCh:
		case <-time.After(config.stateTimeout):
			err = errors.New("time out while waiting for disk to delete")
		}
	}

	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error deleting disk. Please delete it manually.\n\n"+
				"Name: %s\n"+
				"Error: %s", config.DiskName, err))
		return
	}

	ui.Message("Disk has been deleted!")
}
func (s *StepConnect) Run(state multistep.StateBag) multistep.StepAction {
	typeMap := map[string]multistep.Step{
		"none": nil,
		"ssh": &StepConnectSSH{
			Config:    s.Config,
			Host:      s.Host,
			SSHConfig: s.SSHConfig,
			SSHPort:   s.SSHPort,
		},
		"winrm": &StepConnectWinRM{
			Config:      s.Config,
			Host:        s.Host,
			WinRMConfig: s.WinRMConfig,
			WinRMPort:   s.SSHPort,
		},
	}
	for k, v := range s.CustomConnect {
		typeMap[k] = v
	}

	step, ok := typeMap[s.Config.Type]
	if !ok {
		state.Put("error", fmt.Errorf("unknown communicator type: %s", s.Config.Type))
		return multistep.ActionHalt
	}

	if step == nil {
		log.Printf("[INFO] communicator disabled, will not connect")
		return multistep.ActionContinue
	}

	s.substep = step
	return s.substep.Run(state)
}
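// Entries added through s.CustomConnect only need to satisfy multistep.Step.
// A hypothetical no-op connector, shown purely to illustrate that contract
// (it could be registered as CustomConnect: map[string]multistep.Step{"noop": &noopConnect{}};
// neither the type nor the registration is part of the original source):
type noopConnect struct{}

func (*noopConnect) Run(state multistep.StateBag) multistep.StepAction {
	log.Printf("[INFO] noop communicator selected; not connecting")
	return multistep.ActionContinue
}

func (*noopConnect) Cleanup(multistep.StateBag) {}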
func (s *stepAttachISO) Cleanup(state multistep.StateBag) {
	if s.diskPath == "" {
		return
	}

	config := state.Get("config").(*config)
	driver := state.Get("driver").(vboxcommon.Driver)
	vmName := state.Get("vmName").(string)

	controllerName := "IDE Controller"
	port := "0"
	device := "1"
	if config.ISOInterface == "sata" {
		controllerName = "SATA Controller"
		port = "1"
		device = "0"
	}

	command := []string{
		"storageattach", vmName,
		"--storagectl", controllerName,
		"--port", port,
		"--device", device,
		"--medium", "none",
	}

	// Remove the ISO. Note that this will probably fail since
	// stepRemoveDevices does this as well. No big deal.
	driver.VBoxManage(command...)
}
// NewRunnerWithPauseFn returns a multistep.Runner that runs steps augmented
// with support for -debug and -on-error command line arguments. With -debug it
// puts the multistep.DebugPauseFn that will pause execution between steps into
// the state under the key "pauseFn".
func NewRunnerWithPauseFn(steps []multistep.Step, config PackerConfig, ui packer.Ui, state multistep.StateBag) multistep.Runner {
	runner, pauseFn := newRunner(steps, config, ui)
	if pauseFn != nil {
		state.Put("pauseFn", pauseFn)
	}
	return runner
}
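// Steps that want an extra pause can look the stored function up themselves.
// A hedged sketch (pauseIfDebug is hypothetical; it assumes the
// multistep.DebugPauseFn signature of (DebugLocation, string, StateBag) and
// the DebugLocationAfterRun constant used by the debug runner):
func pauseIfDebug(state multistep.StateBag, name string) {
	if fn, ok := state.GetOk("pauseFn"); ok {
		fn.(multistep.DebugPauseFn)(multistep.DebugLocationAfterRun, name, state)
	}
}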
func (self *stepCreateInstance) Cleanup(state multistep.StateBag) {
	config := state.Get("config").(config)
	if config.ShouldKeepVM(state) {
		return
	}

	ui := state.Get("ui").(packer.Ui)

	if self.instance != nil {
		ui.Say("Destroying VM")
		_ = self.instance.HardShutdown() // redundant, just in case
		err := self.instance.Destroy()
		if err != nil {
			ui.Error(err.Error())
		}
	}

	if self.vdi != nil {
		ui.Say("Destroying VDI")
		err := self.vdi.Destroy()
		if err != nil {
			ui.Error(err.Error())
		}
	}
}
func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	ui.Say("Starting the virtual machine...")
	err := driver.Start(vmName)
	if err != nil {
		err := fmt.Errorf("Error starting vm: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	s.vmName = vmName

	if int64(s.BootWait) > 0 {
		ui.Say(fmt.Sprintf("Waiting %s for boot...", s.BootWait))
		wait := time.After(s.BootWait)
	WAITLOOP:
		for {
			select {
			case <-wait:
				break WAITLOOP
			case <-time.After(1 * time.Second):
				if _, ok := state.GetOk(multistep.StateCancelled); ok {
					return multistep.ActionHalt
				}
			}
		}
	}

	return multistep.ActionContinue
}
func (s *StepKeyPair) Cleanup(state multistep.StateBag) {
	// If we used an SSH private key file, do not go about deleting
	// keypairs
	if s.PrivateKeyFile != "" {
		return
	}

	// If no key name is set, then we never created it, so just return
	if s.keyName == "" {
		return
	}

	ec2conn := state.Get("ec2").(*ec2.EC2)
	ui := state.Get("ui").(packer.Ui)

	// Remove the keypair
	ui.Say("Deleting temporary keypair...")
	_, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{KeyName: &s.keyName})
	if err != nil {
		ui.Error(fmt.Sprintf(
			"Error cleaning up keypair. Please delete the key manually: %s", s.keyName))
	}

	// Also remove the physical key if we're debugging.
	if s.Debug {
		if err := os.Remove(s.DebugKeyPath); err != nil {
			ui.Error(fmt.Sprintf(
				"Error removing debug key '%s': %s", s.DebugKeyPath, err))
		}
	}
}