func (p *PostProcessor) Configure(raws ...interface{}) error { err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{}, }, }, raws...) errs := new(packer.MultiError) // If there is no explicit number of Go threads to use, then set it if os.Getenv("GOMAXPROCS") == "" { runtime.GOMAXPROCS(runtime.NumCPU()) } if p.config.OutputPath == "" { p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}" } if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing target template: %s", err)) } templates := map[string]*string{ "output": &p.config.OutputPath, } if p.config.CompressionLevel > pgzip.BestCompression { p.config.CompressionLevel = pgzip.BestCompression } // Technically 0 means "don't compress" but I don't know how to // differentiate between "user entered zero" and "user entered nothing". // Also, why bother creating a compressed file with zero compression? if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 { p.config.CompressionLevel = pgzip.DefaultCompression } for key, ptr := range templates { if *ptr == "" { errs = packer.MultiErrorAppend( errs, fmt.Errorf("%s must be set", key)) } *ptr, err = interpolate.Render(p.config.OutputPath, &p.config.ctx) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", key, err)) } } p.config.detectFromFilename() if len(errs.Errors) > 0 { return errs } return nil }
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error { ui.Message("Installing Chef...") p.config.ctx.Data = &InstallChefTemplate{ Sudo: !p.config.PreventSudo, } command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx) if err != nil { return err } ui.Message(command) cmd := &packer.RemoteCmd{Command: command} if err := cmd.StartWithUi(comm, ui); err != nil { return err } if cmd.ExitStatus != 0 { return fmt.Errorf( "Install script exited with non-zero exit status %d", cmd.ExitStatus) } return nil }
// NewCore creates a new Core.
//
// The result is validated and initialized before any interpolation
// happens, so user variables are available when builder names are
// rendered. Builds are then indexed by their rendered (post-template)
// name.
func NewCore(c *CoreConfig) (*Core, error) {
	result := &Core{
		Template:   c.Template,
		components: c.Components,
		variables:  c.Variables,
		version:    c.Version,
	}
	if err := result.validate(); err != nil {
		return nil, err
	}
	if err := result.init(); err != nil {
		return nil, err
	}

	// Go through and interpolate all the build names. We should be able
	// to do this at this point with the variables.
	result.builds = make(map[string]*template.Builder)
	for _, b := range c.Template.Builders {
		v, err := interpolate.Render(b.Name, result.Context())
		if err != nil {
			return nil, fmt.Errorf(
				"Error interpolating builder '%s': %s",
				b.Name, err)
		}

		result.builds[v] = b
	}

	return result, nil
}
func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string, args []string) error { flags := []string{ "-y", "-c", knifeConfigPath, } p.config.ctx.Data = &KnifeTemplate{ Sudo: !p.config.PreventSudo, Flags: strings.Join(flags, " "), Args: strings.Join(args, " "), } command, err := interpolate.Render(p.config.KnifeCommand, &p.config.ctx) if err != nil { return err } cmd := &packer.RemoteCmd{Command: command} if err := cmd.StartWithUi(comm, ui); err != nil { return err } if cmd.ExitStatus != 0 { return fmt.Errorf( "Non-zero exit status. See output above for more info.\n\n"+ "Command: %s", command) } return nil }
// Configure decodes the raw configuration, validates that every
// required template field is set and renderable, and prepares the S3
// bucket handle used for later uploads. All validation failures are
// collected into one MultiError so the user sees them together.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{},
		},
	}, raws...)
	if err != nil {
		return err
	}

	errs := &packer.MultiError{}
	errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)

	// required configuration: each entry maps a user-facing key name to
	// the config field it must populate.
	templates := map[string]*string{
		"region":   &p.config.Region,
		"bucket":   &p.config.Bucket,
		"manifest": &p.config.ManifestPath,
		"box_name": &p.config.BoxName,
		"box_dir":  &p.config.BoxDir,
		"version":  &p.config.Version,
	}

	// Template process: require each field non-empty, then interpolate
	// it in place.
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}

		*ptr, err = interpolate.Render(*ptr, &p.config.ctx)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", key, err))
		}
	}

	// setup the s3 bucket
	auth, err := aws.GetAuth(p.config.AccessConfig.AccessKey, p.config.AccessConfig.SecretKey)
	if err != nil {
		errs = packer.MultiErrorAppend(errs, err)
	}

	// determine region; an unknown region name is a configuration error.
	region, valid := aws.Regions[p.config.Region]
	if valid {
		p.s3 = s3.New(auth, region).Bucket(p.config.Bucket)
	} else {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Invalid region specified: %s", p.config.Region))
	}

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error { p.config.ctx.Data = &ExecuteTemplate{ ConfigPath: config, JsonPath: json, Sudo: !p.config.PreventSudo, } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return err } ui.Message(fmt.Sprintf("Executing Chef: %s", command)) cmd := &packer.RemoteCmd{ Command: command, } if err := cmd.StartWithUi(comm, ui); err != nil { return err } if cmd.ExitStatus != 0 { return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus) } return nil }
func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string) (string, error) { ui.Message("Creating configuration file 'knife.rb'") // Read the template tpl := DefaultKnifeTemplate ctx := p.config.ctx ctx.Data = &ConfigTemplate{ NodeName: nodeName, ServerUrl: serverUrl, ClientKey: clientKey, SslVerifyMode: sslVerifyMode, } configString, err := interpolate.Render(tpl, &ctx) if err != nil { return "", err } remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb")) if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil { return "", err } return remotePath, nil }
// createCommandTextPrivileged builds the command used to run the remote
// script with elevation: it renders ElevatedExecuteCommand with freshly
// flattened env vars, wraps the result in a generated elevated-runner
// script, and returns a powershell invocation of that wrapper.
func (p *Provisioner) createCommandTextPrivileged() (command string, err error) {
	// Can't double escape the env vars, lets create shiny new ones
	flattenedEnvVars, err := p.createFlattenedEnvVars(true)
	if err != nil {
		return "", err
	}
	p.config.ctx.Data = &ExecuteCommandTemplate{
		Vars: flattenedEnvVars,
		Path: p.config.RemotePath,
	}
	command, err = interpolate.Render(p.config.ElevatedExecuteCommand, &p.config.ctx)
	if err != nil {
		return "", fmt.Errorf("Error processing command: %s", err)
	}

	// OK so we need an elevated shell runner to wrap our command, this is going to have its own path
	// generate the script and update the command runner in the process
	path, err := p.generateElevatedRunner(command)
	if err != nil {
		return "", fmt.Errorf("Error generating elevated runner: %s", err)
	}

	// Return the path to the elevated shell wrapper
	command = fmt.Sprintf("powershell -executionpolicy bypass -file \"%s\"", path)

	return command, err
}
func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx interpolate.Context, ui packer.Ui) error { for _, rawCmd := range commands { intCmd, err := interpolate.Render(rawCmd, &ctx) if err != nil { return fmt.Errorf("Error interpolating: %s", err) } command, err := wrappedCommand(intCmd) if err != nil { return fmt.Errorf("Error wrapping command: %s", err) } ui.Say(fmt.Sprintf("Executing command: %s", command)) comm := &shell_local.Communicator{} cmd := &packer.RemoteCmd{Command: command} if err := cmd.StartWithUi(comm, ui); err != nil { return fmt.Errorf("Error executing command: %s", err) } if cmd.ExitStatus != 0 { return fmt.Errorf( "Received non-zero exit code %d from command: %s", cmd.ExitStatus, command) } } return nil }
// Run bundles the instance volume by rendering and executing the
// ec2-bundle-vol command on the instance, then records the resulting
// manifest name and path in the state bag for later steps.
func (s *StepBundleVolume) Run(state multistep.StateBag) multistep.StepAction {
	comm := state.Get("communicator").(packer.Communicator)
	config := state.Get("config").(*Config)
	instance := state.Get("instance").(*ec2.Instance)
	ui := state.Get("ui").(packer.Ui)
	x509RemoteCertPath := state.Get("x509RemoteCertPath").(string)
	x509RemoteKeyPath := state.Get("x509RemoteKeyPath").(string)

	// Bundle the volume
	var err error
	config.ctx.Data = bundleCmdData{
		AccountId:    config.AccountId,
		Architecture: *instance.Architecture,
		CertPath:     x509RemoteCertPath,
		Destination:  config.BundleDestination,
		KeyPath:      x509RemoteKeyPath,
		Prefix:       config.BundlePrefix,
		PrivatePath:  config.X509UploadPath,
	}
	// Note: the rendered command is written back into the config.
	config.BundleVolCommand, err = interpolate.Render(config.BundleVolCommand, &config.ctx)
	if err != nil {
		err := fmt.Errorf("Error processing bundle volume command: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	ui.Say("Bundling the volume...")
	cmd := new(packer.RemoteCmd)
	cmd.Command = config.BundleVolCommand
	if s.Debug {
		ui.Say(fmt.Sprintf("Running: %s", config.BundleVolCommand))
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		state.Put("error", fmt.Errorf("Error bundling volume: %s", err))
		ui.Error(state.Get("error").(error).Error())
		return multistep.ActionHalt
	}

	if cmd.ExitStatus != 0 {
		state.Put("error", fmt.Errorf(
			"Volume bundling failed. Please see the output above for more\n"+
				"details on what went wrong.\n\n"+
				"One common cause for this error is ec2-bundle-vol not being\n"+
				"available on the target instance."))
		ui.Error(state.Get("error").(error).Error())
		return multistep.ActionHalt
	}

	// Store the manifest path for the upload/register steps.
	manifestName := config.BundlePrefix + ".manifest.xml"
	state.Put("manifest_name", manifestName)
	state.Put("manifest_path", fmt.Sprintf(
		"%s/%s", config.BundleDestination, manifestName))

	return multistep.ActionContinue
}
func (s *StepUploadBundle) Run(state multistep.StateBag) multistep.StepAction { comm := state.Get("communicator").(packer.Communicator) config := state.Get("config").(*Config) manifestName := state.Get("manifest_name").(string) manifestPath := state.Get("manifest_path").(string) ui := state.Get("ui").(packer.Ui) region, err := config.Region() if err != nil { err := fmt.Errorf("Error retrieving region: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } config.ctx.Data = uploadCmdData{ AccessKey: config.AccessKey, BucketName: config.S3Bucket, BundleDirectory: config.BundleDestination, ManifestPath: manifestPath, Region: region, SecretKey: config.SecretKey, } config.BundleUploadCommand, err = interpolate.Render(config.BundleUploadCommand, config.ctx) if err != nil { err := fmt.Errorf("Error processing bundle upload command: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } ui.Say("Uploading the bundle...") cmd := &packer.RemoteCmd{Command: config.BundleUploadCommand} if s.Debug { ui.Say(fmt.Sprintf("Running: %s", config.BundleUploadCommand)) } if err := cmd.StartWithUi(comm, ui); err != nil { state.Put("error", fmt.Errorf("Error uploading volume: %s", err)) ui.Error(state.Get("error").(error).Error()) return multistep.ActionHalt } if cmd.ExitStatus != 0 { state.Put("error", fmt.Errorf( "Bundle upload failed. Please see the output above for more\n"+ "details on what went wrong.")) ui.Error(state.Get("error").(error).Error()) return multistep.ActionHalt } state.Put("remote_manifest_path", fmt.Sprintf( "%s/%s", config.S3Bucket, manifestName)) return multistep.ActionContinue }
func (p *OVFPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if artifact.BuilderId() != "mitchellh.vmware" { return nil, false, fmt.Errorf("ovftool post-processor can only be used on VMware boxes: %s", artifact.BuilderId()) } vmx := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path } } if vmx == "" { return nil, false, fmt.Errorf("VMX file could not be located.") } // Strip DVD and floppy drives from the VMX if err := p.stripDrives(vmx); err != nil { return nil, false, fmt.Errorf("Couldn't strip floppy/DVD drives from VMX") } p.cfg.ctx.Data = &OutputPathTemplate{ ArtifactId: artifact.Id(), BuildName: p.cfg.BuildName, Provider: "vmware", } targetPath, err := interpolate.Render(p.cfg.TargetPath, &p.cfg.ctx) if err != nil { return nil, false, err } // build the arguments args := []string{ "--targetType=" + p.cfg.TargetType, "--acceptAllEulas", } // append --compression, if it is set if p.cfg.Compression > 0 { args = append(args, fmt.Sprintf("--compress=%d", p.cfg.Compression)) } // add the source/target args = append(args, vmx, targetPath) ui.Message(fmt.Sprintf("Executing ovftool with arguments: %+v", args)) cmd := exec.Command(executable, args...) var buffer bytes.Buffer cmd.Stdout = &buffer cmd.Stderr = &buffer err = cmd.Run() if err != nil { return nil, false, fmt.Errorf("Unable to execute ovftool: %s", buffer.String()) } ui.Message(fmt.Sprintf("%s", buffer.String())) return artifact, false, nil }
// Run connects to the VM's VNC server and types the configured boot
// command, rendering each command line through the template context
// first. 10.0.2.2 is the address the guest uses to reach the host
// HTTP server on the default user-mode network.
func (s *stepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	httpPort := state.Get("http_port").(uint)
	ui := state.Get("ui").(packer.Ui)
	vncPort := state.Get("vnc_port").(uint)

	// Connect to VNC
	ui.Say("Connecting to VM via VNC")
	nc, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", vncPort))
	if err != nil {
		err := fmt.Errorf("Error connecting to VNC: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	defer nc.Close()

	c, err := vnc.Client(nc, &vnc.ClientConfig{Exclusive: false})
	if err != nil {
		err := fmt.Errorf("Error handshaking with VNC: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}
	defer c.Close()

	log.Printf("Connected to VNC desktop: %s", c.DesktopName)

	// Copy the context so the shared one isn't mutated.
	ctx := config.ctx
	ctx.Data = &bootCommandTemplateData{
		"10.0.2.2",
		httpPort,
		config.VMName,
	}

	ui.Say("Typing the boot command over VNC...")
	for _, command := range config.BootCommand {
		command, err := interpolate.Render(command, &ctx)
		if err != nil {
			err := fmt.Errorf("Error preparing boot command: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		// Check for interrupts between typing things so we can cancel
		// since this isn't the fastest thing.
		if _, ok := state.GetOk(multistep.StateCancelled); ok {
			return multistep.ActionHalt
		}

		vncSendString(c, command)
	}

	return multistep.ActionContinue
}
func (p *Provisioner) createCommandText() (command string, err error) { // Create environment variables to set before executing the command flattenedEnvVars, err := p.createFlattenedEnvVars(false) if err != nil { return "", err } p.config.ctx.Data = &ExecuteCommandTemplate{ Vars: flattenedEnvVars, Path: p.config.RemotePath, } command, err = interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return "", err } // Return the interpolated command if p.config.ElevatedUser == "" { return command, nil } // Can't double escape the env vars, lets create shiny new ones flattenedEnvVars, err = p.createFlattenedEnvVars(true) if err != nil { return "", err } p.config.ctx.Data = &ExecuteCommandTemplate{ Vars: flattenedEnvVars, Path: p.config.RemotePath, } command, err = interpolate.Render(p.config.ElevatedExecuteCommand, &p.config.ctx) if err != nil { return "", err } // OK so we need an elevated shell runner to wrap our command, this is going to have its own path // generate the script and update the command runner in the process path, err := p.generateElevatedRunner(command) // Return the path to the elevated shell wrapper command = fmt.Sprintf("powershell -executionpolicy bypass -file \"%s\"", path) return }
// Run types the boot command into the VM by translating each rendered
// command line into scancodes and sending them with VBoxManage
// controlvm. Special "wait"/"wait5"/"wait10" pseudo-codes pause typing.
// 10.0.2.2 is the host address as seen from the guest on the default
// NAT network.
func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)
	httpPort := state.Get("http_port").(uint)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	s.Ctx.Data = &bootCommandTemplateData{
		"10.0.2.2",
		httpPort,
		vmName,
	}

	ui.Say("Typing the boot command...")
	for _, command := range s.BootCommand {
		command, err := interpolate.Render(command, &s.Ctx)
		if err != nil {
			err := fmt.Errorf("Error preparing boot command: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		for _, code := range scancodes(command) {
			// Pseudo-codes emitted by scancodes() that request a pause
			// instead of a keypress.
			if code == "wait" {
				time.Sleep(1 * time.Second)
				continue
			}

			if code == "wait5" {
				time.Sleep(5 * time.Second)
				continue
			}

			if code == "wait10" {
				time.Sleep(10 * time.Second)
				continue
			}

			// Since typing is sometimes so slow, we check for an interrupt
			// in between each character.
			if _, ok := state.GetOk(multistep.StateCancelled); ok {
				return multistep.ActionHalt
			}

			if err := driver.VBoxManage("controlvm", vmName, "keyboardputscancode", code); err != nil {
				err := fmt.Errorf("Error sending boot command: %s", err)
				state.Put("error", err)
				ui.Error(err.Error())
				return multistep.ActionHalt
			}
		}
	}

	return multistep.ActionContinue
}
// createConfig renders the client.rb configuration (from the default
// template or a user-supplied ConfigTemplate file) and uploads it to
// the staging directory, returning the remote path.
func (p *Provisioner) createConfig(
	ui packer.Ui,
	comm packer.Communicator,
	nodeName string,
	serverUrl string,
	clientKey string,
	encryptedDataBagSecretPath,
	remoteKeyPath string,
	validationClientName string,
	chefEnvironment string,
	sslVerifyMode string) (string, error) {

	ui.Message("Creating configuration file 'client.rb'")

	// Read the template: user-provided file overrides the built-in one.
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()

		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}

		tpl = string(tplBytes)
	}

	// Copy the context so the shared one isn't mutated.
	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:                   nodeName,
		ServerUrl:                  serverUrl,
		ClientKey:                  clientKey,
		ValidationKeyPath:          remoteKeyPath,
		ValidationClientName:       validationClientName,
		ChefEnvironment:            chefEnvironment,
		SslVerifyMode:              sslVerifyMode,
		EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
// createConfig renders the solo.rb configuration (from the default
// template or a user-supplied ConfigTemplate file) with the combined
// remote + local cookbook paths, and uploads it to the staging
// directory, returning the remote path.
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, localCookbooks []string, rolesPath string, dataBagsPath string, encryptedDataBagSecretPath string, environmentsPath string, chefEnvironment string) (string, error) {
	ui.Message("Creating configuration file 'solo.rb'")

	// Quote every cookbook path; remote paths first, then the uploaded
	// local ones (offset by the number of remote paths).
	cookbook_paths := make([]string, len(p.config.RemoteCookbookPaths)+len(localCookbooks))
	for i, path := range p.config.RemoteCookbookPaths {
		cookbook_paths[i] = fmt.Sprintf(`"%s"`, path)
	}

	for i, path := range localCookbooks {
		i = len(p.config.RemoteCookbookPaths) + i
		cookbook_paths[i] = fmt.Sprintf(`"%s"`, path)
	}

	// Read the template: user-provided file overrides the built-in one.
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()

		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}

		tpl = string(tplBytes)
	}

	p.config.ctx.Data = &ConfigTemplate{
		CookbookPaths:              strings.Join(cookbook_paths, ","),
		RolesPath:                  rolesPath,
		DataBagsPath:               dataBagsPath,
		EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
		EnvironmentsPath:           environmentsPath,
		// Has* flags let the template omit unset sections entirely.
		HasRolesPath:                  rolesPath != "",
		HasDataBagsPath:               dataBagsPath != "",
		HasEncryptedDataBagSecretPath: encryptedDataBagSecretPath != "",
		HasEnvironmentsPath:           environmentsPath != "",
		ChefEnvironment:               chefEnvironment,
	}
	configString, err := interpolate.Render(tpl, &p.config.ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "solo.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
// Prepare decodes the tunnel provisioner configuration, interpolates
// the exec command and its arguments, resolves the executable on PATH,
// and initializes the embedded SSH server.
func (t *tunnel) Prepare(raw ...interface{}) error {
	var errs *packer.MultiError
	err := config.Decode(t, nil, raw...)
	if err != nil {
		return err
	}

	if t.Exec == "" {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("missing tunnel provisioner parameter exec"))
	}

	// nil context: no template data, only global functions/variables.
	t.Exec, err = interpolate.Render(t.Exec, nil)
	if err != nil {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("error processing exec template: %s", err))
	}

	for i, arg := range t.Args {
		t.Args[i], err = interpolate.Render(arg, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(errs,
				fmt.Errorf("error processing arg %d (%q): %s", i, arg, err))
		}
	}

	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}

	// Resolve the executable now so a missing binary fails at prepare
	// time instead of at provision time.
	var texec string
	texec, err = exec.LookPath(t.Exec)
	if err != nil {
		return fmt.Errorf("executable %q not found: %v", t.Exec, err)
	}
	t.Exec = texec

	t.server, err = newSSHServer()
	if err != nil {
		return fmt.Errorf("could not initialize ssh server: %v", err)
	}
	return nil
}
// Run types the boot command into the Hyper-V VM: it resolves the host
// adapter IP for the configured switch (used by the {{.HTTPIP}}-style
// template data), renders each boot command, converts the result to
// scancodes, and sends them all in one TypeScanCodes call.
func (s *StepTypeBootCommand) Run(state multistep.StateBag) multistep.StepAction {
	httpPort := state.Get("http_port").(uint)
	ui := state.Get("ui").(packer.Ui)
	driver := state.Get("driver").(Driver)
	vmName := state.Get("vmName").(string)

	hostIp, err := driver.GetHostAdapterIpAddressForSwitch(s.SwitchName)
	if err != nil {
		err := fmt.Errorf("Error getting host adapter ip address: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	ui.Say(fmt.Sprintf("Host IP for the HyperV machine: %s", hostIp))

	s.Ctx.Data = &bootCommandTemplateData{
		hostIp,
		httpPort,
		vmName,
	}

	ui.Say("Typing the boot command...")
	scanCodesToSend := []string{}

	for _, command := range s.BootCommand {
		command, err := interpolate.Render(command, &s.Ctx)
		if err != nil {
			err := fmt.Errorf("Error preparing boot command: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		scanCodesToSend = append(scanCodesToSend, scancodes(command)...)
	}

	// Send all scancodes at once, space-separated.
	scanCodesToSendString := strings.Join(scanCodesToSend, " ")

	if err := driver.TypeScanCodes(vmName, scanCodesToSendString); err != nil {
		err := fmt.Errorf("Error sending boot command: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func (d *DockerDriver) StartContainer(config *ContainerConfig) (string, error) { // Build up the template data var tplData startContainerTemplate tplData.Image = config.Image ctx := *d.Ctx ctx.Data = &tplData // Args that we're going to pass to Docker args := []string{"run"} if config.Privileged { args = append(args, "--privileged") } for host, guest := range config.Volumes { args = append(args, "-v", fmt.Sprintf("%s:%s", host, guest)) } for _, v := range config.RunCommand { v, err := interpolate.Render(v, &ctx) if err != nil { return "", err } args = append(args, v) } d.Ui.Message(fmt.Sprintf( "Run command: docker %s", strings.Join(args, " "))) // Start the container var stdout, stderr bytes.Buffer cmd := exec.Command("docker", args...) cmd.Stdout = &stdout cmd.Stderr = &stderr log.Printf("Starting container with args: %v", args) if err := cmd.Start(); err != nil { return "", err } log.Println("Waiting for container to finish starting") if err := cmd.Wait(); err != nil { if _, ok := err.(*exec.ExitError); ok { err = fmt.Errorf("Docker exited with a non-zero exit status.\nStderr: %s", stderr.String()) } return "", err } // Capture the container ID, which is alone on stdout return strings.TrimSpace(stdout.String()), nil }
// Start renders the configured execute-command template around
// cmd.Command, launches the resulting local process with the remote
// command's stdio attached, and asynchronously reports the exit status
// back through cmd.SetExited.
func (c *Communicator) Start(cmd *packer.RemoteCmd) error {
	// Render the template so that we know how to execute the command
	c.Ctx.Data = &ExecuteCommandTemplate{
		Command: cmd.Command,
	}
	// NOTE(review): this writes the rendered values back into
	// c.ExecuteCommand, so the template is consumed on the first call —
	// confirm callers never reuse this Communicator for a second Start.
	for i, field := range c.ExecuteCommand {
		command, err := interpolate.Render(field, &c.Ctx)
		if err != nil {
			return fmt.Errorf("Error processing command: %s", err)
		}

		c.ExecuteCommand[i] = command
	}

	// Build the local command to execute
	localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...)
	localCmd.Stdin = cmd.Stdin
	localCmd.Stdout = cmd.Stdout
	localCmd.Stderr = cmd.Stderr

	// Start it. If it doesn't work, then error right away.
	if err := localCmd.Start(); err != nil {
		return err
	}

	// We've started successfully. Start a goroutine to wait for
	// it to complete and track exit status.
	go func() {
		var exitStatus int
		err := localCmd.Wait()
		if err != nil {
			if exitErr, ok := err.(*exec.ExitError); ok {
				exitStatus = 1

				// There is no process-independent way to get the REAL
				// exit status so we just try to go deeper.
				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
					exitStatus = status.ExitStatus()
				}
			}
		}

		cmd.SetExited(exitStatus)
	}()

	return nil
}
// Run creates and registers a new VirtualBox VM: it renders the VM
// name template, then issues createvm plus modifyvm commands for boot
// order, CPU count, and memory. The final name is stored in the state
// bag under "vmName" for later steps.
func (s *StepCreateVM) Run(state multistep.StateBag) multistep.StepAction {
	config := state.Get("config").(*Config)
	driver := state.Get("driver").(vboxcommon.Driver)
	ui := state.Get("ui").(packer.Ui)

	name, err := interpolate.Render(s.VMName, &s.Ctx)
	if err != nil {
		err := fmt.Errorf("Error preparing vm name: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	commands := make([][]string, 4)
	commands[0] = []string{
		"createvm", "--name", name,
		"--ostype", config.GuestOSType, "--register",
	}
	commands[1] = []string{
		"modifyvm", name,
		"--boot1", "disk", "--boot2", "dvd", "--boot3", "none", "--boot4", "none",
	}
	commands[2] = []string{"modifyvm", name, "--cpus", "1"}
	commands[3] = []string{"modifyvm", name, "--memory", "512"}

	ui.Say("Creating virtual machine...")
	for _, command := range commands {
		err := driver.VBoxManage(command...)
		if err != nil {
			err := fmt.Errorf("Error creating VM: %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt
		}

		// Set the VM name property on the first command
		// NOTE(review): despite the comment, this check runs every
		// iteration; it only assigns once because VMName becomes
		// non-empty afterwards — and only when VMName started empty.
		// Confirm this is the intended behavior.
		if s.VMName == "" {
			s.VMName = name
		}
	}

	// Set the final name in the state bag so others can use it
	state.Put("vmName", name)

	return multistep.ActionContinue
}
// Run uploads the VMware Tools ISO to the guest at the rendered upload
// path. On ESX5 it instead asks the driver to mount the tools ISO. A
// no-op when no ToolsUploadFlavor is configured.
func (c *StepUploadTools) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)

	// Nothing to upload if no flavor was requested.
	if c.ToolsUploadFlavor == "" {
		return multistep.ActionContinue
	}

	if c.RemoteType == "esx5" {
		if err := driver.ToolsInstall(); err != nil {
			state.Put("error", fmt.Errorf("Couldn't mount VMware tools ISO. Please check the 'guest_os_type' in your template.json."))
		}
		// NOTE(review): the error is recorded but the step still
		// continues — this looks like deliberate best-effort behavior;
		// confirm before changing to ActionHalt.
		return multistep.ActionContinue
	}

	comm := state.Get("communicator").(packer.Communicator)
	tools_source := state.Get("tools_upload_source").(string)
	ui := state.Get("ui").(packer.Ui)

	ui.Say(fmt.Sprintf("Uploading the '%s' VMware Tools", c.ToolsUploadFlavor))
	f, err := os.Open(tools_source)
	if err != nil {
		state.Put("error", fmt.Errorf("Error opening VMware Tools ISO: %s", err))
		return multistep.ActionHalt
	}
	defer f.Close()

	// Render the guest-side destination path with the flavor.
	c.Ctx.Data = &toolsUploadPathTemplate{
		Flavor: c.ToolsUploadFlavor,
	}
	c.ToolsUploadPath, err = interpolate.Render(c.ToolsUploadPath, &c.Ctx)
	if err != nil {
		err := fmt.Errorf("Error preparing upload path: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	if err := comm.Upload(c.ToolsUploadPath, f, nil); err != nil {
		err := fmt.Errorf("Error uploading VMware Tools: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func (s *StepUploadGuestAdditions) Run(state multistep.StateBag) multistep.StepAction { comm := state.Get("communicator").(packer.Communicator) driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) // If we're attaching then don't do this, since we attached. if s.GuestAdditionsMode != GuestAdditionsModeUpload { log.Println("Not uploading guest additions since mode is not upload") return multistep.ActionContinue } // Get the guest additions path since we're doing it guestAdditionsPath := state.Get("guest_additions_path").(string) version, err := driver.Version() if err != nil { state.Put("error", fmt.Errorf("Error reading version for guest additions upload: %s", err)) return multistep.ActionHalt } f, err := os.Open(guestAdditionsPath) if err != nil { state.Put("error", fmt.Errorf("Error opening guest additions ISO: %s", err)) return multistep.ActionHalt } s.Ctx.Data = &guestAdditionsPathTemplate{ Version: version, } s.GuestAdditionsPath, err = interpolate.Render(s.GuestAdditionsPath, &s.Ctx) if err != nil { err := fmt.Errorf("Error preparing guest additions path: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } ui.Say("Uploading VirtualBox guest additions ISO...") if err := comm.Upload(s.GuestAdditionsPath, f, nil); err != nil { state.Put("error", fmt.Errorf("Error uploading guest additions: %s", err)) return multistep.ActionHalt } return multistep.ActionContinue }
// PostProcess is the main entry point. It calls a Provider's Convert() method
// to delegate conversion to that Provider's command-line tool.
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	provider, err := providerForBuilderId(artifact.BuilderId())
	if err != nil {
		return nil, false, err
	}

	ui.Say(fmt.Sprintf("Converting %s image to VHD file...", provider))

	// Render output path.
	p.config.ctx.Data = &outputPathTemplate{
		ArtifactId: artifact.Id(),
		BuildName:  p.config.PackerBuildName,
		Provider:   provider.Name(),
	}
	outputPath, err := interpolate.Render(p.config.OutputPath, &p.config.ctx)
	if err != nil {
		return nil, false, err
	}

	// Check if VHD file exists. Remove if the user specified `force` in the
	// template or `--force` on the command-line.
	// This differs from the Vagrant post-processor because the VHD can be
	// used (and written to) immediately. It is comparable to a Builder
	// end-product.
	if _, err = os.Stat(outputPath); err == nil {
		if p.config.PackerForce || p.config.Force {
			ui.Message(fmt.Sprintf("Removing existing VHD file at %s", outputPath))
			os.Remove(outputPath)
		} else {
			return nil, false, fmt.Errorf("VHD file exists: %s\nUse the force flag to delete it.", outputPath)
		}
	}

	err = provider.Convert(ui, artifact, outputPath)
	if err != nil {
		return nil, false, err
	}

	ui.Say(fmt.Sprintf("Converted VHD: %s", outputPath))
	artifact = NewArtifact(provider.String(), outputPath)
	// Whether the input artifact survives is controlled by the
	// keep_input_artifact setting.
	keep := p.config.KeepInputArtifact

	return artifact, keep, nil
}
// Run uploads the Parallels Tools ISO to the VM.
//
// It renders the guest-side destination path with the configured
// flavor before uploading. A no-op unless ParallelsToolsMode is
// "upload".
func (s *StepUploadParallelsTools) Run(state multistep.StateBag) multistep.StepAction {
	comm := state.Get("communicator").(packer.Communicator)
	ui := state.Get("ui").(packer.Ui)

	// If we're attaching then don't do this, since we attached.
	if s.ParallelsToolsMode != ParallelsToolsModeUpload {
		log.Println("Not uploading Parallels Tools since mode is not upload")
		return multistep.ActionContinue
	}

	// Get the Paralells Tools path on the host machine
	parallelsToolsPath := state.Get("parallels_tools_path").(string)

	f, err := os.Open(parallelsToolsPath)
	if err != nil {
		state.Put("error", fmt.Errorf("Error opening Parallels Tools ISO: %s", err))
		return multistep.ActionHalt
	}
	defer f.Close()

	s.Ctx.Data = &toolsPathTemplate{
		Flavor: s.ParallelsToolsFlavor,
	}

	s.ParallelsToolsGuestPath, err = interpolate.Render(s.ParallelsToolsGuestPath, &s.Ctx)
	if err != nil {
		err = fmt.Errorf("Error preparing Parallels Tools path: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	ui.Say(fmt.Sprintf("Uploading Parallels Tools for '%s' to path: '%s'",
		s.ParallelsToolsFlavor, s.ParallelsToolsGuestPath))
	if err := comm.Upload(s.ParallelsToolsGuestPath, f, nil); err != nil {
		err = fmt.Errorf("Error uploading Parallels Tools: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	return multistep.ActionContinue
}
func processArgs(args [][]string, ctx *interpolate.Context) ([][]string, error) { var err error if args == nil { return make([][]string, 0), err } newArgs := make([][]string, len(args)) for argsIdx, rowArgs := range args { parms := make([]string, len(rowArgs)) newArgs[argsIdx] = parms for i, parm := range rowArgs { parms[i], err = interpolate.Render(parm, ctx) if err != nil { return nil, err } } } return newArgs, err }
// processJsonUserVars interpolates user variables inside the JSON
// config: it marshals the config to JSON, temporarily escapes quotes
// and backslashes in the user variables so the rendered text stays
// valid JSON, renders the whole document, and unmarshals the result.
// The original user variables are always restored afterwards.
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
	jsonBytes, err := json.Marshal(p.config.Json)
	if err != nil {
		// This really shouldn't happen since we literally just unmarshalled
		panic(err)
	}

	// Copy the user variables so that we can restore them later, and
	// make sure we make the quotes JSON-friendly in the user variables.
	originalUserVars := make(map[string]string)
	for k, v := range p.config.ctx.UserVariables {
		originalUserVars[k] = v
	}

	// Make sure we reset them no matter what
	defer func() {
		p.config.ctx.UserVariables = originalUserVars
	}()

	// Make the current user variables JSON string safe.
	for k, v := range p.config.ctx.UserVariables {
		v = strings.Replace(v, `\`, `\\`, -1)
		v = strings.Replace(v, `"`, `\"`, -1)
		p.config.ctx.UserVariables[k] = v
	}

	// Process the bytes with the template processor
	p.config.ctx.Data = nil
	jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx)
	if err != nil {
		return nil, err
	}

	var result map[string]interface{}
	if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
		return nil, err
	}

	return result, nil
}
// Run executes `prlctl` commands. func (s *StepPrlctl) Run(state multistep.StateBag) multistep.StepAction { driver := state.Get("driver").(Driver) ui := state.Get("ui").(packer.Ui) vmName := state.Get("vmName").(string) if len(s.Commands) > 0 { ui.Say("Executing custom prlctl commands...") } s.Ctx.Data = &commandTemplate{ Name: vmName, } for _, originalCommand := range s.Commands { command := make([]string, len(originalCommand)) copy(command, originalCommand) for i, arg := range command { var err error command[i], err = interpolate.Render(arg, &s.Ctx) if err != nil { err = fmt.Errorf("Error preparing prlctl command: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } } ui.Message(fmt.Sprintf("Executing: prlctl %s", strings.Join(command, " "))) if err := driver.Prlctl(command...); err != nil { err = fmt.Errorf("Error executing command: %s", err) state.Put("error", err) ui.Error(err.Error()) return multistep.ActionHalt } } return multistep.ActionContinue }
func (p *HostCommandProvisioner) Provision(ui packer.Ui, comm packer.Communicator) error { // Build our variables up by adding in the build name and builder type env := make([]string, len(p.config.Vars)+2) env[0] = "PACKER_BUILD_NAME=" + p.config.PackerBuildName env[1] = "PACKER_BUILDER_TYPE=" + p.config.PackerBuilderType copy(env[2:], p.config.Vars) // Run commands directly on the host machine for _, cmdStr := range p.config.Commands { ui.Say(fmt.Sprintf("Running host command: %s", cmdStr)) // Compile the full command string p.config.ctx.Data = &ExecuteCommandTemplate{ Command: cmdStr, } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return fmt.Errorf("Error processing command '%s': %s", cmdStr, err) } // We have to split the command string into the actual command and its arguments parts, err := shlex.Split(command) cmd := exec.Command(parts[0], parts[1:]...) cmd.Env = env // Redirect stdout/stderr to the ui writer cmd.Stdout = CommandWriter{WriteFunc: ui.Say} cmd.Stderr = CommandWriter{WriteFunc: ui.Error} if err := cmd.Run(); err != nil { return fmt.Errorf("Error running command '%s': %s", cmdStr, err) } } return nil }