func delVNIC(ui packer.Ui, f *find.Finder, ctx context.Context, vm *object.VirtualMachine) error {
	ui.Say("Deleting NIC")
	devicelst, err := vm.Device(ctx)
	if err != nil {
		return err
	}

	for _, device := range devicelst {
		switch device.(type) {
		case *types.VirtualVmxnet3, *types.VirtualE1000:
			ui.Message(fmt.Sprintf("Removing NIC %s\n", device.GetVirtualDevice().DeviceInfo))
			if err := vm.RemoveDevice(ctx, device); err != nil {
				return err
			}
			return nil
		default:
			fmt.Printf("Type %s\n", reflect.TypeOf(device).Elem())
			fmt.Printf("Device info %s\n", device.GetVirtualDevice().DeviceInfo)
		}
	}

	return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if artifact.BuilderId() != dockerimport.BuilderId {
		err := fmt.Errorf(
			"Unknown artifact type: %s\nCan only tag from Docker builder artifacts.",
			artifact.BuilderId())
		return nil, false, err
	}

	dockerfile, template_err := p.render_template(trim_artifact_id(artifact.Id()))
	if template_err != nil { // could not render template
		return nil, false, template_err
	}

	log.Printf("[DEBUG] Dockerfile: %s\n", dockerfile.String())

	if image_id, err := p.docker_build_fn(dockerfile); err != nil { // docker build command failed
		ui.Error("docker build command failed: " + err.Error())
		return nil, false, err
	} else {
		ui.Message("Built image: " + image_id)
		new_artifact := &docker.ImportArtifact{
			BuilderIdValue: dockerimport.BuilderId,
			Driver:         &docker.DockerDriver{Ui: ui, Tpl: nil},
			IdValue:        image_id,
		}
		log.Printf("[DEBUG] artifact: %#v\n", new_artifact)
		return new_artifact, true, nil
	}
}
func (p *Provisioner) uploadManifests(ui packer.Ui, comm packer.Communicator) (string, error) {
	// Create the remote manifests directory...
	ui.Message("Uploading manifests...")
	remoteManifestsPath := fmt.Sprintf("%s/manifests", p.config.StagingDir)
	if err := p.createDir(ui, comm, remoteManifestsPath); err != nil {
		return "", fmt.Errorf("Error creating manifests directory: %s", err)
	}

	// Upload the main manifest
	f, err := os.Open(p.config.ManifestFile)
	if err != nil {
		return "", err
	}
	defer f.Close()

	manifestFilename := p.config.ManifestFile
	if fi, err := os.Stat(p.config.ManifestFile); err != nil {
		return "", fmt.Errorf("Error inspecting manifest file: %s", err)
	} else if !fi.IsDir() {
		manifestFilename = filepath.Base(manifestFilename)
	} else {
		ui.Say("WARNING: manifest_file should be a file. Use manifest_dir for directories")
	}

	remoteManifestFile := fmt.Sprintf("%s/%s", remoteManifestsPath, manifestFilename)
	if err := comm.Upload(remoteManifestFile, f, nil); err != nil {
		return "", err
	}

	return remoteManifestFile, nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if artifact.BuilderId() != dockerimport.BuilderId {
		err := fmt.Errorf(
			"Unknown artifact type: %s\nCan only import from docker-import artifacts.",
			artifact.BuilderId())
		return nil, false, err
	}

	driver := p.Driver
	if driver == nil {
		// If no driver is set, then we use the real driver
		driver = &docker.DockerDriver{Tpl: p.config.tpl, Ui: ui}
	}

	// Get the name. We strip off any tags from the name because the
	// push doesn't use those.
	name := artifact.Id()
	if i := strings.Index(name, "/"); i >= 0 {
		// This should always be true because the / is required. But we have
		// to get the index to this so we don't accidentally strip off the port
		if j := strings.Index(name[i:], ":"); j >= 0 {
			name = name[:i+j]
		}
	}

	ui.Message("Pushing: " + name)
	if err := driver.Push(name); err != nil {
		return nil, false, err
	}

	return nil, false, nil
}
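// A standalone sketch of the tag-stripping rule used in PostProcess above:
// drop a trailing ":tag" from an image reference without clipping a registry
// port (any colon before the first "/"). stripTag is a hypothetical helper
// for illustration only; it is not part of the original post-processor.
func stripTag(name string) string {
	if i := strings.Index(name, "/"); i >= 0 {
		// Only look for a colon after the first slash, so a
		// "registry:port" prefix is left intact.
		if j := strings.Index(name[i:], ":"); j >= 0 {
			return name[:i+j]
		}
	}
	return name
}

// stripTag("registry.example.com:5000/foo/bar:1.2") returns
// "registry.example.com:5000/foo/bar"; a reference without a tag comes
// back unchanged.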
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
	ui.Message("Installing Chef...")

	p.config.ctx.Data = &InstallChefTemplate{
		Sudo: !p.config.PreventSudo,
	}
	command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx)
	if err != nil {
		return err
	}

	ui.Message(command)

	cmd := &packer.RemoteCmd{Command: command}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}

	if cmd.ExitStatus != 0 {
		return fmt.Errorf(
			"Install script exited with non-zero exit status %d", cmd.ExitStatus)
	}

	return nil
}
func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string) (string, error) {
	ui.Message("Creating configuration file 'knife.rb'")

	// Read the template
	tpl := DefaultKnifeTemplate

	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:      nodeName,
		ServerUrl:     serverUrl,
		ClientKey:     clientKey,
		SslVerifyMode: sslVerifyMode,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))

	mkdirCmd := fmt.Sprintf("mkdir -p '%s'", dir)
	if !p.config.PreventSudo {
		mkdirCmd = "sudo " + mkdirCmd
	}

	cmd := &packer.RemoteCmd{Command: mkdirCmd}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}

	// Chmod the directory to 0777 just so that we can access it as our user
	mkdirCmd = fmt.Sprintf("chmod 0777 '%s'", dir)
	if !p.config.PreventSudo {
		mkdirCmd = "sudo " + mkdirCmd
	}

	cmd = &packer.RemoteCmd{Command: mkdirCmd}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}

	return nil
}
func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) {
	ui.Message("Creating JSON attribute file")

	jsonData := make(map[string]interface{})

	// Copy the configured JSON
	for k, v := range p.config.Json {
		jsonData[k] = v
	}

	// Set the run list if it was specified
	if len(p.config.RunList) > 0 {
		jsonData["run_list"] = p.config.RunList
	}

	jsonBytes, err := json.MarshalIndent(jsonData, "", "  ")
	if err != nil {
		return "", err
	}

	// Upload the bytes
	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "node.json"))
	if err := comm.Upload(remotePath, bytes.NewReader(jsonBytes), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
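// ExampleCreateJsonOutput is a hypothetical, self-contained illustration of
// the node.json content createJson produces, assuming Json = {"foo": "bar"}
// and RunList = ["recipe[base]"] (sample values, not taken from the source):
func ExampleCreateJsonOutput() {
	jsonData := map[string]interface{}{"foo": "bar"}
	jsonData["run_list"] = []string{"recipe[base]"}
	jsonBytes, _ := json.MarshalIndent(jsonData, "", "  ")
	fmt.Println(string(jsonBytes))
	// Output:
	// {
	//   "foo": "bar",
	//   "run_list": [
	//     "recipe[base]"
	//   ]
	// }
}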
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, localCookbooks []string) (string, error) {
	ui.Message("Creating configuration file 'solo.rb'")

	cookbook_paths := make([]string, 0, len(p.config.RemoteCookbookPaths)+len(localCookbooks))
	for _, path := range p.config.RemoteCookbookPaths {
		cookbook_paths = append(cookbook_paths, fmt.Sprintf(`"%s"`, path))
	}
	for _, path := range localCookbooks {
		cookbook_paths = append(cookbook_paths, fmt.Sprintf(`"%s"`, path))
	}

	configString, err := p.config.tpl.Process(DefaultConfigTemplate, &ConfigTemplate{
		CookbookPaths: strings.Join(cookbook_paths, ","),
	})
	if err != nil {
		return "", err
	}

	remotePath := filepath.Join(p.config.StagingDir, "solo.rb")
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString))); err != nil {
		return "", err
	}

	return remotePath, nil
}
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error {
	p.config.ctx.Data = &ExecuteTemplate{
		ConfigPath: config,
		JsonPath:   json,
		Sudo:       !p.config.PreventSudo,
	}
	command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing Chef: %s", command))

	cmd := &packer.RemoteCmd{
		Command: command,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}

	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}

	return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if artifact.BuilderId() != dockerimport.BuilderId {
		err := fmt.Errorf(
			"Unknown artifact type: %s\nCan only tag from Docker builder artifacts.",
			artifact.BuilderId())
		return nil, false, err
	}

	driver := p.Driver
	if driver == nil {
		// If no driver is set, then we use the real driver
		driver = &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui}
	}

	importRepo := p.config.Repository
	if p.config.Tag != "" {
		importRepo += ":" + p.config.Tag
	}

	ui.Message("Tagging image: " + artifact.Id())
	ui.Message("Repository: " + importRepo)
	err := driver.TagImage(artifact.Id(), importRepo, p.config.Force)
	if err != nil {
		return nil, false, err
	}

	// Build the artifact
	artifact = &docker.ImportArtifact{
		BuilderIdValue: BuilderId,
		Driver:         driver,
		IdValue:        importRepo,
	}

	return artifact, true, nil
}
func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) error {
	playbook := filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))

	// The inventory must be set to "127.0.0.1,". The comma is important
	// as it's the only way to override the ansible inventory when dealing
	// with a single host.
	extraArgs := ""
	if len(p.config.ExtraArguments) > 0 {
		extraArgs = " " + strings.Join(p.config.ExtraArguments, " ")
	}
	command := fmt.Sprintf("%s %s%s -c local -i \"127.0.0.1,\"",
		p.config.Command, playbook, extraArgs)

	ui.Message(fmt.Sprintf("Executing Ansible: %s", command))
	cmd := &packer.RemoteCmd{
		Command: command,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		if cmd.ExitStatus == 127 {
			return fmt.Errorf("%s could not be found. Verify that it is available on the\n"+
				"PATH after connecting to the machine.", p.config.Command)
		}

		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}

	return nil
}
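// A minimal sketch of the command line executeAnsible assembles, using
// hypothetical sample values for Command, StagingDir, and PlaybookFile
// (they are not taken from the source). The trailing comma in the
// inventory is what makes Ansible treat it as an inline host list rather
// than an inventory file path:
func ExampleAnsibleLocalCommand() {
	command := fmt.Sprintf("%s %s%s -c local -i \"127.0.0.1,\"",
		"ansible-playbook", "/tmp/packer-provisioner-ansible-local/site.yml", "")
	fmt.Println(command)
	// Output:
	// ansible-playbook /tmp/packer-provisioner-ansible-local/site.yml -c local -i "127.0.0.1,"
}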
func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) error {
	playbook := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile)))
	inventory := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.InventoryFile)))

	extraArgs := ""
	if len(p.config.ExtraArguments) > 0 {
		extraArgs = " " + strings.Join(p.config.ExtraArguments, " ")
	}

	command := fmt.Sprintf("cd %s && %s %s%s -c local -i %s",
		p.config.StagingDir, p.config.Command, playbook, extraArgs, inventory)
	ui.Message(fmt.Sprintf("Executing Ansible: %s", command))
	cmd := &packer.RemoteCmd{
		Command: command,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		if cmd.ExitStatus == 127 {
			return fmt.Errorf("%s could not be found. Verify that it is available on the\n"+
				"PATH after connecting to the machine.", p.config.Command)
		}

		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}

	return nil
}
func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator, privKeyFile string, checkHostKey bool) error {
	playbook, _ := filepath.Abs(p.config.PlaybookFile)
	inventory := p.config.inventoryFile
	var envvars []string

	args := []string{playbook, "-i", inventory}
	if len(privKeyFile) > 0 {
		args = append(args, "--private-key", privKeyFile)
	}
	args = append(args, p.config.ExtraArguments...)
	if len(p.config.AnsibleEnvVars) > 0 {
		envvars = append(envvars, p.config.AnsibleEnvVars...)
	}

	cmd := exec.Command(p.config.Command, args...)
	cmd.Env = os.Environ()
	if len(envvars) > 0 {
		cmd.Env = append(cmd.Env, envvars...)
	}
	if !checkHostKey {
		cmd.Env = append(cmd.Env, "ANSIBLE_HOST_KEY_CHECKING=False")
	}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	// Stream both pipes to the UI until the process closes them.
	wg := sync.WaitGroup{}
	repeat := func(r io.ReadCloser) {
		scanner := bufio.NewScanner(r)
		for scanner.Scan() {
			ui.Message(scanner.Text())
		}
		if err := scanner.Err(); err != nil {
			ui.Error(err.Error())
		}
		wg.Done()
	}
	wg.Add(2)
	go repeat(stdout)
	go repeat(stderr)

	ui.Say(fmt.Sprintf("Executing Ansible: %s", strings.Join(cmd.Args, " ")))
	if err := cmd.Start(); err != nil {
		return err
	}
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("Non-zero exit status: %s", err)
	}

	return nil
}
func (p *OVFPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if artifact.BuilderId() != "mitchellh.vmware" {
		return nil, false, fmt.Errorf(
			"ovftool post-processor can only be used on VMware boxes: %s", artifact.BuilderId())
	}

	vmx := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, ".vmx") {
			vmx = path
		}
	}
	if vmx == "" {
		return nil, false, fmt.Errorf("VMX file could not be located.")
	}

	// Strip DVD and floppy drives from the VMX
	if err := p.stripDrives(vmx); err != nil {
		return nil, false, fmt.Errorf("Couldn't strip floppy/DVD drives from VMX")
	}

	p.cfg.ctx.Data = &OutputPathTemplate{
		ArtifactId: artifact.Id(),
		BuildName:  p.cfg.BuildName,
		Provider:   "vmware",
	}
	targetPath, err := interpolate.Render(p.cfg.TargetPath, &p.cfg.ctx)
	if err != nil {
		return nil, false, err
	}

	// build the arguments
	args := []string{
		"--targetType=" + p.cfg.TargetType,
		"--acceptAllEulas",
	}

	// append --compression, if it is set
	if p.cfg.Compression > 0 {
		args = append(args, fmt.Sprintf("--compress=%d", p.cfg.Compression))
	}

	// add the source/target
	args = append(args, vmx, targetPath)

	ui.Message(fmt.Sprintf("Executing ovftool with arguments: %+v", args))
	cmd := exec.Command(executable, args...)
	var buffer bytes.Buffer
	cmd.Stdout = &buffer
	cmd.Stderr = &buffer
	if err := cmd.Run(); err != nil {
		return nil, false, fmt.Errorf("Unable to execute ovftool: %s", buffer.String())
	}
	ui.Message(buffer.String())

	return artifact, false, nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if artifact.BuilderId() != "mitchellh.post-processor.vagrant" {
		return nil, false, fmt.Errorf(
			"Unknown artifact type, requires box from vagrant post-processor: %s", artifact.BuilderId())
	}

	box := artifact.Files()[0]
	if !strings.HasSuffix(box, ".box") {
		return nil, false, fmt.Errorf(
			"Unknown files in artifact from vagrant post-processor: %s", artifact.Files())
	}

	provider := providerFromBuilderName(artifact.Id())

	file, err := os.Open(box)
	if err != nil {
		return nil, false, err
	}
	defer file.Close()

	info, err := file.Stat()
	if err != nil {
		return nil, false, err
	}
	size := info.Size()
	ui.Message(fmt.Sprintf("Box size: %s (%d bytes)", box, size))

	metadata, err := p.getMetadata()
	if err != nil {
		return nil, false, err
	}

	ui.Message("Generating checksum")
	checksum, err := sum256(file)
	if err != nil {
		return nil, false, err
	}
	ui.Message(fmt.Sprintf("Checksum is %s", checksum))

	ui.Message(fmt.Sprintf("Adding %s %s box to metadata", provider, p.config.Version))
	if err := metadata.Add(p.config.Version, &Provider{
		Name:         provider,
		Url:          fmt.Sprintf("%s/%s/%s", p.config.UrlPrefix, p.config.BoxDir, path.Base(box)),
		ChecksumType: "sha256",
		Checksum:     checksum,
	}); err != nil {
		return nil, false, err
	}

	ui.Message(fmt.Sprintf("Saving the metadata: %s", p.config.MetadataPath))
	if err := p.putMetadata(metadata); err != nil {
		return nil, false, err
	}

	return &Artifact{fmt.Sprintf("%s/%s", p.config.UrlPrefix, p.config.MetadataPath)}, true, nil
}
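// sum256 is called above but not defined in this excerpt. A plausible
// minimal implementation (an assumption, not necessarily the original)
// streams the reader through SHA-256 and returns the hex digest, which
// matches the "sha256" ChecksumType recorded in the metadata. It assumes
// the crypto/sha256, encoding/hex, and io imports.
func sum256(r io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}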
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))

	cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}

	return nil
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if _, ok := builtins[artifact.BuilderId()]; !ok {
		return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId())
	}

	vmx := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, ".vmx") {
			vmx = path
			break
		}
	}
	if vmx == "" {
		return nil, false, fmt.Errorf("VMX file not found")
	}

	if p.config.RemoveEthernet == "true" {
		if err := p.RemoveEthernet(vmx, ui, artifact); err != nil {
			return nil, false, fmt.Errorf("Removing ethernet0 interface from VMX failed!")
		}
	}

	if p.config.RemoveFloppy == "true" {
		if err := p.RemoveFloppy(vmx, ui, artifact); err != nil {
			return nil, false, fmt.Errorf("Removing floppy drive from VMX failed!")
		}
	}

	if p.config.RemoveOpticalDrive == "true" {
		if err := p.RemoveOpticalDrive(vmx, ui, artifact); err != nil {
			return nil, false, fmt.Errorf("Removing CD/DVD Drive from VMX failed!")
		}
	}

	args := []string{
		"--acceptAllEulas",
		fmt.Sprintf("--diskMode=%s", p.config.DiskMode),
		vmx,
		p.config.Target,
	}

	ui.Message(fmt.Sprintf("Exporting %s to %s", vmx, p.config.Target))
	var out bytes.Buffer
	log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " "))
	cmd := exec.Command("ovftool", args...)
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return nil, false, fmt.Errorf("Failed: %s\nStdout: %s", err, out.String())
	}

	ui.Message(out.String())

	return artifact, false, nil
}
func ConvertToEC2Tags(tags map[string]string, ui packer.Ui) []*ec2.Tag {
	var amiTags []*ec2.Tag
	for key, value := range tags {
		ui.Message(fmt.Sprintf("Adding tag: \"%s\": \"%s\"", key, value))
		amiTags = append(amiTags, &ec2.Tag{
			Key:   aws.String(key),
			Value: aws.String(value),
		})
	}
	return amiTags
}
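// A minimal usage sketch for ConvertToEC2Tags. The wiring below is
// hypothetical: it assumes a packer.BasicUi writing to stdout and uses
// sample tag values that are not taken from the source.
func ExampleConvertToEC2Tags() {
	ui := &packer.BasicUi{Reader: os.Stdin, Writer: os.Stdout}
	amiTags := ConvertToEC2Tags(map[string]string{"Name": "packer-build"}, ui)
	fmt.Printf("converted %d tag(s)\n", len(amiTags))
}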
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, localCookbooks []string, rolesPath string, dataBagsPath string, encryptedDataBagSecretPath string, environmentsPath string, chefEnvironment string) (string, error) {
	ui.Message("Creating configuration file 'solo.rb'")

	cookbook_paths := make([]string, 0, len(p.config.RemoteCookbookPaths)+len(localCookbooks))
	for _, path := range p.config.RemoteCookbookPaths {
		cookbook_paths = append(cookbook_paths, fmt.Sprintf(`"%s"`, path))
	}
	for _, path := range localCookbooks {
		cookbook_paths = append(cookbook_paths, fmt.Sprintf(`"%s"`, path))
	}

	// Read the template
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()

		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}

		tpl = string(tplBytes)
	}

	p.config.ctx.Data = &ConfigTemplate{
		CookbookPaths:                 strings.Join(cookbook_paths, ","),
		RolesPath:                     rolesPath,
		DataBagsPath:                  dataBagsPath,
		EncryptedDataBagSecretPath:    encryptedDataBagSecretPath,
		EnvironmentsPath:              environmentsPath,
		HasRolesPath:                  rolesPath != "",
		HasDataBagsPath:               dataBagsPath != "",
		HasEncryptedDataBagSecretPath: encryptedDataBagSecretPath != "",
		HasEnvironmentsPath:           environmentsPath != "",
		ChefEnvironment:               chefEnvironment,
	}
	configString, err := interpolate.Render(tpl, &p.config.ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "solo.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
// Execute wraps qemu-img to run a QEMU command.
func (p *QEMUProvider) Execute(ui packer.Ui, command ...string) error {
	driver, err := newQEMUDriver()
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing: %s", strings.Join(command, " ")))
	if err = driver.QemuImg(command...); err != nil {
		return err
	}

	return nil
}
func (p *Provisioner) createConfig(
	ui packer.Ui,
	comm packer.Communicator,
	nodeName string,
	serverUrl string,
	clientKey string,
	encryptedDataBagSecretPath,
	remoteKeyPath string,
	validationClientName string,
	chefEnvironment string,
	sslVerifyMode string) (string, error) {
	ui.Message("Creating configuration file 'client.rb'")

	// Read the template
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()

		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}

		tpl = string(tplBytes)
	}

	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:                   nodeName,
		ServerUrl:                  serverUrl,
		ClientKey:                  clientKey,
		ValidationKeyPath:          remoteKeyPath,
		ValidationClientName:       validationClientName,
		ChefEnvironment:            chefEnvironment,
		SslVerifyMode:              sslVerifyMode,
		EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}

	return remotePath, nil
}
// Execute wraps VBoxManage to run a VirtualBox command.
func (p *VirtualBoxProvider) Execute(ui packer.Ui, command ...string) error {
	driver, err := vboxcommon.NewDriver()
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing: %s", strings.Join(command, " ")))
	if err = driver.VBoxManage(command...); err != nil {
		return err
	}

	return nil
}
func (p *Provisioner) uploadFile(ui packer.Ui, comm packer.Communicator, remotePath string, localPath string) error {
	ui.Message(fmt.Sprintf("Uploading %s...", localPath))

	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()

	return comm.Upload(remotePath, f, nil)
}
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	if _, ok := builtins[artifact.BuilderId()]; !ok {
		return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId())
	}

	vmx := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, ".vmx") {
			vmx = path
			break
		}
	}
	if vmx == "" {
		return nil, false, fmt.Errorf("VMX file not found")
	}

	ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s",
		url.QueryEscape(p.config.Username),
		url.QueryEscape(p.config.Password),
		p.config.Host,
		p.config.Datacenter,
		p.config.Cluster)
	if p.config.ResourcePool != "" {
		ovftool_uri += "/Resources/" + p.config.ResourcePool
	}

	args := []string{
		fmt.Sprintf("--noSSLVerify=%t", p.config.Insecure),
		"--acceptAllEulas",
		fmt.Sprintf("--name=%s", p.config.VMName),
		fmt.Sprintf("--datastore=%s", p.config.Datastore),
		fmt.Sprintf("--diskMode=%s", p.config.DiskMode),
		fmt.Sprintf("--network=%s", p.config.VMNetwork),
		fmt.Sprintf("--vmFolder=%s", p.config.VMFolder),
		vmx,
		ovftool_uri,
	}

	ui.Message(fmt.Sprintf("Uploading %s to vSphere", vmx))
	var out bytes.Buffer
	log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " "))
	cmd := exec.Command("ovftool", args...)
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return nil, false, fmt.Errorf("Failed: %s\nStdout: %s", err, out.String())
	}

	ui.Message(out.String())

	return artifact, false, nil
}
func (p *Provisioner) moveFile(ui packer.Ui, comm packer.Communicator, dst, src string) error {
	ui.Message(fmt.Sprintf("Moving %s to %s", src, dst))
	// Quote the paths so directories with spaces survive the shell.
	cmd := &packer.RemoteCmd{Command: fmt.Sprintf(p.sudo("mv '%s' '%s'"), src, dst)}
	if err := cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
		if err == nil {
			err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
		}
		return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err)
	}
	return nil
}
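// Single-quoting keeps paths with spaces intact, but still breaks if a path
// itself contains a single quote. A hedged hardening sketch (shellQuote is
// a hypothetical helper, not part of the original code) that also escapes
// embedded quotes using the standard '\'' idiom:
func shellQuote(s string) string {
	return "'" + strings.Replace(s, "'", `'\''`, -1) + "'"
}

// shellQuote("/tmp/o'brien.cfg") returns '/tmp/o'\''brien.cfg', which the
// shell reassembles into the original path.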
func (p *Provisioner) createHints(ui packer.Ui, comm packer.Communicator) error {
	ui.Message("Creating directory: /etc/chef/ohai/hints")
	cmd := &packer.RemoteCmd{
		Command: "mkdir -p /etc/chef/ohai/hints; echo '{}' > /etc/chef/ohai/hints/ec2.json",
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	return nil
}
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))

	cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}

	return nil
}
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))

	cmd := &packer.RemoteCmd{
		Command: fmt.Sprintf("rm -rf '%s'", dir),
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status.")
	}

	return nil
}
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
	var err error

	ui.Say("Provisioning with Salt...")
	if !p.config.SkipBootstrap {
		cmd := &packer.RemoteCmd{
			Command: fmt.Sprintf("wget -O - http://bootstrap.saltstack.org | sudo sh -s %s", p.config.BootstrapArgs),
		}
		ui.Message(fmt.Sprintf("Installing Salt with command %s", cmd.Command))
		if err = cmd.StartWithUi(comm, ui); err != nil {
			return fmt.Errorf("Unable to install Salt: %s", err)
		}
	}

	ui.Message(fmt.Sprintf("Creating remote directory: %s", p.config.TempConfigDir))
	cmd := &packer.RemoteCmd{Command: fmt.Sprintf("mkdir -p %s", p.config.TempConfigDir)}
	if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
		if err == nil {
			err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
		}
		return fmt.Errorf("Error creating remote salt state directory: %s", err)
	}

	ui.Message(fmt.Sprintf("Uploading local state tree: %s", p.config.LocalStateTree))
	if err = UploadLocalDirectory(p.config.LocalStateTree, p.config.TempConfigDir, comm, ui); err != nil {
		return fmt.Errorf("Error uploading local state tree to remote: %s", err)
	}

	ui.Message(fmt.Sprintf("Moving %s to /srv/salt", p.config.TempConfigDir))
	cmd = &packer.RemoteCmd{Command: fmt.Sprintf("sudo mv %s /srv/salt", p.config.TempConfigDir)}
	if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
		if err == nil {
			err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
		}
		return fmt.Errorf("Unable to move %s to /srv/salt: %s", p.config.TempConfigDir, err)
	}

	ui.Message("Running highstate")
	cmd = &packer.RemoteCmd{Command: "sudo salt-call --local state.highstate -l info"}
	if err = cmd.StartWithUi(comm, ui); err != nil || cmd.ExitStatus != 0 {
		if err == nil {
			err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus)
		}
		return fmt.Errorf("Error executing highstate: %s", err)
	}

	return nil
}