func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if artifact.BuilderId() != docker.BuilderId { err := fmt.Errorf( "Unknown artifact type: %s\nCan only import from Docker builder artifacts.", artifact.BuilderId()) return nil, false, err } importRepo := p.config.Repository if p.config.Tag != "" { importRepo += ":" + p.config.Tag } driver := &docker.DockerDriver{Ctx: &p.config.ctx, Ui: ui} ui.Message("Importing image: " + artifact.Id()) ui.Message("Repository: " + importRepo) id, err := driver.Import(artifact.Files()[0], importRepo) if err != nil { return nil, false, err } ui.Message("Imported ID: " + id) // Build the artifact artifact = &docker.ImportArtifact{ BuilderIdValue: BuilderId, Driver: driver, IdValue: importRepo, } return artifact, false, nil }
func (p *VBoxBoxPostProcessor) findBaseMacAddress(a packer.Artifact) (string, error) { log.Println("Looking for OVF for base mac address...") var ovf string for _, f := range a.Files() { if strings.HasSuffix(f, ".ovf") { log.Printf("OVF found: %s", f) ovf = f break } } if ovf == "" { return "", errors.New("ovf file couldn't be found") } f, err := os.Open(ovf) if err != nil { return "", err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return "", err } re := regexp.MustCompile(`<Adapter slot="0".+?MACAddress="(.+?)"`) matches := re.FindSubmatch(data) if matches == nil { return "", errors.New("can't find base mac address in OVF") } log.Printf("Base mac address: %s", string(matches[1])) return string(matches[1]), nil }
func NewArtifact(artifact packer.Artifact) *Artifact { return &Artifact{ builderId: artifact.BuilderId(), files: artifact.Files(), id: artifact.Id(), str: artifact.String(), } }
func (p *OVFPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if artifact.BuilderId() != "mitchellh.vmware" { return nil, false, fmt.Errorf("ovftool post-processor can only be used on VMware boxes: %s", artifact.BuilderId()) } vmx := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path } } if vmx == "" { return nil, false, fmt.Errorf("VMX file could not be located.") } // Strip DVD and floppy drives from the VMX if err := p.stripDrives(vmx); err != nil { return nil, false, fmt.Errorf("Couldn't strip floppy/DVD drives from VMX") } p.cfg.ctx.Data = &OutputPathTemplate{ ArtifactId: artifact.Id(), BuildName: p.cfg.BuildName, Provider: "vmware", } targetPath, err := interpolate.Render(p.cfg.TargetPath, &p.cfg.ctx) if err != nil { return nil, false, err } // build the arguments args := []string{ "--targetType=" + p.cfg.TargetType, "--acceptAllEulas", } // append --compression, if it is set if p.cfg.Compression > 0 { args = append(args, fmt.Sprintf("--compress=%d", p.cfg.Compression)) } // add the source/target args = append(args, vmx, targetPath) ui.Message(fmt.Sprintf("Executing ovftool with arguments: %+v", args)) cmd := exec.Command(executable, args...) var buffer bytes.Buffer cmd.Stdout = &buffer cmd.Stderr = &buffer err = cmd.Run() if err != nil { return nil, false, fmt.Errorf("Unable to execute ovftool: %s", buffer.String()) } ui.Message(fmt.Sprintf("%s", buffer.String())) return artifact, false, nil }
func (self *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { ui.Say(fmt.Sprintf("Creating archive for '%s'", artifact.BuilderId())) // Create the compressed archive file at the appropriate OutputPath. fw, err := os.Create(self.config.OutputPath) if err != nil { return nil, false, fmt.Errorf( "Failed creating file for compressed archive: %s", self.config.OutputPath) } defer fw.Close() gw := gzip.NewWriter(fw) defer gw.Close() // Create a single tar writer on top of the gzip stream; creating a new one // per file would interleave several tar streams and corrupt the archive. tw := tar.NewWriter(gw) defer tw.Close() // Iterate through all of the artifact's files and put them into the // compressed archive using the tar/gzip writers. for _, path := range artifact.Files() { fi, err := os.Stat(path) if err != nil { return nil, false, fmt.Errorf( "Failed stating file: %s", path) } target, _ := os.Readlink(path) header, err := tar.FileInfoHeader(fi, target) if err != nil { return nil, false, fmt.Errorf( "Failed creating archive header: %s", path) } // Write the header first to the archive. This takes partial data // from the FileInfo that is grabbed by running the stat command. if err := tw.WriteHeader(header); err != nil { return nil, false, fmt.Errorf( "Failed writing archive header: %s", path) } // Open the target file for archiving and compressing. fr, err := os.Open(path) if err != nil { return nil, false, fmt.Errorf( "Failed opening file '%s' to write compressed archive.", path) } defer fr.Close() if _, err = io.Copy(tw, fr); err != nil { return nil, false, fmt.Errorf( "Failed copying file to archive: %s", path) } } return NewArtifact(artifact.BuilderId(), self.config.OutputPath), false, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if artifact.BuilderId() != "mitchellh.post-processor.vagrant" { return nil, false, fmt.Errorf( "Unknown artifact type, requires box from vagrant post-processor: %s", artifact.BuilderId()) } box := artifact.Files()[0] if !strings.HasSuffix(box, ".box") { return nil, false, fmt.Errorf( "Unknown files in artifact from vagrant post-processor: %s", artifact.Files()) } provider := providerFromBuilderName(artifact.Id()) file, err := os.Open(box) if err != nil { return nil, false, err } defer file.Close() info, err := file.Stat() if err != nil { return nil, false, err } size := info.Size() ui.Message(fmt.Sprintf("Box size: %s (%d bytes)", box, size)) metadata, err := p.getMetadata() if err != nil { return nil, false, err } ui.Message("Generating checksum") checksum, err := sum256(file) if err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Checksum is %s", checksum)) ui.Message(fmt.Sprintf("Adding %s %s box to metadata", provider, p.config.Version)) if err := metadata.Add(p.config.Version, &Provider{ Name: provider, Url: fmt.Sprintf("%s/%s/%s", p.config.UrlPrefix, p.config.BoxDir, path.Base(box)), ChecksumType: "sha256", Checksum: checksum, }); err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Saving the metadata: %s", p.config.MetadataPath)) if err := p.putMetadata(metadata); err != nil { return nil, false, err } return &Artifact{fmt.Sprintf("%s/%s", p.config.UrlPrefix, p.config.MetadataPath)}, true, nil }
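// Editor's sketch (not part of the original source): sum256, used above and in the
// later S3/filesystem post-processors, is not shown in this listing. Based on how it
// is called -- with an already-open *os.File and a hex checksum expected back -- a
// minimal implementation would likely stream the file through crypto/sha256
// (requires "crypto/sha256", "encoding/hex", "io", "os"):
func sum256(f *os.File) (string, error) {
	// Stream the file through SHA-256 rather than reading it all into memory.
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}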
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } vmx := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path break } } if vmx == "" { return nil, false, fmt.Errorf("VMX file not found") } if p.config.RemoveEthernet == "true" { if err := p.RemoveEthernet(vmx, ui, artifact); err != nil { return nil, false, fmt.Errorf("Removing ethernet0 interface from VMX failed!") } } if p.config.RemoveFloppy == "true" { if err := p.RemoveFloppy(vmx, ui, artifact); err != nil { return nil, false, fmt.Errorf("Removing floppy drive from VMX failed!") } } if p.config.RemoveOpticalDrive == "true" { if err := p.RemoveOpticalDrive(vmx, ui, artifact); err != nil { return nil, false, fmt.Errorf("Removing CD/DVD Drive from VMX failed!") } } args := []string{ "--acceptAllEulas", fmt.Sprintf("--diskMode=%s", p.config.DiskMode), fmt.Sprintf("%s", vmx), fmt.Sprintf("%s", p.config.Target), } ui.Message(fmt.Sprintf("Exporting %s to %s", vmx, p.config.Target)) var out bytes.Buffer log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " ")) cmd := exec.Command("ovftool", args...) cmd.Stdout = &out if err := cmd.Run(); err != nil { return nil, false, fmt.Errorf("Failed: %s\nStdout: %s", err, out.String()) } ui.Message(fmt.Sprintf("%s", out.String())) return artifact, false, nil }
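// Editor's assumption (not in the original source): the builtins map consulted at the
// top of several PostProcess implementations above is not defined in this listing. It
// presumably whitelists the builder IDs the post-processor accepts, mapping each to a
// provider label, e.g. something along these lines:
var builtins = map[string]string{
	"mitchellh.vmware": "vmware",
}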
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { keep := p.config.KeepInputArtifact importName := p.config.ImportName metaData := Metadata{importName, "virtualbox"} var stdout bytes.Buffer var stderr bytes.Buffer outputDir := filepath.Dir(artifact.Files()[0]) marshalledJson, err := json.Marshal(metaData) if err != nil { return nil, false, fmt.Errorf("Error marshalling metadata: %s", err) } ui.Say(fmt.Sprintf("Creating metadata file: %s", outputDir+"/metadata.json")) f, err := os.Create(outputDir + "/metadata.json") if err != nil { return nil, false, fmt.Errorf("Error creating metadata file: %s", err) } if _, err = f.Write(marshalledJson); err != nil { f.Close() return nil, false, fmt.Errorf("Error writing metadata file: %s", err) } f.Sync() f.Close() if err := vagrant.DirToBox("./output.box", filepath.Dir(artifact.Files()[0]), ui, 0); err != nil { return nil, false, fmt.Errorf("Error creating box: %s", err) } ui.Say(fmt.Sprintf("Importing box into vagrant: %s", importName)) if _, err := os.Stat("./output.box"); err != nil { return nil, false, fmt.Errorf("Unable to find box: ./output.box") } cmd := exec.Command("sh", "-c", fmt.Sprintf("vagrant box add --name %s ./output.box", importName)) cmd.Stdout = &stdout cmd.Stderr = &stderr err = cmd.Run() stdoutString := strings.TrimSpace(stdout.String()) stderrString := strings.TrimSpace(stderr.String()) os.Remove("./output.box") if err != nil { return nil, false, fmt.Errorf("Error importing: %s", stderrString) } log.Printf("stdout: %s", stdoutString) log.Printf("stderr: %s", stderrString) return artifact, keep, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } vmx := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path break } } if vmx == "" { return nil, false, fmt.Errorf("VMX file not found") } ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s", url.QueryEscape(p.config.Username), url.QueryEscape(p.config.Password), p.config.Host, p.config.Datacenter, p.config.Cluster) if p.config.ResourcePool != "" { ovftool_uri += "/Resources/" + p.config.ResourcePool } args := []string{ fmt.Sprintf("--noSSLVerify=%t", p.config.Insecure), "--acceptAllEulas", fmt.Sprintf("--name=%s", p.config.VMName), fmt.Sprintf("--datastore=%s", p.config.Datastore), fmt.Sprintf("--diskMode=%s", p.config.DiskMode), fmt.Sprintf("--network=%s", p.config.VMNetwork), fmt.Sprintf("--vmFolder=%s", p.config.VMFolder), fmt.Sprintf("%s", vmx), fmt.Sprintf("%s", ovftool_uri), } ui.Message(fmt.Sprintf("Uploading %s to vSphere", vmx)) var out bytes.Buffer log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " ")) cmd := exec.Command("ovftool", args...) cmd.Stdout = &out if err := cmd.Run(); err != nil { return nil, false, fmt.Errorf("Failed: %s\nStdout: %s", err, out.String()) } ui.Message(fmt.Sprintf("%s", out.String())) return artifact, false, nil }
func (p *HypervProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { // Create the metadata metadata = map[string]interface{}{"provider": "hyperv"} // ui.Message(fmt.Sprintf("artifacts all: %+v", artifact)) var outputDir string // Vagrant requires a specific dir structure for hyperv. The hyperv builder // creates that structure in its output dir, so we have to preserve it in the // temp dir. This is a bit of a hack, but the artifact string usually contains // the output dir. artifactString := artifact.String() d := strings.Split(artifactString, ": ") outputDir = d[1] // ui.Message(fmt.Sprintf("artifact dir from string: %s", outputDir)) // Copy all of the original contents into the temporary directory for _, path := range artifact.Files() { ui.Message(fmt.Sprintf("Copying: %s", path)) var rel string rel, err = filepath.Rel(outputDir, filepath.Dir(path)) // ui.Message(fmt.Sprintf("rel is: %s", rel)) if err != nil { ui.Message(fmt.Sprintf("err in: %s", rel)) return } dstDir := filepath.Join(dir, rel) // ui.Message(fmt.Sprintf("dstdir is: %s", dstDir)) if _, err = os.Stat(dstDir); err != nil { if err = os.MkdirAll(dstDir, 0755); err != nil { ui.Message(fmt.Sprintf("err in creating: %s", dstDir)) return } } dstPath := filepath.Join(dstDir, filepath.Base(path)) if err = CopyContents(dstPath, path); err != nil { ui.Message(fmt.Sprintf("err in copying: %s to %s", path, dstPath)) return } ui.Message(fmt.Sprintf("Copied %s to %s", path, dstPath)) } return }
func (p *VMwareProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { // Create the metadata metadata = map[string]interface{}{"provider": "vmware_desktop"} // Copy all of the original contents into the temporary directory for _, path := range artifact.Files() { ui.Message(fmt.Sprintf("Copying: %s", path)) dstPath := filepath.Join(dir, filepath.Base(path)) if err = CopyContents(dstPath, path); err != nil { return } } return }
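// Editor's sketch (not part of the original source): CopyContents is referenced by
// several providers above but not defined in this listing. A minimal version would
// create the destination file (and any missing parent directories) and stream the
// source into it (requires "io", "os", "path/filepath"):
func CopyContents(dst, src string) error {
	srcF, err := os.Open(src)
	if err != nil {
		return err
	}
	defer srcF.Close()
	// Make sure the destination directory exists before creating the file.
	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
		return err
	}
	dstF, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer dstF.Close()
	_, err = io.Copy(dstF, srcF)
	return err
}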
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } source := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") || strings.HasSuffix(path, ".ovf") || strings.HasSuffix(path, ".ova") { source = path break } } if source == "" { return nil, false, fmt.Errorf("VMX, OVF or OVA file not found") } ovftool_uri := fmt.Sprintf("vi://%s:%s@%s/%s/host/%s", url.QueryEscape(p.config.Username), url.QueryEscape(p.config.Password), p.config.Host, p.config.Datacenter, p.config.Cluster) if p.config.ResourcePool != "" { ovftool_uri += "/Resources/" + p.config.ResourcePool } args, err := p.BuildArgs(source, ovftool_uri) if err != nil { return nil, false, fmt.Errorf("Failed to build ovftool arguments: %s", err) } ui.Message(fmt.Sprintf("Uploading %s to vSphere", source)) log.Printf("Starting ovftool with parameters: %s", strings.Join(args, " ")) cmd := exec.Command("ovftool", args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return nil, false, fmt.Errorf("Failed: %s\n", err) } return artifact, false, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if _, ok := builtins[artifact.BuilderId()]; !ok { return nil, false, fmt.Errorf("Unknown artifact type, can't build box: %s", artifact.BuilderId()) } vmx := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path break } } if vmx == "" { return nil, false, fmt.Errorf("VMX file not found") } ui.Message(fmt.Sprintf("Uploading %s to vSphere", vmx)) args := []string{ fmt.Sprintf("--noSSLVerify=%t", p.config.Insecure), "--acceptAllEulas", fmt.Sprintf("--name=%s", p.config.VMName), fmt.Sprintf("--datastore=%s", p.config.Datastore), fmt.Sprintf("--network=%s", p.config.VMNetwork), fmt.Sprintf("--vmFolder=%s", p.config.VMFolder), fmt.Sprintf("vi://%s:%s@%s/%s/%s", p.config.Username, p.config.Password, p.config.Host, p.config.Datacenter, p.config.PathToResourcePool), } var out bytes.Buffer cmd := exec.Command("ovftool", args...) cmd.Stdout = &out if err := cmd.Run(); err != nil { return nil, false, fmt.Errorf("Failed: %s\nStdout: %s", err, out.String()) } ui.Message(fmt.Sprintf("%s", out.String())) return artifact, false, nil }
// Convert a VirtualBox VMDK artifact to a VHD file. func (p *VirtualBoxProvider) Convert(ui packer.Ui, artifact packer.Artifact, outputPath string) (err error) { var files []string // Unpack the VirtualBox artifact if necessary (if in the OVA format). for _, path := range artifact.Files() { if ext := filepath.Ext(path); ext == ".ova" { // Extract OVA files in place. ui.Message(fmt.Sprintf("Unpacking OVA: %s", path)) dir := filepath.Dir(path) if err = vagrant.DecompressOva(dir, path); err != nil { return err } // Prepare new slice of files to search. glob := filepath.Join(dir, "*") files, err = filepath.Glob(glob) if err != nil { return err } continue } else { files = artifact.Files() } } // Find VirtualBox VMDK. vmdk, err := findVMDK(files...) if err != nil { return err } ui.Message(fmt.Sprintf("Found VirtualBox VMDK: %s", vmdk)) // Convert VMDK to VHD. ui.Message("Cloning VMDK as VHD...") command := []string{ "clonehd", "--format", "VHD", vmdk, outputPath, } if err = p.Execute(ui, command...); err != nil { return fmt.Errorf("Error creating VHD: %s", err) } return nil }
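// Editor's sketch (not part of the original source): findVMDK is called above with a
// variadic file list but is not shown here. It most likely just scans the names for a
// ".vmdk" extension (requires "errors", "path/filepath"):
func findVMDK(files ...string) (string, error) {
	for _, f := range files {
		if filepath.Ext(f) == ".vmdk" {
			return f, nil
		}
	}
	return "", errors.New("unable to find VMDK in artifact files")
}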
func (p *LibVirtProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { diskName := artifact.State("diskName").(string) // Copy the disk image into the temporary directory (as box.img) for _, path := range artifact.Files() { if strings.HasSuffix(path, "/"+diskName) { ui.Message(fmt.Sprintf("Copying from artifact: %s", path)) dstPath := filepath.Join(dir, "box.img") if err = CopyContents(dstPath, path); err != nil { return } } } format := artifact.State("diskType").(string) origSize := artifact.State("diskSize").(uint64) size := origSize / 1024 // In MB, want GB if origSize%1024 > 0 { // Make sure we don't make the size smaller size++ } domainType := artifact.State("domainType").(string) // Convert domain type to libvirt driver var driver string switch domainType { case "none", "tcg": driver = "qemu" case "kvm": driver = domainType default: return "", nil, fmt.Errorf("Unknown libvirt domain type: %s", domainType) } // Create the metadata metadata = map[string]interface{}{ "provider": "libvirt", "format": format, "virtual_size": size, } vagrantfile = fmt.Sprintf(libvirtVagrantfile, driver) return }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { files := artifact.Files() var h hash.Hash var checksumFile string newartifact := NewArtifact(artifact.Files()) for _, ct := range p.config.ChecksumTypes { h = getHash(ct) for _, art := range files { checksumFile = p.config.OutputPath if _, err := os.Stat(checksumFile); err != nil { newartifact.files = append(newartifact.files, checksumFile) } if err := os.MkdirAll(filepath.Dir(checksumFile), os.FileMode(0755)); err != nil { return nil, false, fmt.Errorf("unable to create dir: %s", err.Error()) } fw, err := os.OpenFile(checksumFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(0644)) if err != nil { return nil, false, fmt.Errorf("unable to create file %s: %s", checksumFile, err.Error()) } fr, err := os.Open(art) if err != nil { fw.Close() return nil, false, fmt.Errorf("unable to open file %s: %s", art, err.Error()) } if _, err = io.Copy(h, fr); err != nil { fr.Close() fw.Close() return nil, false, fmt.Errorf("unable to compute %s hash for %s", ct, art) } fr.Close() fw.WriteString(fmt.Sprintf("%x\t%s\n", h.Sum(nil), filepath.Base(art))) fw.Close() h.Reset() } } return newartifact, true, nil }
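// Editor's sketch (not part of the original source): getHash maps a configured
// checksum type to a hash.Hash. A plausible implementation covering the common
// types (requires "crypto/md5", "crypto/sha1", "crypto/sha256", "crypto/sha512",
// "hash"):
func getHash(t string) hash.Hash {
	var h hash.Hash
	switch t {
	case "md5":
		h = md5.New()
	case "sha1":
		h = sha1.New()
	case "sha256":
		h = sha256.New()
	case "sha512":
		h = sha512.New()
	}
	return h
}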
// Convert a QEMU raw/qcow2 artifact to a VHD file. func (p *QEMUProvider) Convert(ui packer.Ui, artifact packer.Artifact, outputPath string) error { // Find QEMU image. img, err := findImage(artifact.Files()...) if err != nil { return err } ui.Message(fmt.Sprintf("Found QEMU image: %s", img)) // Convert image to VHD. ui.Message("Converting image to VHD...") command := []string{ "convert", "-O", "vpc", img, outputPath, } if err = p.Execute(ui, command...); err != nil { return fmt.Errorf("Error creating VHD: %s", err) } return nil }
func (p *VBoxProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { // Create the metadata metadata = map[string]interface{}{"provider": "virtualbox"} // Copy all of the original contents into the temporary directory for _, path := range artifact.Files() { // We treat OVA files specially, we unpack those into the temporary // directory so we can get the resulting disk and OVF. if extension := filepath.Ext(path); extension == ".ova" { ui.Message(fmt.Sprintf("Unpacking OVA: %s", path)) if err = DecompressOva(dir, path); err != nil { return } } else { ui.Message(fmt.Sprintf("Copying from artifact: %s", path)) dstPath := filepath.Join(dir, filepath.Base(path)) if err = CopyContents(dstPath, path); err != nil { return } } } // Rename the OVF file to box.ovf, as required by Vagrant ui.Message("Renaming the OVF to box.ovf...") if err = p.renameOVF(dir); err != nil { return } // Create the Vagrantfile from the template var baseMacAddress string baseMacAddress, err = p.findBaseMacAddress(dir) if err != nil { return } vagrantfile = fmt.Sprintf(vboxVagrantfile, baseMacAddress) return }
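// Editor's sketch (not part of the original source): DecompressOva is called above to
// unpack an OVA into the temporary directory. An OVA is a plain tar archive, so a
// simplified version could read it entry by entry and write each file into dir,
// flattening the entry names (requires "archive/tar", "io", "os", "path/filepath"):
func DecompressOva(dir, src string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()
	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if hdr.FileInfo().IsDir() {
			continue
		}
		// Drop any directory components in the entry name and extract in place.
		dst, err := os.Create(filepath.Join(dir, filepath.Base(hdr.Name)))
		if err != nil {
			return err
		}
		if _, err := io.Copy(dst, tr); err != nil {
			dst.Close()
			return err
		}
		dst.Close()
	}
	return nil
}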
func (p *ParallelsProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { // Create the metadata metadata = map[string]interface{}{"provider": "parallels"} // Copy all of the original contents into the temporary directory for _, path := range artifact.Files() { // If the file isn't critical to the function of the // virtual machine, we get rid of it. unnecessary := false for _, unnecessaryPat := range UnnecessaryFilesPatterns { if matched, _ := regexp.MatchString(unnecessaryPat, path); matched { unnecessary = true break } } if unnecessary { continue } tmpPath := filepath.ToSlash(path) pathRe := regexp.MustCompile(`^(.+?)([^/]+\.pvm/.+?)$`) matches := pathRe.FindStringSubmatch(tmpPath) var pvmPath string if matches != nil { pvmPath = filepath.FromSlash(matches[2]) } else { continue // Just copy a pvm } dstPath := filepath.Join(dir, pvmPath) ui.Message(fmt.Sprintf("Copying: %s", path)) if err = CopyContents(dstPath, path); err != nil { return } } return }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { // Only accepts input from the vagrant post-processor if artifact.BuilderId() != "mitchellh.post-processor.vagrant" { return nil, false, fmt.Errorf( "Unknown artifact type, requires box from vagrant post-processor: %s", artifact.BuilderId()) } // We assume that there is only one .box file to upload if !strings.HasSuffix(artifact.Files()[0], ".box") { return nil, false, fmt.Errorf( "Unknown files in artifact from vagrant post-processor: %s", artifact.Files()) } // create the HTTP client p.client = VagrantCloudClient{}.New(p.config.VagrantCloudUrl, p.config.AccessToken) // The name of the provider for vagrant cloud, and vagrant providerName := providerFromBuilderName(artifact.Id()) // Set up the state state := new(multistep.BasicStateBag) state.Put("config", p.config) state.Put("client", p.client) state.Put("artifact", artifact) state.Put("artifactFilePath", artifact.Files()[0]) state.Put("ui", ui) state.Put("providerName", providerName) // Build the steps steps := []multistep.Step{ new(stepVerifyBox), new(stepCreateVersion), new(stepCreateProvider), new(stepPrepareUpload), new(stepUpload), new(stepVerifyUpload), new(stepReleaseVersion), } // Run the steps if p.config.PackerDebug { p.runner = &multistep.DebugRunner{ Steps: steps, PauseFn: common.MultistepDebugFn(ui), } } else { p.runner = &multistep.BasicRunner{Steps: steps} } p.runner.Run(state) // If there was an error, return that if rawErr, ok := state.GetOk("error"); ok { return nil, false, rawErr.(error) } return NewArtifact(providerName, p.config.Tag), true, nil }
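// Editor's sketch (not part of the original source): providerFromBuilderName, used in
// several of the Vagrant-related post-processors above, converts the builder name
// embedded in the Vagrant artifact ID into the provider name Vagrant expects. A
// plausible mapping:
func providerFromBuilderName(name string) string {
	switch name {
	case "virtualbox":
		return "virtualbox"
	case "vmware":
		return "vmware_desktop"
	case "parallels":
		return "parallels"
	default:
		return name
	}
}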
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if len(artifact.Files()) > 0 { ui.Say(fmt.Sprintf("Discarding artifact files: %s", strings.Join(artifact.Files(), ", "))) } artifact, err := NewArtifact(p.config.Files) ui.Say(fmt.Sprintf("Using these artifact files: %s", strings.Join(artifact.Files(), ", "))) return artifact, true, err }
func (p *VMwarevCenterProvider) Process(ui packer.Ui, artifact packer.Artifact, dir string) (vagrantfile string, metadata map[string]interface{}, err error) { // Create the metadata metadata = map[string]interface{}{"provider": "vcenter"} vmx := "" ovf := "" basepath := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".vmx") { vmx = path ovf = filepath.Base(vmx[:len(vmx)-4] + ".ovf") basepath = filepath.Dir(path) + "/ovf" } } vmxData, err := vmwcommon.ReadVMX(vmx) if err != nil { ui.Message(fmt.Sprintf("err: %s", err)) return } for k := range vmxData { if strings.HasPrefix(k, "floppy0.") { ui.Message(fmt.Sprintf("Deleting key: %s", k)) delete(vmxData, k) } if strings.HasPrefix(k, "ide1:0.") { ui.Message(fmt.Sprintf("Deleting key: %s", k)) delete(vmxData, k) } if strings.HasPrefix(k, "ide1:1.") { ui.Message(fmt.Sprintf("Deleting key: %s", k)) delete(vmxData, k) } } // remove floppy (again) ui.Message("Setting key: floppy0.present = FALSE") vmxData["floppy0.present"] = "FALSE" // detach DVD (again) ui.Message("Setting key: ide1:0.present = FALSE") vmxData["ide1:0.present"] = "FALSE" // Rewrite the VMX if err := vmwcommon.WriteVMX(vmx, vmxData); err != nil { ui.Message(fmt.Sprintf("err: %s", err)) } program, err := FindOvfTool() if err != nil { return } sourcetype := "--sourceType=VMX" targettype := "--targetType=OVF" ui.Message(fmt.Sprintf("Creating directory: %s", basepath)) if err := os.Mkdir(basepath, 0755); err != nil { ui.Message(fmt.Sprintf("err: %s", err)) } cmd := exec.Command(program, sourcetype, targettype, vmx, basepath+"/"+ovf) ui.Message("Starting ovftool") if err = cmd.Run(); err != nil { ui.Message(fmt.Sprintf("err: %s", err)) return } ui.Message(fmt.Sprintf("Reading files in %s", basepath)) files, _ := ioutil.ReadDir(basepath) for _, path := range files { ui.Message(fmt.Sprintf("Copying: %s", path.Name())) dstPath := filepath.Join(dir, path.Name()) if err = CopyContents(dstPath, basepath+"/"+path.Name()); err != nil { return } } return }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { scripts := make([]string, len(p.config.Scripts)) copy(scripts, p.config.Scripts) // If we have an inline script, then turn that into a temporary // shell script and use that. if p.config.Inline != nil { tf, err := ioutil.TempFile("", "packer-shell") if err != nil { return nil, false, fmt.Errorf("Error preparing shell script: %s", err) } defer os.Remove(tf.Name()) // Set the path to the temporary file scripts = append(scripts, tf.Name()) // Write our contents to it writer := bufio.NewWriter(tf) writer.WriteString(fmt.Sprintf("#!%s\n", p.config.InlineShebang)) for _, command := range p.config.Inline { if _, err := writer.WriteString(command + "\n"); err != nil { return nil, false, fmt.Errorf("Error preparing shell script: %s", err) } } if err := writer.Flush(); err != nil { return nil, false, fmt.Errorf("Error preparing shell script: %s", err) } tf.Close() } // Build our variables up by adding in the build name and builder type envVars := make([]string, len(p.config.Vars)+2) envVars[0] = fmt.Sprintf("PACKER_BUILD_NAME='%s'", p.config.PackerBuildName) envVars[1] = fmt.Sprintf("PACKER_BUILDER_TYPE='%s'", p.config.PackerBuilderType) copy(envVars[2:], p.config.Vars) for _, file := range artifact.Files() { for _, script := range scripts { // Flatten the environment variables flattendVars := strings.Join(envVars, " ") p.config.ctx.Data = &ExecuteCommandTemplate{ Vars: flattendVars, Script: script, Artifact: file, } command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) if err != nil { return nil, false, fmt.Errorf("Error processing command: %s", err) } ui.Say(fmt.Sprintf("Post processing with local shell script: %s", command)) comm := &Communicator{} cmd := &packer.RemoteCmd{Command: command} ui.Say(fmt.Sprintf( "Executing local script: %s", script)) if err := cmd.StartWithUi(comm, ui); err != nil { return nil, false, fmt.Errorf( "Error executing script: %s\n\n"+ "Please see output above for more information.", script) } if cmd.ExitStatus != 0 { return nil, false, fmt.Errorf( "Erroneous exit code %d while executing script: %s\n\n"+ "Please see output above for more information.", cmd.ExitStatus, script) } } } return artifact, true, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { var err error config, err := p.config.Config() if err != nil { return nil, false, err } // Render this key since we didn't in the configure phase p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx) if err != nil { return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err) } log.Printf("Rendered s3_key_name as %s", p.config.S3Key) log.Println("Looking for OVA in artifact") // Locate the files output from the builder source := "" for _, path := range artifact.Files() { if strings.HasSuffix(path, ".ova") { source = path break } } // Hope we found something useful if source == "" { return nil, false, fmt.Errorf("No OVA file found in artifact from builder") } // Set up the AWS session log.Println("Creating AWS session") session := session.New(config) // open the source file log.Printf("Opening file %s to upload", source) file, err := os.Open(source) if err != nil { return nil, false, fmt.Errorf("Failed to open %s: %s", source, err) } ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) // Copy the OVA file into the S3 bucket specified uploader := s3manager.NewUploader(session) _, err = uploader.Upload(&s3manager.UploadInput{ Body: file, Bucket: &p.config.S3Bucket, Key: &p.config.S3Key, }) if err != nil { return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err) } // May as well stop holding this open now file.Close() ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key)) // Call EC2 image import process log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key) ec2conn := ec2.New(session) import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{ DiskContainers: []*ec2.ImageDiskContainer{ { UserBucket: &ec2.UserBucket{ S3Bucket: &p.config.S3Bucket, S3Key: &p.config.S3Key, }, }, }, }) if err != nil { return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) } ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId)) // Wait for import process to complete, this takes a while ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId)) stateChange := awscommon.StateChangeConf{ Pending: []string{"pending", "active"}, Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId), Target: "completed", } // Actually do the wait for state change // We ignore errors out of this and check job state in AWS API awscommon.WaitForState(&stateChange) // Retrieve what the outcome was for the import task import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{ ImportTaskIds: []*string{ import_start.ImportTaskId, }, }) if err != nil { return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err) } // Check it was actually completed if *import_result.ImportImageTasks[0].Status != "completed" { // The most useful error message is from the job itself return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage) } ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId)) // Pull AMI ID out of the completed job createdami := *import_result.ImportImageTasks[0].ImageId // If we have tags, then apply them now to both the AMI and snaps // created by the import if len(p.config.Tags) > 0 { var ec2Tags []*ec2.Tag log.Printf("Repacking tags into AWS format") for key, value := range p.config.Tags { ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value)) ec2Tags = append(ec2Tags, &ec2.Tag{ Key: aws.String(key), Value: aws.String(value), }) } resourceIds := []*string{&createdami} log.Printf("Getting details of %s", createdami) imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{ ImageIds: resourceIds, }) if err != nil { return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err) } if len(imageResp.Images) == 0 { return nil, false, fmt.Errorf("AMI %s has no images", createdami) } image := imageResp.Images[0] log.Printf("Walking block device mappings for %s to find snapshots", createdami) for _, device := range image.BlockDeviceMappings { if device.Ebs != nil && device.Ebs.SnapshotId != nil { ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId)) resourceIds = append(resourceIds, device.Ebs.SnapshotId) } } ui.Message(fmt.Sprintf("Tagging AMI %s", createdami)) _, err = ec2conn.CreateTags(&ec2.CreateTagsInput{ Resources: resourceIds, Tags: ec2Tags, }) if err != nil { return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err) } } // Add the reported AMI ID to the artifact list log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region) artifact = &awscommon.Artifact{ Amis: map[string]string{ *config.Region: createdami, }, BuilderIdValue: BuilderId, Conn: ec2conn, } if !p.config.SkipClean { ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key)) s3conn := s3.New(session) _, err = s3conn.DeleteObject(&s3.DeleteObjectInput{ Bucket: &p.config.S3Bucket, Key: &p.config.S3Key, }) if err != nil { return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err) } } return artifact, false, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { target := p.config.OutputPath keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} outputFile, err := os.Create(target) if err != nil { return nil, false, fmt.Errorf( "Unable to create archive %s: %s", target, err) } defer outputFile.Close() // Setup output interface. If we're using compression, output is a // compression writer. Otherwise it's just a file. var output io.WriteCloser switch p.config.Algorithm { case "lz4": ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s", runtime.GOMAXPROCS(-1), target)) output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel) defer output.Close() case "pgzip": ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s", runtime.GOMAXPROCS(-1), target)) output, err = makePgzipWriter(outputFile, p.config.CompressionLevel) defer output.Close() default: output = outputFile } compression := p.config.Algorithm if compression == "" { compression = "no compression" } // Build an archive, if we're supposed to do that. switch p.config.Archive { case "tar": ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression)) err = createTarArchive(artifact.Files(), output) if err != nil { return nil, keep, fmt.Errorf("Error creating tar: %s", err) } case "zip": ui.Say(fmt.Sprintf("Zipping %s", target)) err = createZipArchive(artifact.Files(), output) if err != nil { return nil, keep, fmt.Errorf("Error creating zip: %s", err) } default: // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { return nil, keep, fmt.Errorf( "Can only have 1 input file when not using tar/zip. Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } archiveFile := artifact.Files()[0] ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression)) source, err := os.Open(archiveFile) if err != nil { return nil, keep, fmt.Errorf( "Failed to open source file %s for reading: %s", archiveFile, err) } defer source.Close() if _, err = io.Copy(output, source); err != nil { return nil, keep, fmt.Errorf("Failed to compress %s: %s", archiveFile, err) } } ui.Say(fmt.Sprintf("Archive %s completed", target)) return newArtifact, keep, nil }
func (p *VBoxBoxPostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, error) { var err error tplData := &VBoxVagrantfileTemplate{} tplData.BaseMacAddress, err = p.findBaseMacAddress(artifact) if err != nil { return nil, err } // Compile the output path outputPath, err := ProcessOutputPath(p.config.OutputPath, "virtualbox", artifact) if err != nil { return nil, err } // Create a temporary directory for us to build the contents of the box in dir, err := ioutil.TempDir("", "packer") if err != nil { return nil, err } defer os.RemoveAll(dir) // Copy all of the original contents into the temporary directory for _, path := range artifact.Files() { ui.Message(fmt.Sprintf("Copying: %s", path)) src, err := os.Open(path) if err != nil { return nil, err } defer src.Close() dst, err := os.Create(filepath.Join(dir, filepath.Base(path))) if err != nil { return nil, err } defer dst.Close() if _, err := io.Copy(dst, src); err != nil { return nil, err } } // Create the Vagrantfile from the template vf, err := os.Create(filepath.Join(dir, "Vagrantfile")) if err != nil { return nil, err } defer vf.Close() vagrantfileContents := defaultVBoxVagrantfile if p.config.VagrantfileTemplate != "" { f, err := os.Open(p.config.VagrantfileTemplate) if err != nil { return nil, err } defer f.Close() contents, err := ioutil.ReadAll(f) if err != nil { return nil, err } vagrantfileContents = string(contents) } t := template.Must(template.New("vagrantfile").Parse(vagrantfileContents)) t.Execute(vf, tplData) vf.Close() // Create the metadata metadata := map[string]string{"provider": "virtualbox"} if err := WriteMetadata(dir, metadata); err != nil { return nil, err } // Compress the directory to the given output path ui.Message(fmt.Sprintf("Compressing box...")) if err := DirToBox(outputPath, dir); err != nil { return nil, err } return NewArtifact("virtualbox", outputPath), nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { // These are extra variables that will be made available for interpolation. p.config.ctx.Data = map[string]string{ "BuildName": p.config.PackerBuildName, "BuilderType": p.config.PackerBuilderType, } target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx) if err != nil { return nil, false, fmt.Errorf("Error interpolating output value: %s", err) } else { fmt.Println(target) } keep := p.config.KeepInputArtifact newArtifact := &Artifact{Path: target} if err = os.MkdirAll(filepath.Dir(target), os.FileMode(0755)); err != nil { return nil, false, fmt.Errorf( "Unable to create dir for archive %s: %s", target, err) } outputFile, err := os.Create(target) if err != nil { return nil, false, fmt.Errorf( "Unable to create archive %s: %s", target, err) } defer outputFile.Close() // Setup output interface. If we're using compression, output is a // compression writer. Otherwise it's just a file. var output io.WriteCloser switch p.config.Algorithm { case "bgzf": ui.Say(fmt.Sprintf("Using bgzf compression with %d cores for %s", runtime.GOMAXPROCS(-1), target)) output, err = makeBGZFWriter(outputFile, p.config.CompressionLevel) defer output.Close() case "lz4": ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s", runtime.GOMAXPROCS(-1), target)) output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel) defer output.Close() case "pgzip": ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s", runtime.GOMAXPROCS(-1), target)) output, err = makePgzipWriter(outputFile, p.config.CompressionLevel) defer output.Close() default: output = outputFile } compression := p.config.Algorithm if compression == "" { compression = "no compression" } // Build an archive, if we're supposed to do that. switch p.config.Archive { case "tar": ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression)) err = createTarArchive(artifact.Files(), output) if err != nil { return nil, keep, fmt.Errorf("Error creating tar: %s", err) } case "zip": ui.Say(fmt.Sprintf("Zipping %s", target)) err = createZipArchive(artifact.Files(), output) if err != nil { return nil, keep, fmt.Errorf("Error creating zip: %s", err) } default: // Filename indicates no tarball (just compress) so we'll do an io.Copy // into our compressor. if len(artifact.Files()) != 1 { return nil, keep, fmt.Errorf( "Can only have 1 input file when not using tar/zip. Found %d "+ "files: %v", len(artifact.Files()), artifact.Files()) } archiveFile := artifact.Files()[0] ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression)) source, err := os.Open(archiveFile) if err != nil { return nil, keep, fmt.Errorf( "Failed to open source file %s for reading: %s", archiveFile, err) } defer source.Close() if _, err = io.Copy(output, source); err != nil { return nil, keep, fmt.Errorf("Failed to compress %s: %s", archiveFile, err) } } ui.Say(fmt.Sprintf("Archive %s completed", target)) return newArtifact, keep, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { // Only accept input from the vagrant post-processor if artifact.BuilderId() != "mitchellh.post-processor.vagrant" { return nil, false, fmt.Errorf("Unknown artifact type, requires box from vagrant post-processor: %s", artifact.BuilderId()) } // Assume there is only one .box file to upload box := artifact.Files()[0] if !strings.HasSuffix(box, ".box") { return nil, false, fmt.Errorf("Unknown files in artifact from vagrant post-processor: %s", artifact.Files()) } provider := providerFromBuilderName(artifact.Id()) ui.Say(fmt.Sprintf("Preparing to upload box for '%s' provider to S3 bucket '%s'", provider, p.config.Bucket)) // open the box so we can upload to S3 and calculate checksum for the manifest file, err := os.Open(box) if err != nil { return nil, false, err } defer file.Close() // get the file's size info, err := file.Stat() if err != nil { return nil, false, err } size := info.Size() ui.Message(fmt.Sprintf("Box to upload: %s (%d bytes)", box, size)) // get the latest manifest so we can add to it ui.Message("Fetching latest manifest") manifest, err := p.getManifest() if err != nil { return nil, false, err } // generate the path to store the box in S3 boxPath := fmt.Sprintf("%s/%s/%s", p.config.BoxDir, p.config.Version, path.Base(box)) ui.Message("Generating checksum") checksum, err := sum256(file) if err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Checksum is %s", checksum)) ui.Message(fmt.Sprintf("Adding %s %s box to manifest", provider, p.config.Version)) if err := manifest.add(p.config.Version, &Provider{ Name: provider, Url: p.s3.URL(boxPath), ChecksumType: "sha256", Checksum: checksum, }); err != nil { return nil, false, err } // upload the box to S3 (rewinding as we already read the file to generate the checksum) ui.Message(fmt.Sprintf("Uploading box to S3: %s", boxPath)) if _, err := file.Seek(0, 0); err != nil { return nil, false, err } if size > 100*1024*1024 { ui.Message("File size > 100MB. Initiating multipart upload") multi, err := p.s3.InitMulti(boxPath, "application/octet-stream", p.config.ACL) if err != nil { return nil, false, err } ui.Message("Uploading...") const chunkSize = 5 * 1024 * 1024 totalParts := int(math.Ceil(float64(size) / float64(chunkSize))) totalUploadSize := int64(0) parts := make([]s3.Part, totalParts) errorCount := 0 for partNum := 1; partNum <= totalParts; partNum++ { filePos, err := file.Seek(0, 1) partSize := int64(math.Min(chunkSize, float64(size-filePos))) partBuffer := make([]byte, partSize) ui.Message(fmt.Sprintf("Upload: Uploading part %d of %d, %d (of max %d) bytes", partNum, int(totalParts), int(partSize), int(chunkSize))) readBytes, err := file.Read(partBuffer) ui.Message(fmt.Sprintf("Upload: Read %d bytes from box file on disk", readBytes)) bufferReader := bytes.NewReader(partBuffer) part, err := multi.PutPart(partNum, bufferReader) parts[partNum-1] = part if err != nil { if errorCount < 10 { errorCount++ ui.Message(fmt.Sprintf("Error encountered! %s. Retry %d.", err, errorCount)) time.Sleep(5 * time.Second) //reset seek position to the beginning of this block file.Seek(filePos, 0) partNum-- } else { ui.Message(fmt.Sprintf("Too many errors encountered! Last error: %s. Aborting after %d retries.", err, errorCount)) return nil, false, err } } else { totalUploadSize += part.Size ui.Message(fmt.Sprintf("Upload: Finished part %d of %d, upload total is %d bytes. This part was %d bytes.", partNum, totalParts, int(totalUploadSize), int(part.Size))) } } ui.Message("Parts uploaded, completing upload...") if err := multi.Complete(parts); err != nil { return nil, false, err } } else { if err := p.s3.PutReader(boxPath, file, size, "application/octet-stream", p.config.ACL); err != nil { return nil, false, err } } ui.Message(fmt.Sprintf("Uploading the manifest: %s", p.config.ManifestPath)) if err := p.putManifest(manifest); err != nil { return nil, false, err } return &Artifact{p.s3.URL(p.config.ManifestPath)}, true, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packer.Artifact, bool, error) { artifact := &Artifact{} var err error var fi os.FileInfo // Create the current artifact. for _, name := range source.Files() { af := ArtifactFile{} if fi, err = os.Stat(name); err == nil { af.Size = fi.Size() } if p.config.StripPath { af.Name = filepath.Base(name) } else { af.Name = name } artifact.ArtifactFiles = append(artifact.ArtifactFiles, af) } artifact.ArtifactId = source.Id() artifact.BuilderType = p.config.PackerBuilderType artifact.BuildName = p.config.PackerBuildName artifact.BuildTime = time.Now().Unix() // Since each post-processor runs in a different process we need a way to // coordinate between various post-processors in a single packer run. We do // this by setting a UUID per run and tracking this in the manifest file. // When we detect that the UUID in the file is the same, we know that we are // part of the same run and we simply add our data to the list. If the UUID // is different we will check the -force flag and decide whether to truncate // the file before we proceed. artifact.PackerRunUUID = os.Getenv("PACKER_RUN_UUID") // Create a lock file with exclusive access. If this fails we will retry // after a delay. lockFilename := p.config.Filename + ".lock" for i := 0; i < 3; i++ { // The file should not be locked for very long so we'll keep this short. time.Sleep((time.Duration(i) * 200 * time.Millisecond)) _, err = os.OpenFile(lockFilename, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err == nil { break } log.Printf("Error locking manifest file for reading and writing. Will sleep and retry. %s", err) } defer os.Remove(lockFilename) // TODO fix error on first run: // * Post-processor failed: open packer-manifest.json: no such file or directory // // Read the current manifest file from disk contents := []byte{} if contents, err = ioutil.ReadFile(p.config.Filename); err != nil && !os.IsNotExist(err) { return source, true, fmt.Errorf("Unable to open %s for reading: %s", p.config.Filename, err) } // Parse the manifest file JSON, if we have one manifestFile := &ManifestFile{} if len(contents) > 0 { if err = json.Unmarshal(contents, manifestFile); err != nil { return source, true, fmt.Errorf("Unable to parse content from %s: %s", p.config.Filename, err) } } // If -force is set and we are not on same run, truncate the file. Otherwise // we will continue to add new builds to the existing manifest file. if p.config.PackerForce && os.Getenv("PACKER_RUN_UUID") != manifestFile.LastRunUUID { manifestFile = &ManifestFile{} } // Add the current artifact to the manifest file manifestFile.Builds = append(manifestFile.Builds, *artifact) manifestFile.LastRunUUID = os.Getenv("PACKER_RUN_UUID") // Write JSON to disk if out, err := json.MarshalIndent(manifestFile, "", " "); err == nil { if err = ioutil.WriteFile(p.config.Filename, out, 0664); err != nil { return source, true, fmt.Errorf("Unable to write %s: %s", p.config.Filename, err) } } else { return source, true, fmt.Errorf("Unable to marshal JSON %s", err) } return source, true, nil }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { // Only accept input from the vagrant post-processor if artifact.BuilderId() != "mitchellh.post-processor.vagrant" { return nil, false, fmt.Errorf("Unknown artifact type, requires box from vagrant post-processor: %s", artifact.BuilderId()) } // Assume there is only one .box file to upload box := artifact.Files()[0] if !strings.HasSuffix(box, ".box") { return nil, false, fmt.Errorf("Unknown files in artifact from vagrant post-processor: %s", artifact.Files()) } provider := providerFromBuilderName(artifact.Id()) ui.Say(fmt.Sprintf("Preparing to copy box for '%s' provider to path '%s'", provider, p.config.Path)) // open the box so we can copy to path and calculate checksum for the manifest file, err := os.Open(box) if err != nil { return nil, false, err } defer file.Close() // get the file's size info, err := file.Stat() if err != nil { return nil, false, err } size := info.Size() ui.Message(fmt.Sprintf("Box to copy: %s (%d bytes)", box, size)) // get the latest manifest so we can add to it ui.Message("Fetching latest manifest") manifest, err := p.getManifest() if err != nil { return nil, false, err } // generate the path to copy the box to the path boxPath := fmt.Sprintf("%s/%s/%s/%s", p.config.Path, p.config.BoxDir, p.config.Version, path.Base(box)) ui.Message("Generating checksum") checksum, err := sum256(file) if err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Checksum is %s", checksum)) ui.Message(fmt.Sprintf("Adding %s %s box to manifest", provider, p.config.Version)) if err := manifest.add(p.config.Version, &Provider{ Name: provider, Url: boxPath, ChecksumType: "sha256", Checksum: checksum, }); err != nil { return nil, false, err } // upload the box to S3 (rewinding as we already read the file to generate the checksum) ui.Message(fmt.Sprintf("Copying box to path: %s", boxPath)) if _, err := file.Seek(0, 0); err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Opening box")) in, err := os.Open(box) if err != nil { return nil, false, err } defer in.Close() ui.Message(fmt.Sprintf("Creating directories")) err = os.MkdirAll(path.Dir(boxPath), 0777) if err != nil { return nil, false, err } ui.Message(fmt.Sprintf("Creating box copy")) out, err := os.Create(boxPath) if err != nil { return nil, false, err } defer out.Close() ui.Message(fmt.Sprintf("Copying box")) _, err = io.Copy(out, in) cerr := out.Close() if err != nil { return nil, false, err } if cerr != nil { return nil, false, cerr } ui.Message(fmt.Sprintf("Uploading the manifest: %s", p.config.ManifestPath)) if err := p.putManifest(manifest); err != nil { return nil, false, err } return &Artifact{p.config.ManifestPath}, true, nil }