func (b *Builder) Prepare(raws ...interface{}) error {
	md, err := common.DecodeConfig(&b.config, raws...)
	if err != nil {
		return err
	}

	b.config.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return err
	}
	b.config.tpl.UserVars = b.config.PackerUserVars
	b.config.tpl.Funcs(awscommon.TemplateFuncs)

	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)
	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
	errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)

	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}

	// Log the config with the access key and secret key scrubbed out,
	// rather than appending them to the log line in plain text.
	log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))
	return nil
}
func (b *Builder) Prepare(raws ...interface{}) error {
	md, err := common.DecodeConfig(&b.config, raws...)
	if err != nil {
		return err
	}

	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)
	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare()...)
	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare()...)

	// Validate the AMI name
	if b.config.AMIName == "" {
		errs = packer.MultiErrorAppend(
			errs, errors.New("ami_name must be specified"))
	} else {
		_, err = template.New("ami").Parse(b.config.AMIName)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Failed parsing ami_name: %s", err))
		}
	}

	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}

	log.Printf("Config: %+v", b.config)
	return nil
}
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } if p.config.TempConfigDir == "" { p.config.TempConfigDir = DefaultTempConfigDir } // Accumulate any errors errs := common.CheckUnusedConfig(md) if p.config.LocalStateTree == "" { errs = packer.MultiErrorAppend(errs, errors.New("Please specify a local_state_tree")) } else if _, err := os.Stat(p.config.LocalStateTree); err != nil { errs = packer.MultiErrorAppend(errs, errors.New("local_state_tree must exist and be accessible")) } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.Password)) return nil, nil }
func (p *AWSBoxPostProcessor) Configure(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) validates := map[string]*string{ "output": &p.config.OutputPath, "vagrantfile_template": &p.config.VagrantfileTemplate, } for n, ptr := range validates { if err := p.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars b.config.tpl.Funcs(awscommon.TemplateFuncs) // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.WinRMConfig.Prepare(b.config.tpl)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } return nil, nil }
func (c *UserConfig) Validate() error { var err error c.tpl, err = packer.NewConfigTemplate() if err != nil { return err } c.tpl.UserVars = c.PackerUserVars errs := common.CheckUnusedConfig(c.metadata) err = c.validateDirPresence(c.AssetsDir, "assets_dir") if err != nil { errs = packer.MultiErrorAppend(errs, err) } if c.ManifestPath != nil { err = c.validateFilePresence(*c.ManifestPath, "manifest_path") if err != nil { errs = packer.MultiErrorAppend(errs, err) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) md, err := common.DecodeConfig(c, raws...) if err != nil { return nil, nil, err } c.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, nil, err } // Default Pull if it wasn't set hasPull := false for _, k := range md.Keys { if k == "Pull" { hasPull = true break } } if !hasPull { c.Pull = true } errs := common.CheckUnusedConfig(md) templates := map[string]*string{ "export_path": &c.ExportPath, "image": &c.Image, } for n, ptr := range templates { var err error *ptr, err = c.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if c.ExportPath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("export_path must be specified")) } if c.Image == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("image must be specified")) } if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } return c, nil, nil }
func (builder *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&builder.config, raws...) if err != nil { return nil, err } builder.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } builder.config.tpl.UserVars = builder.config.PackerUserVars builder.config.tpl.Funcs(awscommon.TemplateFuncs) if builder.config.VolumeSize == 0 { builder.config.VolumeSize = 12 } if builder.config.VolumeType == "" { builder.config.VolumeType = "standard" } if builder.config.RootDeviceName == "" { builder.config.RootDeviceName = "/dev/xvda" } builder.ensureWorkerDeviceMapping() // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, builder.config.AccessConfig.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.BlockDevices.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.AMIConfig.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.RunConfig.Prepare(builder.config.tpl)...) templates := map[string]*string{ "worker_device_name": &builder.config.WorkerDeviceName, "root_device_name": &builder.config.RootDeviceName, } for n, ptr := range templates { var err error *ptr, err = builder.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(builder.config, builder.config.AccessKey, builder.config.SecretKey)) return nil, nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) if p.config.StagingDir == "" { p.config.StagingDir = DefaultStagingDir } // Templates templates := map[string]*string{ "staging_dir": &p.config.StagingDir, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } // Validation err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) } for _, path := range p.config.PlaybookPaths { err := validateDirConfig(path, "playbook_paths") if err != nil { errs = packer.MultiErrorAppend(errs, err) } } for _, path := range p.config.RolePaths { if err := validateDirConfig(path, "role_paths"); err != nil { errs = packer.MultiErrorAppend(errs, err) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars if p.config.TempConfigDir == "" { p.config.TempConfigDir = DefaultTempConfigDir } // Accumulate any errors errs := common.CheckUnusedConfig(md) templates := map[string]*string{ "bootstrap_args": &p.config.BootstrapArgs, "minion_config": &p.config.MinionConfig, "local_state_tree": &p.config.LocalStateTree, "temp_config_dir": &p.config.TempConfigDir, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if p.config.LocalStateTree != "" { if _, err := os.Stat(p.config.LocalStateTree); err != nil { errs = packer.MultiErrorAppend(errs, errors.New("local_state_tree must exist and be accessible")) } } if p.config.MinionConfig != "" { if _, err := os.Stat(p.config.MinionConfig); err != nil { errs = packer.MultiErrorAppend(errs, errors.New("minion_config must exist and be accessible")) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (p *PostProcessor) configureSingle(config *Config, raws ...interface{}) error { md, err := common.DecodeConfig(config, raws...) if err != nil { return err } config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } config.tpl.UserVars = config.PackerUserVars // Defaults if config.OutputPath == "" { config.OutputPath = "packer_{{ .BuildName }}_{{.Provider}}.box" } found := false for _, k := range md.Keys { if k == "compression_level" { found = true break } } if !found { config.CompressionLevel = flate.DefaultCompression } // Accumulate any errors errs := common.CheckUnusedConfig(md) validates := map[string]*string{ "output": &config.OutputPath, "vagrantfile_template": &config.VagrantfileTemplate, "provider": &config.Provider, } for n, ptr := range validates { if err := config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func NewConfig(raws ...interface{}) (*Config, []string, error) {
	c := new(Config)
	md, err := common.DecodeConfig(c, raws...)
	if err != nil {
		return nil, nil, err
	}

	c.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return nil, nil, err
	}
	c.tpl.UserVars = c.PackerUserVars

	// Defaults
	if c.Port == 0 {
		c.Port = 22
	}

	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)

	if c.Host == "" {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("host must be specified"))
	}

	if c.SSHUsername == "" {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("ssh_username must be specified"))
	}

	if c.SSHPassword == "" && c.SSHPrivateKeyFile == "" {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("one of ssh_password and ssh_private_key_file must be specified"))
	}

	if c.SSHPassword != "" && c.SSHPrivateKeyFile != "" {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("only one of ssh_password and ssh_private_key_file must be specified"))
	}

	if errs != nil && len(errs.Errors) > 0 {
		return nil, nil, errs
	}

	return c, nil, nil
}
func (b *Builder) Prepare(raws ...interface{}) error {
	md, err := common.DecodeConfig(&b.config, raws...)
	if err != nil {
		return err
	}

	b.config.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return err
	}
	b.config.tpl.UserVars = b.config.PackerUserVars

	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)
	errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)
	errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)
	errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)

	// Process the tag keys and values through the config template
	newTags := make(map[string]string)
	for k, v := range b.config.Tags {
		k, err = b.config.tpl.Process(k, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(errs,
				fmt.Errorf("Error processing tag key %s: %s", k, err))
			continue
		}

		v, err = b.config.tpl.Process(v, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(errs,
				fmt.Errorf("Error processing tag value '%s': %s", v, err))
			continue
		}

		newTags[k] = v
	}

	b.config.Tags = newTags

	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}

	log.Printf("Config: %+v", b.config)
	return nil
}
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) templates := map[string]*string{ "source": &p.config.Source, "destination": &p.config.Destination, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if _, err := os.Stat(p.config.Source); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Bad source '%s': %s", p.config.Source, err)) } if p.config.Destination == "" { errs = packer.MultiErrorAppend(errs, errors.New("Destination must be specified.")) } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
// Prepare processes the build configuration parameters.
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) {
	md, err := common.DecodeConfig(&b.config, raws...)
	if err != nil {
		return nil, err
	}

	b.config.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return nil, err
	}
	log.Println(fmt.Sprintf("%s: %v", "PackerUserVars", b.config.PackerUserVars))
	b.config.tpl.UserVars = b.config.PackerUserVars

	// Accumulate any errors and warnings
	errs := common.CheckUnusedConfig(md)
	errs = packer.MultiErrorAppend(errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...)

	warnings := make([]string, 0)

	if b.config.DiskSizeGB == 0 {
		b.config.DiskSizeGB = 40
	}
	log.Println(fmt.Sprintf("%s: %v", "DiskSize", b.config.DiskSizeGB))

	if b.config.DiskSizeGB < 10 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"disk_size_gb: Windows server requires disk space >= 10 GB, but defined: %v", b.config.DiskSizeGB))
	} else if b.config.DiskSizeGB > 65536 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"disk_size_gb: Windows server requires disk space <= 65536 GB, but defined: %v", b.config.DiskSizeGB))
	}

	if b.config.RamSizeMB == 0 {
		b.config.RamSizeMB = 1024
	}
	log.Println(fmt.Sprintf("%s: %v", "RamSize", b.config.RamSizeMB))

	var ramMinMb uint = 512
	var ramMaxMb uint = 6538

	if b.config.RamSizeMB < ramMinMb {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"ram_size_mb: Windows server requires memory size >= %v MB, but defined: %v", ramMinMb, b.config.RamSizeMB))
	} else if b.config.RamSizeMB > ramMaxMb {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"ram_size_mb: Windows server requires memory size <= %v MB, but defined: %v", ramMaxMb, b.config.RamSizeMB))
	}

	warnings = appendWarnings(warnings, fmt.Sprintf("Hyper-V might fail to create a VM if there is no available memory in the system."))

	if b.config.VMName == "" {
		b.config.VMName = fmt.Sprintf("pvm_%s", uuid.New())
	}

	if b.config.SwitchName == "" {
		b.config.SwitchName = fmt.Sprintf("pis_%s", uuid.New())
	}

	if b.config.SleepTimeMinutes == 0 {
		b.config.SleepTimeMinutes = 10
	} else if b.config.SleepTimeMinutes < 0 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"wait_time_minutes: '%v' %s", int64(b.config.SleepTimeMinutes), "the value can't be negative"))
	} else if b.config.SleepTimeMinutes > 1440 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"wait_time_minutes: '%v' %s", uint(b.config.SleepTimeMinutes), "the value is too big"))
	} else if b.config.SleepTimeMinutes > 120 {
		warnings = appendWarnings(warnings, fmt.Sprintf(
			"wait_time_minutes: '%v' %s", uint(b.config.SleepTimeMinutes),
			"You may want to decrease the value. Usually 20 min is enough."))
	}
	log.Println(fmt.Sprintf("%s: %v", "SleepTimeMinutes", uint(b.config.SleepTimeMinutes)))

	// Errors
	templates := map[string]*string{
		"iso_url":     &b.config.RawSingleISOUrl,
		"product_key": &b.config.ProductKey,
	}

	for n, ptr := range templates {
		var err error
		*ptr, err = b.config.tpl.Process(*ptr, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing %s: %s", n, err))
		}
	}

	pk := strings.TrimSpace(b.config.ProductKey)
	if len(pk) != 0 {
		pattern := "^[A-Z0-9]{5}-[A-Z0-9]{5}-[A-Z0-9]{5}-[A-Z0-9]{5}-[A-Z0-9]{5}$"
		value := pk
		match, _ := regexp.MatchString(pattern, value)
		if !match {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf(
				"product_key: Make sure the product_key follows the pattern: XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"))
		}

		warnings = appendWarnings(warnings, fmt.Sprintf("product_key: %s",
			"value is not empty. Packer will try to activate Windows with the product key. To do this Packer will need an Internet connection."))
	}

	log.Println(fmt.Sprintf("%s: %v", "VMName", b.config.VMName))
	log.Println(fmt.Sprintf("%s: %v", "SwitchName", b.config.SwitchName))
	log.Println(fmt.Sprintf("%s: %v", "ProductKey", b.config.ProductKey))

	if b.config.RawSingleISOUrl == "" {
		errs = packer.MultiErrorAppend(errs, errors.New("iso_url: The option can't be missed and a path must be specified."))
	} else if _, err := os.Stat(b.config.RawSingleISOUrl); err != nil {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("iso_url: Check the path is correct"))
	}
	log.Println(fmt.Sprintf("%s: %v", "RawSingleISOUrl", b.config.RawSingleISOUrl))

	guestOSTypesIsValid := false
	guestOSTypes := []string{
		WS2012R2DC,
		// WS2012R2St,
	}
	log.Println(fmt.Sprintf("%s: %v", "GuestOSType", b.config.GuestOSType))

	for _, guestType := range guestOSTypes {
		if b.config.GuestOSType == guestType {
			guestOSTypesIsValid = true
			break
		}
	}

	if !guestOSTypesIsValid {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf(
			"guest_os_type: The value is invalid. Must be one of: %v", guestOSTypes))
	}

	if errs != nil && len(errs.Errors) > 0 {
		return warnings, errs
	}

	return warnings, nil
}
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors and warnings errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend( errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(b.config.tpl)...) warnings := make([]string, 0) if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } if b.config.ParallelsToolsMode == "" { b.config.ParallelsToolsMode = "upload" } if b.config.ParallelsToolsGuestPath == "" { b.config.ParallelsToolsGuestPath = "prl-tools.iso" } if b.config.ParallelsToolsHostPath == "" { b.config.ParallelsToolsHostPath = "/Applications/Parallels Desktop.app/Contents/Resources/Tools/prl-tools-other.iso" } if b.config.HardDriveInterface == "" { b.config.HardDriveInterface = "sata" } if b.config.GuestOSType == "" { b.config.GuestOSType = "other" } if b.config.GuestOSDistribution == "" { b.config.GuestOSDistribution = "other" } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if len(b.config.HostInterfaces) == 0 { b.config.HostInterfaces = []string{"en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9", "ppp0", "ppp1", "ppp2"} } if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } // Errors templates := map[string]*string{ "parallels_tools_mode": &b.config.ParallelsToolsMode, "parallels_tools_host_path": &b.config.ParallelsToolsHostPath, "parallels_tools_guest_path": &b.config.ParallelsToolsGuestPath, "guest_os_type": &b.config.GuestOSType, "guest_os_distribution": &b.config.GuestOSDistribution, "hard_drive_interface": &b.config.HardDriveInterface, "http_directory": &b.config.HTTPDir, "iso_checksum": &b.config.ISOChecksum, "iso_checksum_type": &b.config.ISOChecksumType, "iso_url": &b.config.RawSingleISOUrl, "vm_name": &b.config.VMName, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, url := range b.config.ISOUrls { var err error b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) } } validates := map[string]*string{ "parallels_tools_guest_path": &b.config.ParallelsToolsGuestPath, } for n, ptr := range validates { if err := b.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } for i, command := range b.config.BootCommand { if err := b.config.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != 
"sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) } if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } if b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, errors.New("The iso_checksum_type must be specified.")) } else { b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) if b.config.ISOChecksumType != "none" { if b.config.ISOChecksum == "" { errs = packer.MultiErrorAppend( errs, errors.New("Due to large file sizes, an iso_checksum is required")) } else { b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) } if h := common.HashForType(b.config.ISOChecksumType); h == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) } } } if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("One of iso_url or iso_urls must be specified.")) } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { errs = packer.MultiErrorAppend( errs, errors.New("Only one of iso_url or iso_urls may be specified.")) } else if b.config.RawSingleISOUrl != "" { b.config.ISOUrls = []string{b.config.RawSingleISOUrl} } for i, url := range b.config.ISOUrls { b.config.ISOUrls[i], err = common.DownloadableURL(url) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) } } validMode := false validModes := []string{ parallelscommon.ParallelsToolsModeDisable, parallelscommon.ParallelsToolsModeAttach, parallelscommon.ParallelsToolsModeUpload, } for _, mode := range validModes { if b.config.ParallelsToolsMode == mode { validMode = true break } } if !validMode { errs = packer.MultiErrorAppend(errs, fmt.Errorf("parallels_tools_mode is invalid. Must be one of: %v", validModes)) } // Warnings if b.config.ISOChecksumType == "none" { warnings = append(warnings, "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ "a checksum is highly recommended.") } if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. Without a shutdown command, Packer\n"+ "will forcibly halt the virtual machine, which may result in data loss.") } if errs != nil && len(errs.Errors) > 0 { return warnings, errs } return warnings, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.DriverConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.SSHConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.VMXConfig.Prepare(b.config.tpl)...) warnings := make([]string, 0) if b.config.DiskName == "" { b.config.DiskName = "disk" } if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } if b.config.DiskTypeId == "" { // Default is growable virtual disk split in 2GB files. b.config.DiskTypeId = "1" if b.config.RemoteType == "esx5" { b.config.DiskTypeId = "zeroedthick" } } if b.config.FloppyFiles == nil { b.config.FloppyFiles = make([]string, 0) } if b.config.GuestOSType == "" { b.config.GuestOSType = "other" } if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if b.config.VNCPortMin == 0 { b.config.VNCPortMin = 5900 } if b.config.VNCPortMax == 0 { b.config.VNCPortMax = 6000 } if b.config.RemoteUser == "" { b.config.RemoteUser = "******" } if b.config.RemoteDatastore == "" { b.config.RemoteDatastore = "datastore1" } if b.config.RemotePort == 0 { b.config.RemotePort = 22 } // Errors templates := map[string]*string{ "disk_name": &b.config.DiskName, "guest_os_type": &b.config.GuestOSType, "http_directory": &b.config.HTTPDir, "iso_checksum": &b.config.ISOChecksum, "iso_checksum_type": &b.config.ISOChecksumType, "iso_url": &b.config.RawSingleISOUrl, "vm_name": &b.config.VMName, "vmx_template_path": &b.config.VMXTemplatePath, "remote_type": &b.config.RemoteType, "remote_host": &b.config.RemoteHost, "remote_datastore": &b.config.RemoteDatastore, "remote_user": &b.config.RemoteUser, "remote_password": &b.config.RemotePassword, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, url := range b.config.ISOUrls { var err error b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) } } for i, command := range b.config.BootCommand { if err := b.config.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } for i, file := range b.config.FloppyFiles { var err error b.config.FloppyFiles[i], err = b.config.tpl.Process(file, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing floppy_files[%d]: %s", i, err)) } } if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } if 
b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, errors.New("The iso_checksum_type must be specified.")) } else { b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) if b.config.ISOChecksumType != "none" { if b.config.ISOChecksum == "" { errs = packer.MultiErrorAppend( errs, errors.New("Due to large file sizes, an iso_checksum is required")) } else { b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) } if h := common.HashForType(b.config.ISOChecksumType); h == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) } } } if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("One of iso_url or iso_urls must be specified.")) } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { errs = packer.MultiErrorAppend( errs, errors.New("Only one of iso_url or iso_urls may be specified.")) } else if b.config.RawSingleISOUrl != "" { b.config.ISOUrls = []string{b.config.RawSingleISOUrl} } for i, url := range b.config.ISOUrls { b.config.ISOUrls[i], err = common.DownloadableURL(url) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) } } if b.config.VMXTemplatePath != "" { if err := b.validateVMXTemplatePath(); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("vmx_template_path is invalid: %s", err)) } } if b.config.VNCPortMin > b.config.VNCPortMax { errs = packer.MultiErrorAppend( errs, fmt.Errorf("vnc_port_min must be less than vnc_port_max")) } // Remote configuration validation if b.config.RemoteType != "" { if b.config.RemoteHost == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("remote_host must be specified")) } } // Warnings if b.config.ISOChecksumType == "none" { warnings = append(warnings, "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ "a checksum is highly recommended.") } if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. Without a shutdown command, Packer\n"+ "will forcibly halt the virtual machine, which may result in data loss.") } if errs != nil && len(errs.Errors) > 0 { return warnings, errs } return warnings, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) // Optional configuration with defaults if b.config.APIKey == "" { // Default to environment variable for api_key, if it exists b.config.APIKey = os.Getenv("DIGITALOCEAN_API_KEY") } if b.config.ClientID == "" { // Default to environment variable for client_id, if it exists b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID") } if b.config.RegionID == 0 { // Default to Region "New York" b.config.RegionID = 1 } if b.config.SizeID == 0 { // Default to 512mb, the smallest droplet size b.config.SizeID = 66 } if b.config.ImageID == 0 { // Default to base image "Ubuntu 12.04.4 x64 (id: 3101045)" b.config.ImageID = 3101045 } if b.config.SnapshotName == "" { // Default to packer-{{ unix timestamp (utc) }} b.config.SnapshotName = "packer-{{timestamp}}" } if b.config.DropletName == "" { // Default to packer-[time-ordered-uuid] b.config.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } if b.config.SSHUsername == "" { // Default to "root". You can override this if your // SourceImage has a different user account then the DO default b.config.SSHUsername = "******" } if b.config.SSHPort == 0 { // Default to port 22 per DO default b.config.SSHPort = 22 } if b.config.RawSSHTimeout == "" { // Default to 1 minute timeouts b.config.RawSSHTimeout = "1m" } if b.config.RawStateTimeout == "" { // Default to 6 minute timeouts waiting for // desired state. i.e waiting for droplet to become active b.config.RawStateTimeout = "6m" } templates := map[string]*string{ "client_id": &b.config.ClientID, "api_key": &b.config.APIKey, "snapshot_name": &b.config.SnapshotName, "droplet_name": &b.config.DropletName, "ssh_username": &b.config.SSHUsername, "ssh_timeout": &b.config.RawSSHTimeout, "state_timeout": &b.config.RawStateTimeout, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } // Required configurations that will display errors if not set if b.config.ClientID == "" { errs = packer.MultiErrorAppend( errs, errors.New("a client_id must be specified")) } if b.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("an api_key must be specified")) } sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } b.config.sshTimeout = sshTimeout stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } b.config.stateTimeout = stateTimeout if errs != nil && len(errs.Errors) > 0 { return nil, errs } common.ScrubConfig(b.config, b.config.ClientID, b.config.APIKey) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors and warnings errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.FloppyConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend( errs, b.config.OutputConfig.Prepare(b.config.tpl, &b.config.PackerConfig)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.PrlctlVersionConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ShutdownConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.WinRMConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ToolsConfig.Prepare(b.config.tpl)...) warnings := make([]string, 0) if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } if b.config.HardDriveInterface == "" { b.config.HardDriveInterface = "sata" } if b.config.GuestOSType == "" { b.config.GuestOSType = "other" } if b.config.GuestOSDistribution != "" { // Compatibility with older templates: // Use value of 'guest_os_distribution' if it is defined. b.config.GuestOSType = b.config.GuestOSDistribution warnings = append(warnings, "A 'guest_os_distribution' has been completely replaced with 'guest_os_type'\n"+ "It is recommended to remove it and assign the previous value to 'guest_os_type'.\n"+ "Run it to see all available values: `prlctl create x -d list` ") } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if len(b.config.HostInterfaces) == 0 { b.config.HostInterfaces = []string{"en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9", "ppp0", "ppp1", "ppp2"} } if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } // Errors templates := map[string]*string{ "guest_os_type": &b.config.GuestOSType, "hard_drive_interface": &b.config.HardDriveInterface, "http_directory": &b.config.HTTPDir, "iso_checksum": &b.config.ISOChecksum, "iso_checksum_type": &b.config.ISOChecksumType, "iso_url": &b.config.RawSingleISOUrl, "vm_name": &b.config.VMName, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, url := range b.config.ISOUrls { var err error b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) } } for i, command := range b.config.BootCommand { if err := b.config.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" && b.config.HardDriveInterface != "scsi" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide, sata, or scsi")) } if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } if b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, 
errors.New("The iso_checksum_type must be specified.")) } else { b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) if b.config.ISOChecksumType != "none" { if b.config.ISOChecksum == "" { errs = packer.MultiErrorAppend( errs, errors.New("Due to large file sizes, an iso_checksum is required")) } else { b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) } if h := common.HashForType(b.config.ISOChecksumType); h == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) } } } if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("One of iso_url or iso_urls must be specified.")) } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { errs = packer.MultiErrorAppend( errs, errors.New("Only one of iso_url or iso_urls may be specified.")) } else if b.config.RawSingleISOUrl != "" { b.config.ISOUrls = []string{b.config.RawSingleISOUrl} } for i, url := range b.config.ISOUrls { b.config.ISOUrls[i], err = common.DownloadableURL(url) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) } } // Warnings if b.config.ISOChecksumType == "none" { warnings = append(warnings, "A checksum type of 'none' was specified. Since ISO files are so big,\n"+ "a checksum is highly recommended.") } if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. Without a shutdown command, Packer\n"+ "will forcibly halt the virtual machine, which may result in data loss.") } if b.config.ParallelsToolsHostPath != "" { warnings = append(warnings, "A 'parallels_tools_host_path' has been deprecated and not in use anymore\n"+ "You can remove it from your Packer template.") } if errs != nil && len(errs.Errors) > 0 { return warnings, errs } return warnings, nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "chmod +x {{.Path}}; {{.Vars}} {{.Path}}" } if p.config.Inline != nil && len(p.config.Inline) == 0 { p.config.Inline = nil } if p.config.InlineShebang == "" { p.config.InlineShebang = "/bin/sh" } if p.config.RawStartRetryTimeout == "" { p.config.RawStartRetryTimeout = "5m" } if p.config.RemotePath == "" { p.config.RemotePath = DefaultRemotePath } if p.config.Scripts == nil { p.config.Scripts = make([]string, 0) } if p.config.Vars == nil { p.config.Vars = make([]string, 0) } if p.config.Script != "" && len(p.config.Scripts) > 0 { errs = packer.MultiErrorAppend(errs, errors.New("Only one of script or scripts can be specified.")) } if p.config.Script != "" { p.config.Scripts = []string{p.config.Script} } templates := map[string]*string{ "inline_shebang": &p.config.InlineShebang, "script": &p.config.Script, "start_retry_timeout": &p.config.RawStartRetryTimeout, "remote_path": &p.config.RemotePath, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } sliceTemplates := map[string][]string{ "inline": p.config.Inline, "scripts": p.config.Scripts, "environment_vars": p.config.Vars, } for n, slice := range sliceTemplates { for i, elem := range slice { var err error slice[i], err = p.config.tpl.Process(elem, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) } } } if len(p.config.Scripts) == 0 && p.config.Inline == nil { errs = packer.MultiErrorAppend(errs, errors.New("Either a script file or inline script must be specified.")) } else if len(p.config.Scripts) > 0 && p.config.Inline != nil { errs = packer.MultiErrorAppend(errs, errors.New("Only a script file or an inline script can be specified, not both.")) } for _, path := range p.config.Scripts { if _, err := os.Stat(path); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Bad script '%s': %s", path, err)) } } // Do a check for bad environment variables, such as '=foo', 'foobar' for _, kv := range p.config.Vars { vs := strings.Split(kv, "=") if len(vs) != 2 || vs[0] == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) } } if p.config.RawStartRetryTimeout != "" { p.config.startRetryTimeout, err = time.ParseDuration(p.config.RawStartRetryTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing start_retry_timeout: %s", err)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-solo --no-color -c {{.ConfigPath}} -j {{.JsonPath}}" } if p.config.InstallCommand == "" { p.config.InstallCommand = "curl -L https://www.opscode.com/chef/install.sh | {{if .Sudo}}sudo {{end}}bash" } if p.config.RunList == nil { p.config.RunList = make([]string, 0) } if p.config.StagingDir == "" { p.config.StagingDir = "/tmp/packer-chef-solo" } // Accumulate any errors errs := common.CheckUnusedConfig(md) templates := map[string]*string{ "config_template": &p.config.ConfigTemplate, "data_bags_path": &p.config.DataBagsPath, "encrypted_data_bag_secret": &p.config.EncryptedDataBagSecretPath, "roles_path": &p.config.RolesPath, "staging_dir": &p.config.StagingDir, "environments_path": &p.config.EnvironmentsPath, "chef_environment": &p.config.ChefEnvironment, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } sliceTemplates := map[string][]string{ "cookbook_paths": p.config.CookbookPaths, "remote_cookbook_paths": p.config.RemoteCookbookPaths, "run_list": p.config.RunList, } for n, slice := range sliceTemplates { for i, elem := range slice { var err error slice[i], err = p.config.tpl.Process(elem, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) } } } validates := map[string]*string{ "execute_command": &p.config.ExecuteCommand, "install_command": &p.config.InstallCommand, } for n, ptr := range validates { if err := p.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } if p.config.ConfigTemplate != "" { fi, err := os.Stat(p.config.ConfigTemplate) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad config template path: %s", err)) } else if fi.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Config template path must be a file: %s", err)) } } for _, path := range p.config.CookbookPaths { pFileInfo, err := os.Stat(path) if err != nil || !pFileInfo.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad cookbook path '%s': %s", path, err)) } } if p.config.RolesPath != "" { pFileInfo, err := os.Stat(p.config.RolesPath) if err != nil || !pFileInfo.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad roles path '%s': %s", p.config.RolesPath, err)) } } if p.config.DataBagsPath != "" { pFileInfo, err := os.Stat(p.config.DataBagsPath) if err != nil || !pFileInfo.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad data bags path '%s': %s", p.config.DataBagsPath, err)) } } if p.config.EncryptedDataBagSecretPath != "" { pFileInfo, err := os.Stat(p.config.EncryptedDataBagSecretPath) if err != nil || pFileInfo.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad encrypted data bag secret '%s': %s", p.config.EncryptedDataBagSecretPath, err)) } } if p.config.EnvironmentsPath != "" { pFileInfo, err := os.Stat(p.config.EnvironmentsPath) if err != nil || !pFileInfo.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad environments path '%s': %s", p.config.EnvironmentsPath, err)) } } jsonValid := true for k, v := 
range p.config.Json { p.config.Json[k], err = p.deepJsonFix(k, v) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing JSON: %s", err)) jsonValid = false } } if jsonValid { // Process the user variables within the JSON and set the JSON. // Do this early so that we can validate and show errors. p.config.Json, err = p.processJsonUserVars() if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing user variables in JSON: %s", err)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars if p.config.ExecuteCommand == "" { p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-client " + "--no-color -c {{.ConfigPath}} -j {{.JsonPath}}" } if p.config.InstallCommand == "" { p.config.InstallCommand = "curl -L " + "https://www.opscode.com/chef/install.sh | " + "{{if .Sudo}}sudo {{end}}bash" } if p.config.RunList == nil { p.config.RunList = make([]string, 0) } if p.config.StagingDir == "" { p.config.StagingDir = "/tmp/packer-chef-client" } // Accumulate any errors errs := common.CheckUnusedConfig(md) templates := map[string]*string{ "config_template": &p.config.ConfigTemplate, "node_name": &p.config.NodeName, "staging_dir": &p.config.StagingDir, "chef_server_url": &p.config.ServerUrl, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } sliceTemplates := map[string][]string{ "run_list": p.config.RunList, } for n, slice := range sliceTemplates { for i, elem := range slice { var err error slice[i], err = p.config.tpl.Process(elem, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) } } } validates := map[string]*string{ "execute_command": &p.config.ExecuteCommand, "install_command": &p.config.InstallCommand, } for n, ptr := range validates { if err := p.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } if p.config.ConfigTemplate != "" { fi, err := os.Stat(p.config.ConfigTemplate) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Bad config template path: %s", err)) } else if fi.IsDir() { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Config template path must be a file: %s", err)) } } if p.config.ServerUrl == "" { errs = packer.MultiErrorAppend( errs, fmt.Errorf("server_url must be set")) } jsonValid := true for k, v := range p.config.Json { p.config.Json[k], err = p.deepJsonFix(k, v) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing JSON: %s", err)) jsonValid = false } } if jsonValid { // Process the user variables within the JSON and set the JSON. // Do this early so that we can validate and show errors. p.config.Json, err = p.processJsonUserVars() if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing user variables in JSON: %s", err)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func (p *Provisioner) Prepare(raws ...interface{}) error { p.done = make(chan struct{}) copy(p.config.raws, raws) md, err := common.DecodeConfig(&p.config, raws...) if err != nil { return err } p.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } p.config.tpl.UserVars = p.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) // Defaults if p.config.Command == "" { p.config.Command = "ansible-playbook" } // Templates templates := map[string]*string{ "command": &p.config.Command, "playbook_file": &p.config.PlaybookFile, "local_port": &p.config.LocalPort, "ssh_host_key_file": &p.config.SSHHostKeyFile, "ssh_authorized_key_file": &p.config.SSHAuthorizedKeyFile, "sftp_cmd": &p.config.SFTPCmd, } for n, ptr := range templates { var err error *ptr, err = p.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } sliceTemplates := map[string][]string{ "extra_arguments": p.config.ExtraArguments, } for n, slice := range sliceTemplates { for i, elem := range slice { var err error slice[i], err = p.config.tpl.Process(elem, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err)) } } } err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) } err = validateFileConfig(p.config.SSHAuthorizedKeyFile, "ssh_authorized_key_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) } // Check that the host key file exists, if configured if len(p.config.SSHHostKeyFile) > 0 { err = validateFileConfig(p.config.SSHHostKeyFile, "ssh_host_key_file", true) if err != nil { log.Println(p.config.SSHHostKeyFile, "does not exist") errs = packer.MultiErrorAppend(errs, err) } } if len(p.config.LocalPort) > 0 { if _, err := strconv.ParseUint(p.config.LocalPort, 10, 16); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("local_port: %s must be a valid port", p.config.LocalPort)) } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) md, err := common.DecodeConfig(c, raws...) if err != nil { return nil, nil, err } c.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, nil, err } c.tpl.UserVars = c.PackerUserVars // Defaults if c.VMName == "" { c.VMName = fmt.Sprintf("packer-%s-{{timestamp}}", c.PackerBuildName) } // Prepare the errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, c.DriverConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.OutputConfig.Prepare(c.tpl, &c.PackerConfig)...) errs = packer.MultiErrorAppend(errs, c.RunConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.ShutdownConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.SSHConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.ToolsConfig.Prepare(c.tpl)...) errs = packer.MultiErrorAppend(errs, c.VMXConfig.Prepare(c.tpl)...) templates := map[string]*string{ "remote_type": &c.RemoteType, "source_path": &c.SourcePath, "vm_name": &c.VMName, } for n, ptr := range templates { var err error *ptr, err = c.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, file := range c.FloppyFiles { var err error c.FloppyFiles[i], err = c.tpl.Process(file, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing floppy_files[%d]: %s", i, err)) } } for i, command := range c.BootCommand { if err := c.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } if c.SourcePath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is required")) } else { if _, err := os.Stat(c.SourcePath); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_path is invalid: %s", err)) } } // Warnings var warnings []string if c.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. Without a shutdown command, Packer\n"+ "will forcibly halt the virtual machine, which may result in data loss.") } // Check for any errors. if errs != nil && len(errs.Errors) > 0 { return nil, warnings, errs } return c, warnings, nil }
func (p *Provisioner) Prepare(raws ...interface{}) error {
	md, err := common.DecodeConfig(&p.config, raws...)
	if err != nil {
		return err
	}

	p.config.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return err
	}
	p.config.tpl.UserVars = p.config.PackerUserVars

	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)

	if p.config.Inline != nil && len(p.config.Inline) == 0 {
		p.config.Inline = nil
	}

	if p.config.DistrDstPath == "" {
		p.config.DistrDstPath = DistrDstPathDefault
	}

	sliceTemplates := map[string][]string{
		"inline": p.config.Inline,
	}

	for n, slice := range sliceTemplates {
		for i, elem := range slice {
			var err error
			slice[i], err = p.config.tpl.Process(elem, nil)
			if err != nil {
				errs = packer.MultiErrorAppend(
					errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err))
			}
		}
	}
	log.Println(fmt.Sprintf("%s: %v", "inline", p.config.Inline))

	templates := map[string]*string{
		"script_path":    &p.config.ScriptPath,
		"distr_src_path": &p.config.DistrSrcPath,
		"distr_dst_path": &p.config.DistrDstPath,
	}

	for n, ptr := range templates {
		var err error
		*ptr, err = p.config.tpl.Process(*ptr, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", n, err))
		}
	}
	log.Println(fmt.Sprintf("%s: %v", "distr_dst_path", p.config.DistrDstPath))

	if len(p.config.ScriptPath) == 0 && p.config.Inline == nil {
		errs = packer.MultiErrorAppend(errs,
			fmt.Errorf("Either a script file or inline script must be specified."))
	}

	if len(p.config.ScriptPath) != 0 {
		if _, err := os.Stat(p.config.ScriptPath); err != nil {
			errs = packer.MultiErrorAppend(errs,
				fmt.Errorf("script_path: '%v' check the path is correct.", p.config.ScriptPath))
		}
	}
	log.Println(fmt.Sprintf("%s: %v", "script_path", p.config.ScriptPath))

	if len(p.config.DistrSrcPath) != 0 {
		if _, err := os.Stat(p.config.DistrSrcPath); err != nil {
			errs = packer.MultiErrorAppend(errs,
				fmt.Errorf("distr_src_path: '%v' check the path is correct.", p.config.DistrSrcPath))
		}
	}
	log.Println(fmt.Sprintf("%s: %v", "distr_src_path", p.config.DistrSrcPath))

	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) warnings := make([]string, 0) if b.config.DiskName == "" { b.config.DiskName = "disk" } if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } if b.config.DiskTypeId == "" { // Default is growable virtual disk split in 2GB files. b.config.DiskTypeId = "1" if b.config.RemoteType == "esx5" { b.config.DiskTypeId = "zeroedthick" } } if b.config.FloppyFiles == nil { b.config.FloppyFiles = make([]string, 0) } if b.config.GuestOSType == "" { b.config.GuestOSType = "other" } if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if b.config.RawBootWait == "" { b.config.RawBootWait = "10s" } if b.config.VNCPortMin == 0 { b.config.VNCPortMin = 5900 } if b.config.VNCPortMax == 0 { b.config.VNCPortMax = 6000 } if b.config.OutputDir == "" { b.config.OutputDir = fmt.Sprintf("output-%s", b.config.PackerBuildName) } if b.config.RemoteUser == "" { b.config.RemoteUser = "******" } if b.config.RemoteDatastore == "" { b.config.RemoteDatastore = "datastore1" } if b.config.RemotePort == 0 { b.config.RemotePort = 22 } if b.config.SSHPort == 0 { b.config.SSHPort = 22 } if b.config.ToolsUploadPath == "" { b.config.ToolsUploadPath = "{{ .Flavor }}.iso" } // Errors templates := map[string]*string{ "disk_name": &b.config.DiskName, "guest_os_type": &b.config.GuestOSType, "http_directory": &b.config.HTTPDir, "iso_checksum": &b.config.ISOChecksum, "iso_checksum_type": &b.config.ISOChecksumType, "iso_url": &b.config.RawSingleISOUrl, "output_directory": &b.config.OutputDir, "shutdown_command": &b.config.ShutdownCommand, "ssh_key_path": &b.config.SSHKeyPath, "ssh_password": &b.config.SSHPassword, "ssh_username": &b.config.SSHUser, "tools_upload_flavor": &b.config.ToolsUploadFlavor, "vm_name": &b.config.VMName, "boot_wait": &b.config.RawBootWait, "shutdown_timeout": &b.config.RawShutdownTimeout, "ssh_wait_timeout": &b.config.RawSSHWaitTimeout, "vmx_template_path": &b.config.VMXTemplatePath, "remote_type": &b.config.RemoteType, "remote_host": &b.config.RemoteHost, "remote_datastore": &b.config.RemoteDatastore, "remote_user": &b.config.RemoteUser, "remote_password": &b.config.RemotePassword, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, url := range b.config.ISOUrls { var err error b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) } } for i, command := range b.config.BootCommand { if err := b.config.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } for i, file := range b.config.FloppyFiles { var err error b.config.FloppyFiles[i], err = b.config.tpl.Process(file, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing floppy_files[%d]: %s", i, err)) } } newVMXData := make(map[string]string) for k, v := range b.config.VMXData { k, err = 
b.config.tpl.Process(k, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing VMX data key %s: %s", k, err)) continue } v, err = b.config.tpl.Process(v, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing VMX data value '%s': %s", v, err)) continue } newVMXData[k] = v } b.config.VMXData = newVMXData if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } if b.config.ISOChecksum == "" { errs = packer.MultiErrorAppend( errs, errors.New("Due to large file sizes, an iso_checksum is required")) } else { b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) } if b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, errors.New("The iso_checksum_type must be specified.")) } else { b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) if h := common.HashForType(b.config.ISOChecksumType); h == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) } } if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("One of iso_url or iso_urls must be specified.")) } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { errs = packer.MultiErrorAppend( errs, errors.New("Only one of iso_url or iso_urls may be specified.")) } else if b.config.RawSingleISOUrl != "" { b.config.ISOUrls = []string{b.config.RawSingleISOUrl} } for i, url := range b.config.ISOUrls { b.config.ISOUrls[i], err = common.DownloadableURL(url) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) } } if !b.config.PackerForce { if _, err := os.Stat(b.config.OutputDir); err == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Output directory '%s' already exists. It must not exist.", b.config.OutputDir)) } }
if b.config.SSHKeyPath != "" { if _, err := os.Stat(b.config.SSHKeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) } else if _, err := sshKeyToKeyring(b.config.SSHKeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) } } if b.config.SSHUser == "" { errs = packer.MultiErrorAppend( errs, errors.New("An ssh_username must be specified.")) } if b.config.RawBootWait != "" { b.config.bootWait, err = time.ParseDuration(b.config.RawBootWait) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing boot_wait: %s", err)) } } if b.config.RawShutdownTimeout == "" { b.config.RawShutdownTimeout = "5m" } b.config.shutdownTimeout, err = time.ParseDuration(b.config.RawShutdownTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing shutdown_timeout: %s", err)) } if b.config.RawSSHWaitTimeout == "" { b.config.RawSSHWaitTimeout = "20m" } b.config.sshWaitTimeout, err = time.ParseDuration(b.config.RawSSHWaitTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) } if _, err := template.New("path").Parse(b.config.ToolsUploadPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("tools_upload_path invalid: %s", err)) } if b.config.VMXTemplatePath != "" { if err := b.validateVMXTemplatePath(); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("vmx_template_path is invalid: %s", err)) } } if b.config.VNCPortMin > b.config.VNCPortMax { errs = packer.MultiErrorAppend( errs, errors.New("vnc_port_min must be less than vnc_port_max")) } // Remote configuration validation if b.config.RemoteType != "" { if b.config.RemoteHost == "" { errs = packer.MultiErrorAppend(errs, errors.New("remote_host must be specified")) } } // Warnings if b.config.ShutdownCommand == "" { warnings = append(warnings, "A shutdown_command was not specified. Without a shutdown command, Packer\n"+ "will forcibly halt the virtual machine, which may result in data loss.") } if errs != nil && len(errs.Errors) > 0 { return warnings, errs } return warnings, nil }
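The boot_wait, shutdown_timeout, and ssh_wait_timeout handling above all follow one pattern: default the raw string when it is empty, then run it through time.ParseDuration and collect any parse error. A small sketch of that pattern in isolation (the helper name and values are illustrative, not part of the builder):

package main

import (
	"fmt"
	"time"
)

// parseDurationWithDefault mirrors the raw-string handling above: an empty
// value falls back to the default before being parsed.
func parseDurationWithDefault(raw, def string) (time.Duration, error) {
	if raw == "" {
		raw = def
	}
	return time.ParseDuration(raw)
}

func main() {
	bootWait, err := parseDurationWithDefault("", "10s") // unset boot_wait
	fmt.Println(bootWait, err)                           // 10s <nil>

	_, err = parseDurationWithDefault("5 minutes", "5m") // invalid user input
	fmt.Println(err)                                     // this is the error appended to errs
}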
func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) md, err := common.DecodeConfig(c, raws...) if err != nil { return nil, nil, err } c.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, nil, err } c.tpl.UserVars = c.PackerUserVars // Prepare the errors errs := common.CheckUnusedConfig(md) // Set defaults. if c.Network == "" { c.Network = "default" } if c.ImageDescription == "" { c.ImageDescription = "Created by Packer" } if c.ImageName == "" { c.ImageName = "packer-{{timestamp}}" } if c.MachineType == "" { c.MachineType = "n1-standard-1" } if c.RawSSHTimeout == "" { c.RawSSHTimeout = "5m" } if c.RawStateTimeout == "" { c.RawStateTimeout = "5m" } if c.SSHUsername == "" { c.SSHUsername = "******" } if c.SSHPort == 0 { c.SSHPort = 22 } // Process Templates templates := map[string]*string{ "bucket_name": &c.BucketName, "client_secrets_file": &c.ClientSecretsFile, "image_name": &c.ImageName, "image_description": &c.ImageDescription, "machine_type": &c.MachineType, "network": &c.Network, "passphrase": &c.Passphrase, "private_key_file": &c.PrivateKeyFile, "project_id": &c.ProjectId, "source_image": &c.SourceImage, "ssh_username": &c.SSHUsername, "ssh_timeout": &c.RawSSHTimeout, "state_timeout": &c.RawStateTimeout, "zone": &c.Zone, } for n, ptr := range templates { var err error *ptr, err = c.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } // Process required parameters. if c.BucketName == "" { errs = packer.MultiErrorAppend( errs, errors.New("a bucket_name must be specified")) } if c.ClientSecretsFile == "" { errs = packer.MultiErrorAppend( errs, errors.New("a client_secrets_file must be specified")) } if c.PrivateKeyFile == "" { errs = packer.MultiErrorAppend( errs, errors.New("a private_key_file must be specified")) } if c.ProjectId == "" { errs = packer.MultiErrorAppend( errs, errors.New("a project_id must be specified")) } if c.SourceImage == "" { errs = packer.MultiErrorAppend( errs, errors.New("a source_image must be specified")) } if c.Zone == "" { errs = packer.MultiErrorAppend( errs, errors.New("a zone must be specified")) } // Process timeout settings. sshTimeout, err := time.ParseDuration(c.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } c.sshTimeout = sshTimeout stateTimeout, err := time.ParseDuration(c.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } c.stateTimeout = stateTimeout if c.ClientSecretsFile != "" { // Load the client secrets file. cs, err := loadClientSecrets(c.ClientSecretsFile) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing client secrets file: %s", err)) } c.clientSecrets = cs } if c.PrivateKeyFile != "" { // Load the private key. c.privateKeyBytes, err = processPrivateKeyFile(c.PrivateKeyFile, c.Passphrase) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed loading private key file: %s", err)) } } // Check for any errors. if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } return c, nil, nil }
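The templates map above registers each string field once so that a single loop can process them all; adding a field only requires a new map entry. A stripped-down sketch of that map-of-pointers pattern, with process standing in for c.tpl.Process and a made-up timestamp value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two stand-in config fields, analogous to c.ImageName and c.Zone.
	imageName := "packer-{{timestamp}}"
	zone := "us-central1-a"

	templates := map[string]*string{
		"image_name": &imageName,
		"zone":       &zone,
	}

	// process is a stand-in for c.tpl.Process; it only expands {{timestamp}}.
	process := func(s string) (string, error) {
		return strings.Replace(s, "{{timestamp}}", "1700000000", -1), nil
	}

	for n, ptr := range templates {
		out, err := process(*ptr)
		if err != nil {
			fmt.Printf("Error processing %s: %s\n", n, err)
			continue
		}
		*ptr = out // write the processed value back through the pointer
	}
	fmt.Println(imageName, zone) // packer-1700000000 us-central1-a
}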
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars b.config.tpl.Funcs(awscommon.TemplateFuncs) if b.config.BundleDestination == "" { b.config.BundleDestination = "/tmp" } if b.config.BundlePrefix == "" { b.config.BundlePrefix = "image-{{timestamp}}" } if b.config.BundleUploadCommand == "" { b.config.BundleUploadCommand = "sudo -n ec2-upload-bundle " + "-b {{.BucketName}} " + "-m {{.ManifestPath}} " + "-a {{.AccessKey}} " + "-s {{.SecretKey}} " + "-d {{.BundleDirectory}} " + "--batch " + "--url {{.S3Endpoint}} " + "--retry" } if b.config.BundleVolCommand == "" { b.config.BundleVolCommand = "sudo -n ec2-bundle-vol " + "-k {{.KeyPath}} " + "-u {{.AccountId}} " + "-c {{.CertPath}} " + "-r {{.Architecture}} " + "-e {{.PrivatePath}}/* " + "-d {{.Destination}} " + "-p {{.Prefix}} " + "--batch" } if b.config.X509UploadPath == "" { b.config.X509UploadPath = "/tmp" } // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) validates := map[string]*string{ "bundle_upload_command": &b.config.BundleUploadCommand, "bundle_vol_command": &b.config.BundleVolCommand, } for n, ptr := range validates { if err := b.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } templates := map[string]*string{ "account_id": &b.config.AccountId, "ami_name": &b.config.AMIName, "bundle_destination": &b.config.BundleDestination, "bundle_prefix": &b.config.BundlePrefix, "s3_bucket": &b.config.S3Bucket, "x509_cert_path": &b.config.X509CertPath, "x509_key_path": &b.config.X509KeyPath, "x509_upload_path": &b.config.X509UploadPath, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if b.config.AccountId == "" { errs = packer.MultiErrorAppend(errs, errors.New("account_id is required")) } else { b.config.AccountId = strings.Replace(b.config.AccountId, "-", "", -1) } if b.config.S3Bucket == "" { errs = packer.MultiErrorAppend(errs, errors.New("s3_bucket is required")) } if b.config.X509CertPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_cert_path is required")) } else if _, err := os.Stat(b.config.X509CertPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_cert_path points to bad file: %s", err)) } if b.config.X509KeyPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_key_path is required")) } else if _, err := os.Stat(b.config.X509KeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_key_path points to bad file: %s", err)) } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
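The default bundle_upload_command and bundle_vol_command use Go template placeholders ({{.BucketName}}, {{.KeyPath}}, and so on) that the builder fills in when it later runs the command. A hedged sketch of how such a command string expands, using a hypothetical data struct whose fields simply mirror a few of those placeholders:

package main

import (
	"os"
	"text/template"
)

// uploadCmdData is illustrative only; its fields match placeholders that
// appear in the default bundle_upload_command above.
type uploadCmdData struct {
	BucketName      string
	ManifestPath    string
	BundleDirectory string
	S3Endpoint      string
}

func main() {
	const cmd = "ec2-upload-bundle -b {{.BucketName}} -m {{.ManifestPath}} " +
		"-d {{.BundleDirectory}} --url {{.S3Endpoint}} --batch --retry"

	tpl := template.Must(template.New("upload").Parse(cmd))
	data := uploadCmdData{
		BucketName:      "my-bucket",
		ManifestPath:    "/tmp/image.manifest.xml",
		BundleDirectory: "/tmp",
		S3Endpoint:      "https://s3.amazonaws.com",
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}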
func (b *Builder) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) if b.config.DiskSize == 0 { b.config.DiskSize = 40000 } if b.config.FloppyFiles == nil { b.config.FloppyFiles = make([]string, 0) } if b.config.GuestAdditionsPath == "" { b.config.GuestAdditionsPath = "VBoxGuestAdditions.iso" } if b.config.HardDriveInterface == "" { b.config.HardDriveInterface = "ide" } if b.config.GuestOSType == "" { b.config.GuestOSType = "Other" } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if b.config.OutputDir == "" { b.config.OutputDir = fmt.Sprintf("output-%s", b.config.PackerBuildName) } if b.config.RawBootWait == "" { b.config.RawBootWait = "10s" } if b.config.SSHHostPortMin == 0 { b.config.SSHHostPortMin = 2222 } if b.config.SSHHostPortMax == 0 { b.config.SSHHostPortMax = 4444 } if b.config.SSHPort == 0 { b.config.SSHPort = 22 } if b.config.VBoxManage == nil { b.config.VBoxManage = make([][]string, 0) } if b.config.VBoxVersionFile == "" { b.config.VBoxVersionFile = ".vbox_version" } if b.config.VMName == "" { b.config.VMName = fmt.Sprintf("packer-%s", b.config.PackerBuildName) } if b.config.Format == "" { b.config.Format = "ovf" } // Errors templates := map[string]*string{ "guest_additions_sha256": &b.config.GuestAdditionsSHA256, "guest_os_type": &b.config.GuestOSType, "hard_drive_interface": &b.config.HardDriveInterface, "http_directory": &b.config.HTTPDir, "iso_checksum": &b.config.ISOChecksum, "iso_checksum_type": &b.config.ISOChecksumType, "iso_url": &b.config.RawSingleISOUrl, "output_directory": &b.config.OutputDir, "shutdown_command": &b.config.ShutdownCommand, "ssh_key_path": &b.config.SSHKeyPath, "ssh_password": &b.config.SSHPassword, "ssh_username": &b.config.SSHUser, "virtualbox_version_file": &b.config.VBoxVersionFile, "vm_name": &b.config.VMName, "format": &b.config.Format, "boot_wait": &b.config.RawBootWait, "shutdown_timeout": &b.config.RawShutdownTimeout, "ssh_wait_timeout": &b.config.RawSSHWaitTimeout, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } for i, url := range b.config.ISOUrls { var err error b.config.ISOUrls[i], err = b.config.tpl.Process(url, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing iso_urls[%d]: %s", i, err)) } } validates := map[string]*string{ "guest_additions_path": &b.config.GuestAdditionsPath, "guest_additions_url": &b.config.GuestAdditionsURL, } for n, ptr := range validates { if err := b.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } for i, command := range b.config.BootCommand { if err := b.config.tpl.Validate(command); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing boot_command[%d]: %s", i, err)) } } for i, file := range b.config.FloppyFiles { var err error b.config.FloppyFiles[i], err = b.config.tpl.Process(file, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing floppy_files[%d]: %s", i, err)) } } if !(b.config.Format == "ovf" || b.config.Format == "ova") { errs = packer.MultiErrorAppend( errs, 
errors.New("invalid format, only 'ovf' or 'ova' are allowed")) } if b.config.HardDriveInterface != "ide" && b.config.HardDriveInterface != "sata" { errs = packer.MultiErrorAppend( errs, errors.New("hard_drive_interface can only be ide or sata")) } if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } if b.config.ISOChecksum == "" { errs = packer.MultiErrorAppend( errs, errors.New("Due to large file sizes, an iso_checksum is required")) } else { b.config.ISOChecksum = strings.ToLower(b.config.ISOChecksum) } if b.config.ISOChecksumType == "" { errs = packer.MultiErrorAppend( errs, errors.New("The iso_checksum_type must be specified.")) } else { b.config.ISOChecksumType = strings.ToLower(b.config.ISOChecksumType) if h := common.HashForType(b.config.ISOChecksumType); h == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unsupported checksum type: %s", b.config.ISOChecksumType)) } } if b.config.RawSingleISOUrl == "" && len(b.config.ISOUrls) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("One of iso_url or iso_urls must be specified.")) } else if b.config.RawSingleISOUrl != "" && len(b.config.ISOUrls) > 0 { errs = packer.MultiErrorAppend( errs, errors.New("Only one of iso_url or iso_urls may be specified.")) } else if b.config.RawSingleISOUrl != "" { b.config.ISOUrls = []string{b.config.RawSingleISOUrl} } for i, url := range b.config.ISOUrls { b.config.ISOUrls[i], err = common.DownloadableURL(url) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed to parse iso_url %d: %s", i+1, err)) } } if b.config.GuestAdditionsSHA256 != "" { b.config.GuestAdditionsSHA256 = strings.ToLower(b.config.GuestAdditionsSHA256) } if !b.config.PackerForce { if _, err := os.Stat(b.config.OutputDir); err == nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Output directory '%s' already exists. It must not exist.", b.config.OutputDir)) } }
b.config.bootWait, err = time.ParseDuration(b.config.RawBootWait) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing boot_wait: %s", err)) } if b.config.RawShutdownTimeout == "" { b.config.RawShutdownTimeout = "5m" } if b.config.RawSSHWaitTimeout == "" { b.config.RawSSHWaitTimeout = "20m" } b.config.shutdownTimeout, err = time.ParseDuration(b.config.RawShutdownTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing shutdown_timeout: %s", err)) } if b.config.SSHKeyPath != "" { if _, err := os.Stat(b.config.SSHKeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) } else if _, err := sshKeyToKeyring(b.config.SSHKeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("ssh_key_path is invalid: %s", err)) } } if b.config.SSHHostPortMin > b.config.SSHHostPortMax { errs = packer.MultiErrorAppend( errs, errors.New("ssh_host_port_min must be less than ssh_host_port_max")) } if b.config.SSHUser == "" { errs = packer.MultiErrorAppend( errs, errors.New("An ssh_username must be specified.")) } b.config.sshWaitTimeout, err = time.ParseDuration(b.config.RawSSHWaitTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_wait_timeout: %s", err)) } for i, args := range b.config.VBoxManage { for j, arg := range args { if err := b.config.tpl.Validate(arg); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing vboxmanage[%d][%d]: %s", i, j, err)) } } } if errs != nil && len(errs.Errors) > 0 { return errs } return nil }
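Both ISO-based builders above normalize iso_url and iso_urls the same way: exactly one of the two may be set, and a single URL is folded into the slice so the rest of the code only deals with ISOUrls. A standalone sketch of that check (the function name is mine, not the builder's):

package main

import (
	"errors"
	"fmt"
)

// normalizeISOUrls distills the iso_url / iso_urls validation used above:
// require exactly one of the two, and fold a single URL into the slice.
func normalizeISOUrls(single string, urls []string) ([]string, error) {
	switch {
	case single == "" && len(urls) == 0:
		return nil, errors.New("one of iso_url or iso_urls must be specified")
	case single != "" && len(urls) > 0:
		return nil, errors.New("only one of iso_url or iso_urls may be specified")
	case single != "":
		return []string{single}, nil
	}
	return urls, nil
}

func main() {
	urls, err := normalizeISOUrls("http://example.com/dist.iso", nil)
	fmt.Println(urls, err) // [http://example.com/dist.iso] <nil>

	_, err = normalizeISOUrls("", nil)
	fmt.Println(err) // one of iso_url or iso_urls must be specified
}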