func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.Password)) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) error { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return err } b.config.tpl.UserVars = b.config.PackerUserVars b.config.tpl.Funcs(awscommon.TemplateFuncs) // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) if errs != nil && len(errs.Errors) > 0 { return errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil }
func (builder *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&builder.config, raws...) if err != nil { return nil, err } builder.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } builder.config.tpl.UserVars = builder.config.PackerUserVars builder.config.tpl.Funcs(awscommon.TemplateFuncs) if builder.config.VolumeSize == 0 { builder.config.VolumeSize = 12 } if builder.config.VolumeType == "" { builder.config.VolumeType = "standard" } if builder.config.RootDeviceName == "" { builder.config.RootDeviceName = "/dev/xvda" } builder.ensureWorkerDeviceMapping() // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, builder.config.AccessConfig.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.BlockDevices.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.AMIConfig.Prepare(builder.config.tpl)...) errs = packer.MultiErrorAppend(errs, builder.config.RunConfig.Prepare(builder.config.tpl)...) templates := map[string]*string{ "worker_device_name": &builder.config.WorkerDeviceName, "root_device_name": &builder.config.RootDeviceName, } for n, ptr := range templates { var err error *ptr, err = builder.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(builder.config, builder.config.AccessKey, builder.config.SecretKey)) return nil, nil }
// Configure is the entry point for configuration parsing. func (p *PostProcessor) Configure(raws ...interface{}) error { p.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&p.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &p.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "s3_key_name", }, }, }, raws...) if err != nil { return err } // Set defaults if p.config.S3Key == "" { p.config.S3Key = "packer-import-{{timestamp}}.ova" } errs := new(packer.MultiError) // Check and render s3_key_name if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing s3_key_name template: %s", err)) } // Check we have AWS access variables defined somewhere errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...) // Define all our required parameters templates := map[string]*string{ "s3_bucket_name": &p.config.S3Bucket, } // Check that our required params are defined for key, ptr := range templates { if *ptr == "" { errs = packer.MultiErrorAppend( errs, fmt.Errorf("%s must be set", key)) } } // Return anything that was flagged back up the stack if len(errs.Errors) > 0 { return errs } log.Println(common.ScrubConfig(p.config, p.config.AccessKey, p.config.SecretKey)) return nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, }, raws...) if err != nil { return nil, err } // Accumulate any errors var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.Password)) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, }, raws...) if err != nil { return nil, err } // Accumulate any errors var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, }, raws...) if err != nil { return nil, err } var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.ImageConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.WorkloadConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.Comm.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.MachineTypeConfig.Prepare(&b.config.ctx)...) if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessToken, b.config.MacKey)) return nil, nil }
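The Prepare variants above all share the same validation shape: decode the raw config, append every problem to a single multi-error, and only fail once all checks have run. The stand-alone sketch below illustrates that accumulate-then-check idea with nothing but the standard library (errors.Join standing in for packer.MultiErrorAppend); it is an illustration of the pattern, not Packer code, and the config fields are made up.

// Stand-alone sketch of the accumulate-then-check validation pattern used by
// the Prepare functions above. errors.Join stands in for packer.MultiErrorAppend.
package main

import (
	"errors"
	"fmt"
)

type config struct {
	Region string
	Size   string
}

// validate collects every problem instead of failing on the first one,
// mirroring how Prepare appends to a *packer.MultiError and only returns
// it when at least one error was recorded.
func validate(c config) error {
	var errs []error
	if c.Region == "" {
		errs = append(errs, errors.New("region is required"))
	}
	if c.Size == "" {
		errs = append(errs, errors.New("size is required"))
	}
	if len(errs) > 0 {
		return errors.Join(errs...)
	}
	return nil
}

func main() {
	// Both errors are reported at once rather than one per run.
	if err := validate(config{}); err != nil {
		fmt.Println(err)
	}
}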
func NewConfig(raws ...interface{}) (*Config, []string, error) { c := new(Config) var md mapstructure.Metadata err := config.Decode(c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", }, }, }, raws...) if err != nil { return nil, nil, err } // Defaults if c.APIToken == "" { // Default to environment variable for api_token, if it exists c.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN") } if c.SnapshotName == "" { def, err := interpolate.Render("packer-{{timestamp}}", nil) if err != nil { panic(err) } // Default to packer-{{ unix timestamp (utc) }} c.SnapshotName = def } if c.DropletName == "" { // Default to packer-[time-ordered-uuid] c.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } if c.Comm.SSHUsername == "" { // Default to "root". You can override this if your // SourceImage has a different user account than the DO default c.Comm.SSHUsername = "root" } if c.StateTimeout == 0 { // Default to 6 minute timeouts waiting for // desired state. i.e. waiting for droplet to become active c.StateTimeout = 6 * time.Minute } var errs *packer.MultiError if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } if c.APIToken == "" { // Required configurations that will display errors if not set errs = packer.MultiErrorAppend( errs, errors.New("api_token for auth must be specified")) } if c.Region == "" { errs = packer.MultiErrorAppend( errs, errors.New("region is required")) } if c.Size == "" { errs = packer.MultiErrorAppend( errs, errors.New("size is required")) } if c.Image == "" { errs = packer.MultiErrorAppend( errs, errors.New("image is required")) } if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } common.ScrubConfig(c, c.APIToken) return c, nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ctx = &interpolate.Context{Funcs: awscommon.TemplateFuncs} err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "bundle_upload_command", "bundle_vol_command", }, }, }, raws...) if err != nil { return nil, err } if b.config.BundleDestination == "" { b.config.BundleDestination = "/tmp" } if b.config.BundlePrefix == "" { b.config.BundlePrefix = "image-{{timestamp}}" } if b.config.BundleUploadCommand == "" { b.config.BundleUploadCommand = "sudo -i -n ec2-upload-bundle " + "-b {{.BucketName}} " + "-m {{.ManifestPath}} " + "-a {{.AccessKey}} " + "-s {{.SecretKey}} " + "-d {{.BundleDirectory}} " + "--batch " + "--location {{.Region}} " + "--retry" } if b.config.BundleVolCommand == "" { b.config.BundleVolCommand = "sudo -i -n ec2-bundle-vol " + "-k {{.KeyPath}} " + "-u {{.AccountId}} " + "-c {{.CertPath}} " + "-r {{.Architecture}} " + "-e {{.PrivatePath}}/* " + "-d {{.Destination}} " + "-p {{.Prefix}} " + "--batch " + "--no-filter" } if b.config.X509UploadPath == "" { b.config.X509UploadPath = "/tmp" } // Accumulate any errors var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.ctx)...) if b.config.AccountId == "" { errs = packer.MultiErrorAppend(errs, errors.New("account_id is required")) } else { b.config.AccountId = strings.Replace(b.config.AccountId, "-", "", -1) } if b.config.S3Bucket == "" { errs = packer.MultiErrorAppend(errs, errors.New("s3_bucket is required")) } if b.config.X509CertPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_cert_path is required")) } else if _, err := os.Stat(b.config.X509CertPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_cert_path points to bad file: %s", err)) } if b.config.X509KeyPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_key_path is required")) } else if _, err := os.Stat(b.config.X509KeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_key_path points to bad file: %s", err)) } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) // Optional configuration with defaults if b.config.APIKey == "" { // Default to environment variable for api_key, if it exists b.config.APIKey = os.Getenv("DIGITALOCEAN_API_KEY") } if b.config.ClientID == "" { // Default to environment variable for client_id, if it exists b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID") } if b.config.RegionID == 0 { // Default to Region "New York" b.config.RegionID = 1 } if b.config.SizeID == 0 { // Default to 512mb, the smallest droplet size b.config.SizeID = 66 } if b.config.ImageID == 0 { // Default to base image "Ubuntu 12.04.4 x64 (id: 3101045)" b.config.ImageID = 3101045 } if b.config.SnapshotName == "" { // Default to packer-{{ unix timestamp (utc) }} b.config.SnapshotName = "packer-{{timestamp}}" } if b.config.DropletName == "" { // Default to packer-[time-ordered-uuid] b.config.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } if b.config.SSHUsername == "" { // Default to "root". You can override this if your // SourceImage has a different user account than the DO default b.config.SSHUsername = "root" } if b.config.SSHPort == 0 { // Default to port 22 per DO default b.config.SSHPort = 22 } if b.config.RawSSHTimeout == "" { // Default to 1 minute timeouts b.config.RawSSHTimeout = "1m" } if b.config.RawStateTimeout == "" { // Default to 6 minute timeouts waiting for // desired state. i.e. waiting for droplet to become active b.config.RawStateTimeout = "6m" } templates := map[string]*string{ "client_id": &b.config.ClientID, "api_key": &b.config.APIKey, "snapshot_name": &b.config.SnapshotName, "droplet_name": &b.config.DropletName, "ssh_username": &b.config.SSHUsername, "ssh_timeout": &b.config.RawSSHTimeout, "state_timeout": &b.config.RawStateTimeout, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } // Required configurations that will display errors if not set if b.config.ClientID == "" { errs = packer.MultiErrorAppend( errs, errors.New("a client_id must be specified")) } if b.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("an api_key must be specified")) } sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } b.config.sshTimeout = sshTimeout stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } b.config.stateTimeout = stateTimeout if errs != nil && len(errs.Errors) > 0 { return nil, errs } common.ScrubConfig(b.config, b.config.ClientID, b.config.APIKey) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "command_wrapper", "mount_path", }, }, }, raws...) if err != nil { return nil, err } // Defaults if b.config.ChrootMounts == nil { b.config.ChrootMounts = make([][]string, 0) } if b.config.CopyFiles == nil { b.config.CopyFiles = make([]string, 0) } if len(b.config.ChrootMounts) == 0 { b.config.ChrootMounts = [][]string{ []string{"proc", "proc", "/proc"}, []string{"sysfs", "sysfs", "/sys"}, []string{"bind", "/dev", "/dev"}, []string{"devpts", "devpts", "/dev/pts"}, []string{"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"}, } } if len(b.config.CopyFiles) == 0 { b.config.CopyFiles = []string{"/etc/resolv.conf"} } if b.config.CommandWrapper == "" { b.config.CommandWrapper = "{{.Command}}" } if b.config.MountPath == "" { b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}" } if b.config.MountPartition == 0 { b.config.MountPartition = 1 } // Accumulate any errors var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...) for _, mounts := range b.config.ChrootMounts { if len(mounts) != 3 { errs = packer.MultiErrorAppend( errs, errors.New("Each chroot_mounts entry should be three elements.")) break } } if b.config.SourceAmi == "" { errs = packer.MultiErrorAppend(errs, errors.New("source_ami is required.")) } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
// Prepare processes the build configuration parameters. func (self *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&self.config, &config.DecodeOpts{ Interpolate: true, }, raws...) if err != nil { return nil, err } errs := &packer.MultiError{} // Assign default values if possible if self.config.APIKey == "" { // Default to environment variable for api_key, if it exists self.config.APIKey = os.Getenv("SOFTLAYER_API_KEY") } if self.config.Username == "" { // Default to environment variable for client_id, if it exists self.config.Username = os.Getenv("SOFTLAYER_USER_NAME") } if self.config.DatacenterName == "" { self.config.DatacenterName = "ams01" } if self.config.InstanceName == "" { self.config.InstanceName = fmt.Sprintf("packer-softlayer-%d", time.Now().Unix()) } if self.config.InstanceDomain == "" { self.config.InstanceDomain = "defaultdomain.com" } if self.config.ImageDescription == "" { self.config.ImageDescription = "Instance snapshot. Generated by packer.io" } if self.config.ImageType == "" { self.config.ImageType = IMAGE_TYPE_FLEX } if self.config.InstanceCpu == 0 { self.config.InstanceCpu = 1 } if self.config.InstanceMemory == 0 { self.config.InstanceMemory = 1024 } if self.config.InstanceNetworkSpeed == 0 { self.config.InstanceNetworkSpeed = 10 } if self.config.InstanceDiskCapacity == 0 { self.config.InstanceDiskCapacity = 25 } if self.config.Type == "" { self.config.Type = "ssh" } if self.config.SSHPort == 0 { self.config.SSHPort = 22 } if self.config.SSHUsername == "" { self.config.SSHUsername = "******" } if self.config.SSHTimeout == 0 { self.config.SSHTimeout = 5 * time.Minute } if self.config.StateTimeout == 0 { self.config.StateTimeout = 10 * time.Minute } // Check for required configurations that will display errors if not set if self.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("api_key or the SOFTLAYER_API_KEY environment variable must be specified")) } if self.config.Username == "" { errs = packer.MultiErrorAppend( errs, errors.New("username or the SOFTLAYER_USER_NAME environment variable must be specified")) } if self.config.ImageName == "" { errs = packer.MultiErrorAppend( errs, errors.New("image_name must be specified")) } if self.config.ImageType != IMAGE_TYPE_FLEX && self.config.ImageType != IMAGE_TYPE_STANDARD { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unknown image_type '%s'. Must be one of 'flex' (the default) or 'standard'.", self.config.ImageType)) } if self.config.BaseImageId == "" && self.config.BaseOsCode == "" { errs = packer.MultiErrorAppend( errs, errors.New("please specify base_image_id or base_os_code")) } if self.config.BaseImageId != "" && self.config.BaseOsCode != "" { errs = packer.MultiErrorAppend( errs, errors.New("please specify only one of base_image_id or base_os_code")) } if self.config.BaseImageId != "" && self.config.SSHPrivateKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("when using base_image_id, you must specify ssh_private_key_file "+ "since automatic ssh key config for custom images isn't supported by SoftLayer API")) } log.Println(common.ScrubConfig(self.config, self.config.APIKey, self.config.Username)) if len(errs.Errors) > 0 { return nil, errors.New(errs.Error()) } return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, }, raws...) if err != nil { return nil, err } // Optional configuration with defaults if b.config.APIKey == "" { // Default to environment variable for api_key, if it exists b.config.APIKey = os.Getenv("DIGITALOCEAN_API_KEY") } if b.config.ClientID == "" { // Default to environment variable for client_id, if it exists b.config.ClientID = os.Getenv("DIGITALOCEAN_CLIENT_ID") } if b.config.APIURL == "" { // Default to environment variable for api_url, if it exists b.config.APIURL = os.Getenv("DIGITALOCEAN_API_URL") } if b.config.APIToken == "" { // Default to environment variable for api_token, if it exists b.config.APIToken = os.Getenv("DIGITALOCEAN_API_TOKEN") } if b.config.Region == "" { if b.config.RegionID != 0 { b.config.Region = fmt.Sprintf("%v", b.config.RegionID) } else { b.config.Region = DefaultRegion } } if b.config.Size == "" { if b.config.SizeID != 0 { b.config.Size = fmt.Sprintf("%v", b.config.SizeID) } else { b.config.Size = DefaultSize } } if b.config.Image == "" { if b.config.ImageID != 0 { b.config.Image = fmt.Sprintf("%v", b.config.ImageID) } else { b.config.Image = DefaultImage } } if b.config.SnapshotName == "" { // Default to packer-{{ unix timestamp (utc) }} b.config.SnapshotName = "packer-{{timestamp}}" } if b.config.DropletName == "" { // Default to packer-[time-ordered-uuid] b.config.DropletName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) } if b.config.SSHUsername == "" { // Default to "root". You can override this if your // SourceImage has a different user account than the DO default b.config.SSHUsername = "root" } if b.config.SSHPort == 0 { // Default to port 22 per DO default b.config.SSHPort = 22 } if b.config.RawSSHTimeout == "" { // Default to 1 minute timeouts b.config.RawSSHTimeout = "1m" } if b.config.RawStateTimeout == "" { // Default to 6 minute timeouts waiting for // desired state. i.e. waiting for droplet to become active b.config.RawStateTimeout = "6m" } var errs *packer.MultiError if b.config.APIToken == "" { // Required configurations that will display errors if not set if b.config.ClientID == "" { errs = packer.MultiErrorAppend( errs, errors.New("a client_id for v1 auth or api_token for v2 auth must be specified")) } if b.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("an api_key for v1 auth or api_token for v2 auth must be specified")) } } if b.config.APIURL == "" { b.config.APIURL = "https://api.digitalocean.com" } sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } b.config.sshTimeout = sshTimeout stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } b.config.stateTimeout = stateTimeout if errs != nil && len(errs.Errors) > 0 { return nil, errs } common.ScrubConfig(b.config, b.config.ClientID, b.config.APIKey) return nil, nil }
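Several of the builders above carry their timeouts as raw strings (RawSSHTimeout, RawStateTimeout) and only convert them to time.Duration during Prepare. The minimal sketch below shows just that conversion step with time.ParseDuration; the variable names are illustrative and not taken from any particular builder.

// Minimal illustration of the raw-string-to-time.Duration conversion that the
// builders above apply to values such as ssh_timeout and state_timeout.
package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	rawSSHTimeout := "1m"   // as it would arrive from the decoded template
	rawStateTimeout := "6m"

	sshTimeout, err := time.ParseDuration(rawSSHTimeout)
	if err != nil {
		log.Fatalf("Failed parsing ssh_timeout: %s", err)
	}
	stateTimeout, err := time.ParseDuration(rawStateTimeout)
	if err != nil {
		log.Fatalf("Failed parsing state_timeout: %s", err)
	}
	fmt.Println(sshTimeout, stateTimeout) // 1m0s 6m0s
}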
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { b.config.ctx.Funcs = awscommon.TemplateFuncs err := config.Decode(&b.config, &config.DecodeOpts{ Interpolate: true, InterpolateContext: &b.config.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "command_wrapper", "post_mount_commands", "pre_mount_commands", "mount_path", }, }, }, raws...) if err != nil { return nil, err } // Defaults if b.config.ChrootMounts == nil { b.config.ChrootMounts = make([][]string, 0) } if b.config.CopyFiles == nil { b.config.CopyFiles = make([]string, 0) } if len(b.config.ChrootMounts) == 0 { b.config.ChrootMounts = [][]string{ {"proc", "proc", "/proc"}, {"sysfs", "sysfs", "/sys"}, {"bind", "/dev", "/dev"}, {"devpts", "devpts", "/dev/pts"}, {"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"}, } } if len(b.config.CopyFiles) == 0 && !b.config.FromScratch { b.config.CopyFiles = []string{"/etc/resolv.conf"} } if b.config.CommandWrapper == "" { b.config.CommandWrapper = "{{.Command}}" } if b.config.MountPath == "" { b.config.MountPath = "/mnt/packer-amazon-chroot-volumes/{{.Device}}" } if b.config.MountPartition == 0 { b.config.MountPartition = 1 } // Accumulate any errors or warnings var errs *packer.MultiError var warns []string errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(&b.config.ctx)...) for _, mounts := range b.config.ChrootMounts { if len(mounts) != 3 { errs = packer.MultiErrorAppend( errs, errors.New("Each chroot_mounts entry should be three elements.")) break } } if b.config.FromScratch { if b.config.SourceAmi != "" || !b.config.SourceAmiFilter.Empty() { warns = append(warns, "source_ami and source_ami_filter are unused when from_scratch is true") } if b.config.RootVolumeSize == 0 { errs = packer.MultiErrorAppend( errs, errors.New("root_volume_size is required with from_scratch.")) } if len(b.config.PreMountCommands) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("pre_mount_commands is required with from_scratch.")) } if b.config.AMIVirtType == "" { errs = packer.MultiErrorAppend( errs, errors.New("ami_virtualization_type is required with from_scratch.")) } if b.config.RootDeviceName == "" { errs = packer.MultiErrorAppend( errs, errors.New("root_device_name is required with from_scratch.")) } if len(b.config.AMIMappings) == 0 { errs = packer.MultiErrorAppend( errs, errors.New("ami_block_device_mappings is required with from_scratch.")) } } else { if b.config.SourceAmi == "" && b.config.SourceAmiFilter.Empty() { errs = packer.MultiErrorAppend( errs, errors.New("source_ami or source_ami_filter is required.")) } if len(b.config.AMIMappings) != 0 { warns = append(warns, "ami_block_device_mappings are unused when from_scratch is false") } if b.config.RootDeviceName != "" { warns = append(warns, "root_device_name is unused when from_scratch is false") } } if errs != nil && len(errs.Errors) > 0 { return warns, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return warns, nil }
// Prepare processes the build configuration parameters. func (self *Builder) Prepare(raws ...interface{}) (parms []string, retErr error) { metadata, err := common.DecodeConfig(&self.config, raws...) if err != nil { return nil, err } // Check that there aren't any unknown configuration keys defined errs := common.CheckUnusedConfig(metadata) if errs == nil { errs = &packer.MultiError{} } self.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } self.config.tpl.UserVars = self.config.PackerUserVars // Assign default values if possible if self.config.APIKey == "" { // Default to environment variable for api_key, if it exists self.config.APIKey = os.Getenv("SOFTLAYER_API_KEY") } if self.config.Username == "" { // Default to environment variable for client_id, if it exists self.config.Username = os.Getenv("SOFTLAYER_USER_NAME") } if self.config.DatacenterName == "" { self.config.DatacenterName = "ams01" } if self.config.InstanceName == "" { self.config.InstanceName = fmt.Sprintf("packer-softlayer-%d", time.Now().Unix()) } if self.config.InstanceDomain == "" { self.config.InstanceDomain = "defaultdomain.com" } if self.config.ImageDescription == "" { self.config.ImageDescription = "Instance snapshot. Generated by packer.io" } if self.config.ImageType == "" { self.config.ImageType = IMAGE_TYPE_FLEX } if self.config.InstanceCpu == 0 { self.config.InstanceCpu = 1 } if self.config.InstanceMemory == 0 { self.config.InstanceMemory = 1024 } if self.config.InstanceNetworkSpeed == 0 { self.config.InstanceNetworkSpeed = 10 } if self.config.InstanceDiskCapacity == 0 { self.config.InstanceDiskCapacity = 25 } if self.config.SshPort == 0 { self.config.SshPort = 22 } if self.config.SshUserName == "" { self.config.SshUserName = "******" } if self.config.RawSshTimeout == "" { self.config.RawSshTimeout = "5m" } if self.config.RawStateTimeout == "" { self.config.RawStateTimeout = "10m" } templates := map[string]*string{ "username": &self.config.Username, "api_key": &self.config.APIKey, "datacenter_name": &self.config.DatacenterName, "base_image_id": &self.config.BaseImageId, "image_name": &self.config.ImageName, "image_description": &self.config.ImageDescription, "image_type": &self.config.ImageType, "base_os_code": &self.config.BaseOsCode, "instance_name": &self.config.InstanceName, "instance_domain": &self.config.InstanceDomain, "ssh_timeout": &self.config.RawSshTimeout, "instance_state_timeout": &self.config.RawStateTimeout, "ssh_username": &self.config.SshUserName, "ssh_private_key_file": &self.config.SshPrivateKeyFile, } for n, ptr := range templates { var err error *ptr, err = self.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing %s: %s", n, err)) } } // Check for required configurations that will display errors if not set if self.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("api_key or the SOFTLAYER_API_KEY environment variable must be specified")) } if self.config.Username == "" { errs = packer.MultiErrorAppend( errs, errors.New("username or the SOFTLAYER_USER_NAME environment variable must be specified")) } if self.config.ImageName == "" { errs = packer.MultiErrorAppend( errs, errors.New("image_name must be specified")) } if self.config.ImageType != IMAGE_TYPE_FLEX && self.config.ImageType != IMAGE_TYPE_STANDARD { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Unknown image_type '%s'. Must be one of 'flex' (the default) or 'standard'.", self.config.ImageType)) } if self.config.BaseImageId == "" && self.config.BaseOsCode == "" { errs = packer.MultiErrorAppend( errs, errors.New("please specify base_image_id or base_os_code")) } if self.config.BaseImageId != "" && self.config.BaseOsCode != "" { errs = packer.MultiErrorAppend( errs, errors.New("please specify only one of base_image_id or base_os_code")) } if self.config.BaseImageId != "" && self.config.SshPrivateKeyFile == "" { errs = packer.MultiErrorAppend( errs, errors.New("when using base_image_id, you must specify ssh_private_key_file "+ "since automatic ssh key config for custom images isn't supported by SoftLayer API")) } // Translate duration configuration data from string to time format sshTimeout, err := time.ParseDuration(self.config.RawSshTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } self.config.SshTimeout = sshTimeout stateTimeout, err := time.ParseDuration(self.config.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } self.config.StateTimeout = stateTimeout log.Println(common.ScrubConfig(self.config, self.config.APIKey, self.config.Username)) if len(errs.Errors) > 0 { retErr = errors.New(errs.Error()) } return nil, retErr }
func newConfig(raws ...interface{}) (*Config, []string, error) { var c Config // Default provision timeout c.ProvisionTimeoutInMinutes = 120 c.ctx = &interpolate.Context{} err := config.Decode(&c, &config.DecodeOpts{ Interpolate: true, InterpolateContext: c.ctx, }, raws...) if err != nil { return nil, nil, err } // Defaults log.Println(fmt.Sprintf("%s: %v", "PackerUserVars", c.PackerConfig.PackerUserVars)) if c.StorageContainer == "" { c.StorageContainer = "vhds" } if c.UserName == "" { c.UserName = "******" } c.Comm.SSHUsername = c.UserName if c.Comm.SSHTimeout == 0 { c.Comm.SSHTimeout = 20 * time.Minute } randSuffix := azureCommon.RandomString("0123456789abcdefghijklmnopqrstuvwxyz", 10) c.tmpVmName = "PkrVM" + randSuffix c.tmpServiceName = "PkrSrv" + randSuffix c.tmpContainerName = "packer-provision-" + randSuffix // Check values var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, c.Comm.Prepare(c.ctx)...) if c.SubscriptionName == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("subscription_name must be specified")) } if c.PublishSettingsPath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("publish_settings_path must be specified")) } if c.StorageAccount == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("storage_account must be specified")) } if _, err := os.Stat(c.PublishSettingsPath); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("publish_settings_path is not a valid path: %s", err)) } if !(c.OSType == constants.Target_Linux) { errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_type is not valid, must be one of: %s", constants.Target_Linux)) } count := 0 if c.RemoteSourceImageLink != "" { count += 1 } if c.OSImageLabel != "" { count += 1 } if c.OSImageName != "" { count += 1 } if count != 1 { errs = packer.MultiErrorAppend(errs, fmt.Errorf("One source and only one among os_image_label, os_image_name or remote_source_image_link has to be specified")) } if c.Location == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("location must be specified")) } sizeIsValid := false for _, instanceSize := range allowedVMSizes { if c.InstanceSize == instanceSize { sizeIsValid = true break } } if !sizeIsValid { errs = packer.MultiErrorAppend(errs, fmt.Errorf("instance_size is not valid, must be one of: %v", allowedVMSizes)) } for n := 0; n < len(c.DataDisks); n++ { switch v := c.DataDisks[n].(type) { case string: case int: case float64: if v != math.Floor(v) { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Data disk # %d is a fractional number, needs to be integer", n)) } c.DataDisks[n] = int(v) default: errs = packer.MultiErrorAppend(errs, fmt.Errorf("Data disk # %d is not a string to an existing VHD nor an integer number, but a %T", n, v)) } } if c.UserImageLabel == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("user_image_label must be specified")) } const userLabelRegex = "^[A-Za-z][A-Za-z0-9-_.]*[A-Za-z0-9]$" if !regexp.MustCompile(userLabelRegex).MatchString(c.UserImageLabel) { errs = packer.MultiErrorAppend(errs, fmt.Errorf("user_image_label is not valid, it should follow the pattern %s", userLabelRegex)) } c.userImageName = c.UserImageLabel if (c.VNet != "" && c.Subnet == "") || (c.Subnet != "" && c.VNet == "") { errs = packer.MultiErrorAppend(errs, fmt.Errorf("vnet and subnet need to either both be set or both be empty")) } log.Println(common.ScrubConfig(c)) if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } return &c, nil, nil }
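The count-based check above enforces that exactly one of remote_source_image_link, os_image_label and os_image_name is set. The snippet below is a hedged, stand-alone version of that mutual-exclusion test, not the builder's own helper; the function name and message are invented.

// Generic "exactly one of these options must be set" check, mirroring the
// count-based source-image validation in the Azure config above.
package main

import (
	"errors"
	"fmt"
)

func exactlyOne(values ...string) error {
	count := 0
	for _, v := range values {
		if v != "" {
			count++
		}
	}
	if count != 1 {
		return errors.New("exactly one source option must be specified")
	}
	return nil
}

func main() {
	fmt.Println(exactlyOne("", "my-image-label", "")) // <nil>
	fmt.Println(exactlyOne("", "", ""))               // error: none set
	fmt.Println(exactlyOne("link", "label", ""))      // error: more than one set
}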
func NewConfig(raws ...interface{}) (*Config, []string, error) { var c Config var md mapstructure.Metadata err := config.Decode(&c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", }, }, }, raws...) if err != nil { return nil, nil, err } var errs *packer.MultiError if c.Comm.SSHPassword == "" && c.Comm.SSHPrivateKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("Either ssh private key path or ssh password must be set.")) } if c.SnapshotName == "" { def, err := interpolate.Render("packer-{{timestamp}}", nil) if err != nil { panic(err) } // Default to packer-{{ unix timestamp (utc) }} c.SnapshotName = def } if c.PBUsername == "" { c.PBUsername = os.Getenv("PROFITBRICKS_USERNAME") } if c.PBPassword == "" { c.PBPassword = os.Getenv("PROFITBRICKS_PASSWORD") } if c.PBUrl == "" { c.PBUrl = "https://api.profitbricks.com/rest/v2" } if c.Cores == 0 { c.Cores = 4 } if c.Ram == 0 { c.Ram = 2048 } if c.DiskSize == 0 { c.DiskSize = 50 } if c.Region == "" { c.Region = "us/las" } if c.DiskType == "" { c.DiskType = "HDD" } if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } if c.Image == "" { errs = packer.MultiErrorAppend( errs, errors.New("ProfitBricks 'image' is required")) } if c.PBUsername == "" { errs = packer.MultiErrorAppend( errs, errors.New("ProfitBricks username is required")) } if c.PBPassword == "" { errs = packer.MultiErrorAppend( errs, errors.New("ProfitBricks password is required")) } if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } common.ScrubConfig(c, c.PBUsername) return &c, nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars // Accumulate any errors errs := common.CheckUnusedConfig(md) if b.config.APIURL == "" { // Default to environment variable for API URL b.config.APIURL = os.Getenv("CLOUDSTACK_API_URL") } if b.config.APIKey == "" { // Default to environment variable for API key b.config.APIKey = os.Getenv("CLOUDSTACK_API_KEY") } if b.config.SecretKey == "" { // Default to environment variable for API secret b.config.SecretKey = os.Getenv("CLOUDSTACK_SECRET_KEY") } if b.config.HTTPPortMin == 0 { b.config.HTTPPortMin = 8000 } if b.config.HTTPPortMax == 0 { b.config.HTTPPortMax = 9000 } if b.config.TemplateName == "" { // Default to packer-{{ unix timestamp (utc) }} b.config.TemplateName = "packer-{{timestamp}}" } if b.config.TemplateDisplayText == "" { b.config.TemplateDisplayText = "Packer_Generated_Template" } if b.config.SSHUsername == "" { // Default to "root". You can override this if your // source template has a different user account. b.config.SSHUsername = "root" } if b.config.SSHPort == 0 { // Default to port 22 b.config.SSHPort = 22 } if b.config.RawSSHTimeout == "" { // Default to 10 minute timeouts b.config.RawSSHTimeout = "10m" } if b.config.RawStateTimeout == "" { // Default to 5 minute timeouts waiting for desired // state. i.e. waiting for virtual machine to become // active b.config.RawStateTimeout = "5m" } if b.config.RawDetachISOWait == "" { // Default to wait 10 seconds before detaching the ISO // from the started virtual machine. b.config.RawDetachISOWait = "10s" } templates := map[string]*string{ "api_url": &b.config.APIURL, "api_key": &b.config.APIKey, "secret_key": &b.config.SecretKey, "ssh_timeout": &b.config.RawSSHTimeout, "state_timeout": &b.config.RawStateTimeout, "detach_iso_wait": &b.config.RawDetachISOWait, "ssh_username": &b.config.SSHUsername, "ssh_key_path": &b.config.SSHKeyPath, "ssh_password": &b.config.SSHPassword, "http_directory": &b.config.HTTPDir, "service_offering_id": &b.config.ServiceOfferingId, "project_id": &b.config.ProjectId, "template_id": &b.config.TemplateId, "zone_id": &b.config.ZoneId, "disk_offering_id": &b.config.DiskOfferingId, "hypervisor": &b.config.Hypervisor, "template_name": &b.config.TemplateName, "template_display_text": &b.config.TemplateDisplayText, "template_os_id": &b.config.TemplateOSId, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } validates := map[string]*string{ "user_data": &b.config.UserData, } for n, ptr := range validates { if err := b.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } if b.config.HTTPPortMin > b.config.HTTPPortMax { errs = packer.MultiErrorAppend( errs, errors.New("http_port_min must be less than http_port_max")) } // Required configurations that will display errors if not set if b.config.APIURL == "" { errs = packer.MultiErrorAppend( errs, errors.New("CLOUDSTACK_API_URL in env (APIURL in json) must be specified")) } if b.config.APIKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("CLOUDSTACK_API_KEY in env (APIKey in json) must be specified")) } if b.config.SecretKey == "" { errs = packer.MultiErrorAppend( errs, errors.New("CLOUDSTACK_SECRET_KEY in env (SecretKey in json) must be specified")) } if b.config.ServiceOfferingId == "" { errs = packer.MultiErrorAppend( errs, errors.New("service_offering_id must be specified")) } if b.config.TemplateId == "" { errs = packer.MultiErrorAppend( errs, errors.New("template_id must be specified")) } if b.config.ZoneId == "" { errs = packer.MultiErrorAppend( errs, errors.New("zone_id must be specified")) } sshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing ssh_timeout: %s", err)) } b.config.sshTimeout = sshTimeout detachISOWait, err := time.ParseDuration(b.config.RawDetachISOWait) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing iso_detach_wait: %s", err)) } b.config.detachISOWait = detachISOWait stateTimeout, err := time.ParseDuration(b.config.RawStateTimeout) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Failed parsing state_timeout: %s", err)) } b.config.stateTimeout = stateTimeout newTags := make(map[string]string) for k, v := range b.config.TemplateTags { k, err := b.config.tpl.Process(k, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing tag key %s: %s", k, err)) continue } v, err := b.config.tpl.Process(v, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing tag value '%s': %s", v, err)) continue } newTags[k] = v } b.config.TemplateTags = newTags if errs != nil && len(errs.Errors) > 0 { return nil, errs } common.ScrubConfig(b.config, b.config.APIKey, b.config.SecretKey) return nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars b.config.tpl.Funcs(awscommon.TemplateFuncs) // Defaults if b.config.ChrootMounts == nil { b.config.ChrootMounts = make([][]string, 0) } if b.config.CopyFiles == nil { b.config.CopyFiles = make([]string, 0) } if len(b.config.ChrootMounts) == 0 { b.config.ChrootMounts = [][]string{ []string{"proc", "proc", "/proc"}, []string{"sysfs", "sysfs", "/sys"}, []string{"bind", "/dev", "/dev"}, []string{"devpts", "devpts", "/dev/pts"}, []string{"binfmt_misc", "binfmt_misc", "/proc/sys/fs/binfmt_misc"}, } } if len(b.config.CopyFiles) == 0 { b.config.CopyFiles = []string{"/etc/resolv.conf"} } if b.config.CommandWrapper == "" { b.config.CommandWrapper = "{{.Command}}" } if b.config.MountPath == "" { b.config.MountPath = "packer-amazon-chroot-volumes/{{.Device}}" } // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) for i, mounts := range b.config.ChrootMounts { if len(mounts) != 3 { errs = packer.MultiErrorAppend( errs, errors.New("Each chroot_mounts entry should be three elements.")) break } for j, entry := range mounts { b.config.ChrootMounts[i][j], err = b.config.tpl.Process(entry, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing chroot_mounts[%d][%d]: %s", i, j, err)) } } } for i, file := range b.config.CopyFiles { var err error b.config.CopyFiles[i], err = b.config.tpl.Process(file, nil) if err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("Error processing copy_files[%d]: %s", i, err)) } } if b.config.SourceAmi == "" { errs = packer.MultiErrorAppend(errs, errors.New("source_ami is required.")) } templates := map[string]*string{ "device_path": &b.config.DevicePath, "source_ami": &b.config.SourceAmi, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
func newConfig(raws ...interface{}) (*Config, []string, error) { var c Config c.ctx = &interpolate.Context{} err := config.Decode(&c, &config.DecodeOpts{ Interpolate: true, InterpolateContext: c.ctx, }, raws...) if err != nil { return nil, nil, err } // Defaults log.Println(fmt.Sprintf("%s: %v", "PackerUserVars", c.PackerConfig.PackerUserVars)) if c.StorageContainer == "" { c.StorageContainer = "vhds" } if c.UserName == "" { c.UserName = "******" } c.Comm.SSHUsername = c.UserName if c.Comm.SSHTimeout == 0 { c.Comm.SSHTimeout = 20 * time.Minute } randSuffix := utils.RandomString("0123456789abcdefghijklmnopqrstuvwxyz", 10) c.tmpVmName = "PkrVM" + randSuffix c.tmpServiceName = "PkrSrv" + randSuffix c.tmpContainerName = "packer-provision-" + randSuffix // Check values var errs *packer.MultiError errs = packer.MultiErrorAppend(errs, c.Comm.Prepare(c.ctx)...) if c.SubscriptionName == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("subscription_name must be specified")) } if c.PublishSettingsPath == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("publish_settings_path must be specified")) } if c.StorageAccount == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("storage_account must be specified")) } if _, err := os.Stat(c.PublishSettingsPath); err != nil { errs = packer.MultiErrorAppend(errs, fmt.Errorf("publish_settings_path is not a valid path: %s", err)) } if !(c.OSType == constants.Target_Linux || c.OSType == constants.Target_Windows) { errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_type is not valid, must be one of: %s, %s", constants.Target_Windows, constants.Target_Linux)) } if c.RemoteSourceImageLink == "" && c.OSImageLabel == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_image_label or remote_source_image_link must be specified")) } if c.RemoteSourceImageLink != "" && c.OSImageLabel != "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("os_image_label and remote_source_image_link cannot both be specified")) } if c.Location == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("location must be specified")) } sizeIsValid := false for _, instanceSize := range allowedVMSizes { if c.InstanceSize == instanceSize { sizeIsValid = true break } } if !sizeIsValid { errs = packer.MultiErrorAppend(errs, fmt.Errorf("instance_size is not valid, must be one of: %v", allowedVMSizes)) } if c.UserImageLabel == "" { errs = packer.MultiErrorAppend(errs, fmt.Errorf("user_image_label must be specified")) } const userLabelRegex = "^[A-Za-z][A-Za-z0-9-_.]*[A-Za-z0-9]$" if !regexp.MustCompile(userLabelRegex).MatchString(c.UserImageLabel) { errs = packer.MultiErrorAppend(errs, fmt.Errorf("user_image_label is not valid, it should follow the pattern %s", userLabelRegex)) } c.userImageName = fmt.Sprintf("%s_%s", c.UserImageLabel, time.Now().Format("2006-01-02_15-04")) if (c.VNet != "" && c.Subnet == "") || (c.Subnet != "" && c.VNet == "") { errs = packer.MultiErrorAppend(errs, fmt.Errorf("vnet and subnet need to either both be set or both be empty")) } log.Println(common.ScrubConfig(c)) if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } return &c, nil, nil }
func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { md, err := common.DecodeConfig(&b.config, raws...) if err != nil { return nil, err } b.config.tpl, err = packer.NewConfigTemplate() if err != nil { return nil, err } b.config.tpl.UserVars = b.config.PackerUserVars b.config.tpl.Funcs(awscommon.TemplateFuncs) if b.config.BundleDestination == "" { b.config.BundleDestination = "/tmp" } if b.config.BundlePrefix == "" { b.config.BundlePrefix = "image-{{timestamp}}" } if b.config.BundleUploadCommand == "" { b.config.BundleUploadCommand = "sudo -n ec2-upload-bundle " + "-b {{.BucketName}} " + "-m {{.ManifestPath}} " + "-a {{.AccessKey}} " + "-s {{.SecretKey}} " + "-d {{.BundleDirectory}} " + "--batch " + "--url {{.S3Endpoint}} " + "--retry" } if b.config.BundleVolCommand == "" { b.config.BundleVolCommand = "sudo -n ec2-bundle-vol " + "-k {{.KeyPath}} " + "-u {{.AccountId}} " + "-c {{.CertPath}} " + "-r {{.Architecture}} " + "-e {{.PrivatePath}}/* " + "-d {{.Destination}} " + "-p {{.Prefix}} " + "--batch" } if b.config.X509UploadPath == "" { b.config.X509UploadPath = "/tmp" } // Accumulate any errors errs := common.CheckUnusedConfig(md) errs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...) errs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...) validates := map[string]*string{ "bundle_upload_command": &b.config.BundleUploadCommand, "bundle_vol_command": &b.config.BundleVolCommand, } for n, ptr := range validates { if err := b.config.tpl.Validate(*ptr); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error parsing %s: %s", n, err)) } } templates := map[string]*string{ "account_id": &b.config.AccountId, "ami_name": &b.config.AMIName, "bundle_destination": &b.config.BundleDestination, "bundle_prefix": &b.config.BundlePrefix, "s3_bucket": &b.config.S3Bucket, "x509_cert_path": &b.config.X509CertPath, "x509_key_path": &b.config.X509KeyPath, "x509_upload_path": &b.config.X509UploadPath, } for n, ptr := range templates { var err error *ptr, err = b.config.tpl.Process(*ptr, nil) if err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("Error processing %s: %s", n, err)) } } if b.config.AccountId == "" { errs = packer.MultiErrorAppend(errs, errors.New("account_id is required")) } else { b.config.AccountId = strings.Replace(b.config.AccountId, "-", "", -1) } if b.config.S3Bucket == "" { errs = packer.MultiErrorAppend(errs, errors.New("s3_bucket is required")) } if b.config.X509CertPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_cert_path is required")) } else if _, err := os.Stat(b.config.X509CertPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_cert_path points to bad file: %s", err)) } if b.config.X509KeyPath == "" { errs = packer.MultiErrorAppend(errs, errors.New("x509_key_path is required")) } else if _, err := os.Stat(b.config.X509KeyPath); err != nil { errs = packer.MultiErrorAppend( errs, fmt.Errorf("x509_key_path points to bad file: %s", err)) } if errs != nil && len(errs.Errors) > 0 { return nil, errs } log.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey)) return nil, nil }
func NewConfig(raws ...interface{}) (*Config, []string, error) { var c Config var md mapstructure.Metadata err := config.Decode(&c, &config.DecodeOpts{ Metadata: &md, Interpolate: true, InterpolateContext: &c.ctx, InterpolateFilter: &interpolate.RenderFilter{ Exclude: []string{ "run_command", }, }, }, raws...) if err != nil { return nil, nil, err } var errs *packer.MultiError if c.SnapshotName == "" { def, err := interpolate.Render("packer-{{timestamp}}", nil) if err != nil { panic(err) } // Default to packer-{{ unix timestamp (utc) }} c.SnapshotName = def } if c.Image == "" { errs = packer.MultiErrorAppend( errs, errors.New("1&1 'image' is required")) } if c.Token == "" { c.Token = os.Getenv("ONEANDONE_TOKEN") } if c.Url == "" { c.Url = oneandone.BaseUrl } if c.DiskSize == 0 { c.DiskSize = 50 } if c.Retries == 0 { c.Retries = 600 } if c.DataCenterName != "" { token := oneandone.SetToken(c.Token) //Create an API client api := oneandone.New(token, c.Url) dcs, err := api.ListDatacenters() if err != nil { errs = packer.MultiErrorAppend( errs, err) } for _, dc := range dcs { if strings.ToLower(dc.CountryCode) == strings.ToLower(c.DataCenterName) { c.DataCenterId = dc.Id break } } } if es := c.Comm.Prepare(&c.ctx); len(es) > 0 { errs = packer.MultiErrorAppend(errs, es...) } if errs != nil && len(errs.Errors) > 0 { return nil, nil, errs } common.ScrubConfig(c, c.Token) return &c, nil, nil }
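Nearly every snippet above passes its credentials to common.ScrubConfig before logging the decoded configuration. The sketch below shows the general redact-before-log technique with a plain string replacement; it is an assumption about the spirit of such a helper, not Packer's actual ScrubConfig implementation, and the struct and field names are invented.

// Stand-alone sketch of redacting sensitive values before logging a config,
// analogous in intent to the common.ScrubConfig calls above.
package main

import (
	"fmt"
	"log"
	"strings"
)

type config struct {
	AccessKey string
	SecretKey string
	Region    string
}

// scrub renders the config with %+v and masks every supplied sensitive value.
func scrub(c config, sensitive ...string) string {
	out := fmt.Sprintf("%+v", c)
	for _, s := range sensitive {
		if s != "" {
			out = strings.ReplaceAll(out, s, "<filtered>")
		}
	}
	return out
}

func main() {
	c := config{AccessKey: "AKIAEXAMPLE", SecretKey: "s3cr3t", Region: "us-east-1"}
	// The logged line shows the region but neither credential in clear text.
	log.Println(scrub(c, c.AccessKey, c.SecretKey))
}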