// createAppSlug makes an archive of the app with (otto-specific exclusions) // and yields a path to a tempfile containing that archive // // TODO: allow customization of the Exclude patterns func createAppSlug(path string) (string, error) { archive, err := archive.CreateArchive(path, &archive.ArchiveOpts{ Exclude: []string{".otto", ".vagrant"}, VCS: true, }) if err != nil { return "", err } defer archive.Close() // Archive is just a reader, and we need it in a file. The below seems // fiddly, could there be a better way? slug, err := ioutil.TempFile("", "otto-slug-") if err != nil { return "", err } _, err = io.Copy(slug, archive) cerr := slug.Close() if err != nil { return "", err } if cerr != nil { return "", err } return slug.Name(), nil }
// WriteArchive writes the contents of the ScriptPack as a tar gzip to the // given path. func (s *ScriptPack) WriteArchive(dst string) error { // Let's just open the file we're going to write to first to verify // we can write there since everything else is pointless if we can't. f, err := os.Create(dst) if err != nil { return err } defer f.Close() // Create a temporary directory to store the raw ScriptPack data td, err := ioutil.TempDir("", "otto") if err != nil { return err } defer os.RemoveAll(td) // Write the ScriptPack if err := s.Write(td); err != nil { return err } // Archive this ScriptPack a, err := archive.CreateArchive(td, &archive.ArchiveOpts{ VCS: false, }) if err != nil { return err } defer a.Close() // Write the archive to final path _, err = io.Copy(f, a) return err }
func (c *PushCommand) Run(args []string) int { var atlasAddress, atlasToken string var archiveVCS, moduleUpload bool var name string var overwrite []string args = c.Meta.process(args, true) cmdFlags := c.Meta.flagSet("push") cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "") cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") cmdFlags.StringVar(&atlasToken, "token", "", "") cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "") cmdFlags.StringVar(&name, "name", "", "") cmdFlags.BoolVar(&archiveVCS, "vcs", true, "") cmdFlags.Var((*FlagStringSlice)(&overwrite), "overwrite", "") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } // Make a map of the set values overwriteMap := make(map[string]struct{}, len(overwrite)) for _, v := range overwrite { overwriteMap[v] = struct{}{} } // This is a map of variables specifically from the CLI that we want to overwrite. // We need this because there is a chance that the user is trying to modify // a variable we don't see in our context, but which exists in this atlas // environment. cliVars := make(map[string]string) for k, v := range c.variables { if _, ok := overwriteMap[k]; ok { if val, ok := v.(string); ok { cliVars[k] = val } else { c.Ui.Error(fmt.Sprintf("Error reading value for variable: %s", k)) return 1 } } } // The pwd is used for the configuration path if one is not given pwd, err := os.Getwd() if err != nil { c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) return 1 } // Get the path to the configuration depending on the args. 
var configPath string args = cmdFlags.Args() if len(args) > 1 { c.Ui.Error("The apply command expects at most one argument.") cmdFlags.Usage() return 1 } else if len(args) == 1 { configPath = args[0] } else { configPath = pwd } // Verify the state is remote, we can't push without a remote state s, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf("Failed to read state: %s", err)) return 1 } if !s.State().IsRemote() { c.Ui.Error( "Remote state is not enabled. For Atlas to run Terraform\n" + "for you, remote state must be used and configured. Remote\n" + "state via any backend is accepted, not just Atlas. To\n" + "configure remote state, use the `terraform remote config`\n" + "command.") return 1 } // Build the context based on the arguments given ctx, planned, err := c.Context(contextOpts{ Path: configPath, StatePath: c.Meta.statePath, }) if err != nil { c.Ui.Error(err.Error()) return 1 } if planned { c.Ui.Error( "A plan file cannot be given as the path to the configuration.\n" + "A path to a module (directory with configuration) must be given.") return 1 } // Get the configuration config := ctx.Module().Config() if name == "" { if config.Atlas == nil || config.Atlas.Name == "" { c.Ui.Error( "The name of this Terraform configuration in Atlas must be\n" + "specified within your configuration or the command-line. To\n" + "set it on the command-line, use the `-name` parameter.") return 1 } name = config.Atlas.Name } // Initialize the client if it isn't given. 
if c.client == nil { // Make sure to nil out our client so our token isn't sitting around defer func() { c.client = nil }() // Initialize it to the default client, we set custom settings later client := atlas.DefaultClient() if atlasAddress != "" { client, err = atlas.NewClient(atlasAddress) if err != nil { c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err)) return 1 } } client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) if atlasToken != "" { client.Token = atlasToken } c.client = &atlasPushClient{Client: client} } // Get the variables we already have in atlas atlasVars, err := c.client.Get(name) if err != nil { c.Ui.Error(fmt.Sprintf( "Error looking up previously pushed configuration: %s", err)) return 1 } // Set remote variables in the context if we don't have a value here. These // don't have to be correct, it just prevents the Input walk from prompting // the user for input. ctxVars := ctx.Variables() atlasVarSentry := "ATLAS_78AC153CA649EAA44815DAD6CBD4816D" for k, _ := range atlasVars { if _, ok := ctxVars[k]; !ok { ctx.SetVariable(k, atlasVarSentry) } } // Ask for input if err := ctx.Input(c.InputMode()); err != nil { c.Ui.Error(fmt.Sprintf( "Error while asking for variable input:\n\n%s", err)) return 1 } // Now that we've gone through the input walk, we can be sure we have all // the variables we're going to get. // We are going to keep these separate from the atlas variables until // upload, so we can notify the user which local variables we're sending. serializedVars, err := tfVars(ctx.Variables()) if err != nil { c.Ui.Error(fmt.Sprintf( "An error has occurred while serializing the variables for uploading:\n"+ "%s", err)) return 1 } // Build the archiving options, which includes everything it can // by default according to VCS rules but forcing the data directory. 
archiveOpts := &archive.ArchiveOpts{ VCS: archiveVCS, Extra: map[string]string{ DefaultDataDir: c.DataDir(), }, } if !moduleUpload { // If we're not uploading modules, then exclude the modules dir. archiveOpts.Exclude = append( archiveOpts.Exclude, filepath.Join(c.DataDir(), "modules")) } archiveR, err := archive.CreateArchive(configPath, archiveOpts) if err != nil { c.Ui.Error(fmt.Sprintf( "An error has occurred while archiving the module for uploading:\n"+ "%s", err)) return 1 } // List of the vars we're uploading to display to the user. // We always upload all vars from atlas, but only report them if they are overwritten. var setVars []string // variables to upload var uploadVars []atlas.TFVar // first add all the variables we want to send which have been serialized // from the local context. for _, sv := range serializedVars { _, inOverwrite := overwriteMap[sv.Key] _, inAtlas := atlasVars[sv.Key] // We have a variable that's not in atlas, so always send it. if !inAtlas { uploadVars = append(uploadVars, sv) setVars = append(setVars, sv.Key) } // We're overwriting an atlas variable. // We also want to check that we // don't send the dummy sentry value back to atlas. This could happen // if it's specified as an overwrite on the cli, but we didn't set a // new value. if inAtlas && inOverwrite && sv.Value != atlasVarSentry { uploadVars = append(uploadVars, sv) setVars = append(setVars, sv.Key) // remove this value from the atlas vars, because we're going to // send back the remainder regardless. delete(atlasVars, sv.Key) } } // now send back all the existing atlas vars, inserting any overwrites from the cli. for k, av := range atlasVars { if v, ok := cliVars[k]; ok { av.Value = v setVars = append(setVars, k) } uploadVars = append(uploadVars, av) } sort.Strings(setVars) if len(setVars) > 0 { c.Ui.Output( "The following variables will be set or overwritten within Atlas from\n" + "their local values. 
All other variables are already set within Atlas.\n" + "If you want to modify the value of a variable, use the Atlas web\n" + "interface or set it locally and use the -overwrite flag.\n\n") for _, v := range setVars { c.Ui.Output(fmt.Sprintf(" * %s", v)) } // Newline c.Ui.Output("") } // Upsert! opts := &pushUpsertOptions{ Name: name, Archive: archiveR, Variables: ctx.Variables(), TFVars: uploadVars, } c.Ui.Output("Uploading Terraform configuration...") vsn, err := c.client.Upsert(opts) if err != nil { c.Ui.Error(fmt.Sprintf( "An error occurred while uploading the module:\n\n%s", err)) return 1 } c.Ui.Output(c.Colorize().Color(fmt.Sprintf( "[reset][bold][green]Configuration %q uploaded! (v%d)", name, vsn))) return 0 }
func (c *PushCommand) Run(args []string) int { var atlasAddress, atlasToken string var archiveVCS, moduleUpload bool var name string args = c.Meta.process(args, true) cmdFlags := c.Meta.flagSet("push") cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "") cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") cmdFlags.StringVar(&atlasToken, "token", "", "") cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "") cmdFlags.StringVar(&name, "name", "", "") cmdFlags.BoolVar(&archiveVCS, "vcs", true, "") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } // The pwd is used for the configuration path if one is not given pwd, err := os.Getwd() if err != nil { c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) return 1 } // Get the path to the configuration depending on the args. var configPath string args = cmdFlags.Args() if len(args) > 1 { c.Ui.Error("The apply command expects at most one argument.") cmdFlags.Usage() return 1 } else if len(args) == 1 { configPath = args[0] } else { configPath = pwd } // Verify the state is remote, we can't push without a remote state s, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf("Failed to read state: %s", err)) return 1 } if !s.State().IsRemote() { c.Ui.Error( "Remote state is not enabled. For Atlas to run Terraform\n" + "for you, remote state must be used and configured. Remote\n" + "state via any backend is accepted, not just Atlas. 
To\n" + "configure remote state, use the `terraform remote config`\n" + "command.") return 1 } // Build the context based on the arguments given ctx, planned, err := c.Context(contextOpts{ Path: configPath, StatePath: c.Meta.statePath, }) if err != nil { c.Ui.Error(err.Error()) return 1 } if planned { c.Ui.Error( "A plan file cannot be given as the path to the configuration.\n" + "A path to a module (directory with configuration) must be given.") return 1 } // Get the configuration config := ctx.Module().Config() if name == "" { if config.Atlas == nil || config.Atlas.Name == "" { c.Ui.Error( "The name of this Terraform configuration in Atlas must be\n" + "specified within your configuration or the command-line. To\n" + "set it on the command-line, use the `-name` parameter.") return 1 } name = config.Atlas.Name } // Initialize the client if it isn't given. if c.client == nil { // Make sure to nil out our client so our token isn't sitting around defer func() { c.client = nil }() // Initialize it to the default client, we set custom settings later client := atlas.DefaultClient() if atlasAddress != "" { client, err = atlas.NewClient(atlasAddress) if err != nil { c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err)) return 1 } } if atlasToken != "" { client.Token = atlasToken } c.client = &atlasPushClient{Client: client} } // Get the variables we might already have vars, err := c.client.Get(name) if err != nil { c.Ui.Error(fmt.Sprintf( "Error looking up previously pushed configuration: %s", err)) return 1 } for k, v := range vars { // Local variables override remote ones if _, exists := ctx.Variables()[k]; exists { continue } ctx.SetVariable(k, v) } // Ask for input if err := ctx.Input(c.InputMode()); err != nil { c.Ui.Error(fmt.Sprintf( "Error while asking for variable input:\n\n%s", err)) return 1 } // Build the archiving options, which includes everything it can // by default according to VCS rules but forcing the data directory. 
archiveOpts := &archive.ArchiveOpts{ VCS: archiveVCS, Extra: map[string]string{ DefaultDataDir: c.DataDir(), }, } if !moduleUpload { // If we're not uploading modules, then exclude the modules dir. archiveOpts.Exclude = append( archiveOpts.Exclude, filepath.Join(c.DataDir(), "modules")) } archiveR, err := archive.CreateArchive(configPath, archiveOpts) if err != nil { c.Ui.Error(fmt.Sprintf( "An error has occurred while archiving the module for uploading:\n"+ "%s", err)) return 1 } // Upsert! opts := &pushUpsertOptions{ Name: name, Archive: archiveR, Variables: ctx.Variables(), } vsn, err := c.client.Upsert(opts) if err != nil { c.Ui.Error(fmt.Sprintf( "An error occurred while uploading the module:\n\n%s", err)) return 1 } c.Ui.Output(c.Colorize().Color(fmt.Sprintf( "[reset][bold][green]Configuration %q uploaded! (v%d)", name, vsn))) return 0 }
// Run invokes the CLI with the given arguments. The first argument is always
// the name of the application. This method slices accordingly.
//
// Expects exactly two positional arguments after flags: the Atlas slug and
// the local path to archive. Returns one of the ExitCode* constants.
func (cli *CLI) Run(args []string) int {
	// Initialize the logger to start (overridden later if debug is given)
	cli.initLogger(os.Getenv("ATLAS_LOG"))

	var debug, version bool
	var archiveOpts archive.ArchiveOpts
	var uploadOpts UploadOpts

	// All flag output (usage, parse errors) goes to cli.errStream.
	flags := flag.NewFlagSet(Name, flag.ContinueOnError)
	flags.SetOutput(cli.errStream)
	flags.Usage = func() {
		fmt.Fprintf(cli.errStream, usage, Name)
	}
	flags.BoolVar(&archiveOpts.VCS, "vcs", false,
		"use VCS to detect which files to upload")
	flags.StringVar(&uploadOpts.URL, "address", "",
		"Atlas server address")
	flags.StringVar(&uploadOpts.Token, "token", "",
		"Atlas API token")
	flags.Var((*FlagSliceVar)(&archiveOpts.Exclude), "exclude",
		"files/folders to exclude")
	flags.Var((*FlagSliceVar)(&archiveOpts.Include), "include",
		"files/folders to include")
	flags.Var((*FlagMetadataVar)(&uploadOpts.Metadata), "metadata",
		"arbitrary metadata to pass along with the request")
	flags.BoolVar(&debug, "debug", false, "turn on debug output")
	flags.BoolVar(&version, "version", false, "display the version")

	// Parse all the flags
	if err := flags.Parse(args[1:]); err != nil {
		return ExitCodeParseFlagsError
	}

	// Turn on debug mode if requested
	if debug {
		levelFilter.SetMinLevel(logutils.LogLevel("DEBUG"))
	}

	// Version
	if version {
		fmt.Fprintf(cli.errStream, "%s v%s\n", Name, Version)
		return ExitCodeOK
	}

	// Get the parsed arguments (the ones left over after all the flags have been
	// parsed)
	parsedArgs := flags.Args()

	if len(parsedArgs) != 2 {
		fmt.Fprintf(cli.errStream, "cli: must specify two arguments - slug, path\n")
		flags.Usage()
		return ExitCodeBadArgs
	}

	// Get the name of the app and the path to archive
	slug, path := parsedArgs[0], parsedArgs[1]
	uploadOpts.Slug = slug

	// Get the archive reader
	r, err := archive.CreateArchive(path, &archiveOpts)
	if err != nil {
		fmt.Fprintf(cli.errStream, "error archiving: %s\n", err)
		return ExitCodeArchiveError
	}
	defer r.Close()

	// Put a progress bar around the reader so upload progress is drawn to
	// the terminal as bytes are consumed.
	pr := &ioprogress.Reader{
		Reader: r,
		Size:   r.Size,
		DrawFunc: ioprogress.DrawTerminalf(os.Stdout, func(p, t int64) string {
			return fmt.Sprintf(
				"Uploading %s: %s",
				slug,
				ioprogress.DrawTextFormatBytes(p, t))
		}),
	}

	// Start the upload; it runs asynchronously and reports completion or
	// failure on the returned channels.
	doneCh, uploadErrCh, err := Upload(pr, r.Size, &uploadOpts)
	if err != nil {
		fmt.Fprintf(cli.errStream, "error starting upload: %s\n", err)
		return ExitCodeUploadError
	}

	// Block until the upload either fails or completes with a version number.
	select {
	case err := <-uploadErrCh:
		fmt.Fprintf(cli.errStream, "error uploading: %s\n", err)
		return ExitCodeUploadError
	case version := <-doneCh:
		// NOTE(review): success output goes to stdout via fmt.Printf while
		// every other message uses cli.errStream — presumably intentional
		// (result on stdout, diagnostics on stderr); confirm before changing.
		fmt.Printf("Uploaded %s v%d\n", slug, version)
	}

	return ExitCodeOK
}
// Run pushes a Packer template and its directory to Atlas so builds can be
// run remotely. The single positional argument is the template path.
// Returns 0 on success, 1 on any error.
func (c *PushCommand) Run(args []string) int {
	var create bool
	var token string

	f := flag.NewFlagSet("push", flag.ContinueOnError)
	f.Usage = func() { c.Ui.Error(c.Help()) }
	f.BoolVar(&create, "create", false, "create")
	f.StringVar(&token, "token", "", "token")
	if err := f.Parse(args); err != nil {
		return 1
	}

	args = f.Args()
	if len(args) != 1 {
		f.Usage()
		return 1
	}

	// Read the template
	tpl, err := packer.ParseTemplateFile(args[0], nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err))
		return 1
	}

	// Validate some things: a push requires at least a name in the
	// template's "push" section.
	if tpl.Push.Name == "" {
		c.Ui.Error(fmt.Sprintf(
			"The 'push' section must be specified in the template with\n" +
				"at least the 'name' option set."))
		return 1
	}

	// Determine our token: the CLI flag wins, then the template.
	if token == "" {
		token = tpl.Push.Token
	}

	// Build our client. The deferred nil-out ensures the token doesn't
	// linger on the command struct after this run.
	defer func() { c.client = nil }()
	c.client = atlas.DefaultClient()
	if tpl.Push.Address != "" {
		c.client, err = atlas.NewClient(tpl.Push.Address)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error setting up API client: %s", err))
			return 1
		}
	}
	if token != "" {
		c.client.Token = token
	}

	// Build the archiving options. archiveTemplateEntry records which file
	// in the archive is the template itself.
	var opts archive.ArchiveOpts
	opts.Include = tpl.Push.Include
	opts.Exclude = tpl.Push.Exclude
	opts.VCS = tpl.Push.VCS
	opts.Extra = map[string]string{
		archiveTemplateEntry: args[0],
	}

	// Determine the path we're archiving. This logic is a bit complicated
	// as there are three possibilities:
	//
	//   1.) BaseDir is an absolute path, just use that.
	//
	//   2.) BaseDir is empty, so we use the directory of the template.
	//
	//   3.) BaseDir is relative, so we use the path relative to the directory
	//       of the template.
	//
	path := tpl.Push.BaseDir
	if path == "" || !filepath.IsAbs(path) {
		tplPath, err := filepath.Abs(args[0])
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
			return 1
		}
		tplPath = filepath.Dir(tplPath)
		if path != "" {
			tplPath = filepath.Join(tplPath, path)
		}
		path, err = filepath.Abs(tplPath)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err))
			return 1
		}
	}

	// Find the Atlas post-processors, if possible
	var atlasPPs []packer.RawPostProcessorConfig
	for _, list := range tpl.PostProcessors {
		for _, pp := range list {
			if pp.Type == "atlas" {
				atlasPPs = append(atlasPPs, pp)
			}
		}
	}

	// Build the upload options: one entry per builder, flagged as an
	// artifact if any Atlas post-processor applies to that build.
	var uploadOpts uploadOpts
	uploadOpts.Slug = tpl.Push.Name
	uploadOpts.Builds = make(map[string]*uploadBuildInfo)
	for _, b := range tpl.Builders {
		info := &uploadBuildInfo{Type: b.Type}

		// Determine if we're artifacting this build
		for _, pp := range atlasPPs {
			if !pp.Skip(b.Name) {
				info.Artifact = true
				break
			}
		}

		uploadOpts.Builds[b.Name] = info
	}

	// Warn about builds not having post-processors.
	// NOTE(review): this warning is emitted via c.Ui.Error — presumably to
	// make it prominent on stderr; confirm before changing the channel.
	var badBuilds []string
	for name, b := range uploadOpts.Builds {
		if b.Artifact {
			continue
		}
		badBuilds = append(badBuilds, name)
	}
	if len(badBuilds) > 0 {
		c.Ui.Error(fmt.Sprintf(
			"Warning! One or more of the builds in this template does not\n"+
				"have an Atlas post-processor. Artifacts from this template will\n"+
				"not appear in the Atlas artifact registry.\n\n"+
				"This is just a warning. Atlas will still build your template\n"+
				"and assume other post-processors are sending the artifacts where\n"+
				"they need to go.\n\n"+
				"Builds: %s\n\n", strings.Join(badBuilds, ", ")))
	}

	// Create the build config if it doesn't currently exist.
	if err := c.create(uploadOpts.Slug, create); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Start the archiving process
	r, err := archive.CreateArchive(path, &opts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error archiving: %s", err))
		return 1
	}
	defer r.Close()

	// Start the upload process; completion or failure arrives on the
	// returned channels.
	doneCh, uploadErrCh, err := c.upload(r, &uploadOpts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err))
		return 1
	}

	// Make a ctrl-C channel so an in-flight push can be cancelled cleanly.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	defer signal.Stop(sigCh)

	err = nil
	select {
	case err = <-uploadErrCh:
		err = fmt.Errorf("Error uploading: %s", err)
	case <-sigCh:
		err = fmt.Errorf("Push cancelled from Ctrl-C")
	case <-doneCh:
	}

	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	c.Ui.Output(fmt.Sprintf("Push successful to '%s'", tpl.Push.Name))
	return 0
}
func (c *PushCommand) Run(args []string) int { var token string var message string var name string var create bool f := c.Meta.FlagSet("push", FlagSetVars) f.Usage = func() { c.Ui.Error(c.Help()) } f.StringVar(&token, "token", "", "token") f.StringVar(&message, "m", "", "message") f.StringVar(&message, "message", "", "message") f.StringVar(&name, "name", "", "name") f.BoolVar(&create, "create", false, "create (deprecated)") if err := f.Parse(args); err != nil { return 1 } if message != "" { c.Ui.Say("[DEPRECATED] -m/-message is deprecated and will be removed in a future Packer release") } args = f.Args() if len(args) != 1 { f.Usage() return 1 } // Print deprecations if create { c.Ui.Error(fmt.Sprintf("The '-create' option is now the default and is\n" + "longer used. It will be removed in the next version.")) } // Parse the template tpl, err := template.ParseFile(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } // Get the core core, err := c.Meta.Core(tpl) if err != nil { c.Ui.Error(err.Error()) return 1 } push := core.Template.Push // If we didn't pass name from the CLI, use the template if name == "" { name = push.Name } // Validate some things if name == "" { c.Ui.Error(fmt.Sprintf( "The 'push' section must be specified in the template with\n" + "at least the 'name' option set. 
Alternatively, you can pass the\n" + "name parameter from the CLI.")) return 1 } if !reName.MatchString(name) { c.Ui.Error(errInvalidName.Error()) return 1 } // Determine our token if token == "" { token = push.Token } // Build our client defer func() { c.client = nil }() c.client = atlas.DefaultClient() if push.Address != "" { c.client, err = atlas.NewClient(push.Address) if err != nil { c.Ui.Error(fmt.Sprintf( "Error setting up API client: %s", err)) return 1 } } if token != "" { c.client.Token = token } // Build the archiving options var opts archive.ArchiveOpts opts.Include = push.Include opts.Exclude = push.Exclude opts.VCS = push.VCS opts.Extra = map[string]string{ archiveTemplateEntry: args[0], } // Determine the path we're archiving. This logic is a bit complicated // as there are three possibilities: // // 1.) BaseDir is an absolute path, just use that. // // 2.) BaseDir is empty, so we use the directory of the template. // // 3.) BaseDir is relative, so we use the path relative to the directory // of the template. 
// path := push.BaseDir if path == "" || !filepath.IsAbs(path) { tplPath, err := filepath.Abs(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err)) return 1 } tplPath = filepath.Dir(tplPath) if path != "" { tplPath = filepath.Join(tplPath, path) } path, err = filepath.Abs(tplPath) if err != nil { c.Ui.Error(fmt.Sprintf("Error determining path to archive: %s", err)) return 1 } } // Find the Atlas post-processors, if possible var atlasPPs []*template.PostProcessor for _, list := range tpl.PostProcessors { for _, pp := range list { if pp.Type == "atlas" { atlasPPs = append(atlasPPs, pp) } } } // Build the upload options var uploadOpts uploadOpts uploadOpts.Slug = name uploadOpts.Builds = make(map[string]*uploadBuildInfo) for _, b := range tpl.Builders { info := &uploadBuildInfo{Type: b.Type} // Determine if we're artifacting this build for _, pp := range atlasPPs { if !pp.Skip(b.Name) { info.Artifact = true break } } uploadOpts.Builds[b.Name] = info } // Add the upload metadata metadata := make(map[string]interface{}) if message != "" { metadata["message"] = message } metadata["template"] = tpl.RawContents metadata["template_name"] = filepath.Base(args[0]) uploadOpts.Metadata = metadata // Warn about builds not having post-processors. var badBuilds []string for name, b := range uploadOpts.Builds { if b.Artifact { continue } badBuilds = append(badBuilds, name) } if len(badBuilds) > 0 { c.Ui.Error(fmt.Sprintf( "Warning! One or more of the builds in this template does not\n"+ "have an Atlas post-processor. Artifacts from this template will\n"+ "not appear in the Atlas artifact registry.\n\n"+ "This is just a warning. 
Atlas will still build your template\n"+ "and assume other post-processors are sending the artifacts where\n"+ "they need to go.\n\n"+ "Builds: %s\n\n", strings.Join(badBuilds, ", "))) } // Start the archiving process r, err := archive.CreateArchive(path, &opts) if err != nil { c.Ui.Error(fmt.Sprintf("Error archiving: %s", err)) return 1 } defer r.Close() // Start the upload process doneCh, uploadErrCh, err := c.upload(r, &uploadOpts) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting upload: %s", err)) return 1 } // Make a ctrl-C channel sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, os.Interrupt) defer signal.Stop(sigCh) err = nil select { case err = <-uploadErrCh: err = fmt.Errorf("Error uploading: %s", err) case <-sigCh: err = fmt.Errorf("Push cancelled from Ctrl-C") case <-doneCh: } if err != nil { c.Ui.Error(err.Error()) return 1 } c.Ui.Say(fmt.Sprintf("Push successful to '%s'", name)) return 0 }
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { if _, err := p.client.Artifact(p.config.user, p.config.name); err != nil { if err != atlas.ErrNotFound { return nil, false, fmt.Errorf( "Error finding artifact: %s", err) } // Artifact doesn't exist, create it ui.Message(fmt.Sprintf("Creating artifact: %s", p.config.Artifact)) _, err = p.client.CreateArtifact(p.config.user, p.config.name) if err != nil { return nil, false, fmt.Errorf( "Error creating artifact: %s", err) } } opts := &atlas.UploadArtifactOpts{ User: p.config.user, Name: p.config.name, Type: p.config.Type, ID: artifact.Id(), Metadata: p.metadata(artifact), BuildId: p.config.buildId, } if fs := artifact.Files(); len(fs) > 0 { var archiveOpts archive.ArchiveOpts // We have files. We want to compress/upload them. If we have just // one file, then we use it as-is. Otherwise, we compress all of // them into a single file. var path string if len(fs) == 1 { path = fs[0] } else { path = longestCommonPrefix(fs) if path == "" { return nil, false, fmt.Errorf( "No common prefix for achiving files: %v", fs) } // Modify the archive options to only include the files // that are in our file list. 
include := make([]string, 0, len(fs)) for i, f := range fs { include[i] = strings.Replace(f, path, "", 1) } archiveOpts.Include = include } r, err := archive.CreateArchive(path, &archiveOpts) if err != nil { return nil, false, fmt.Errorf( "Error archiving artifact: %s", err) } defer r.Close() opts.File = r opts.FileSize = r.Size } ui.Message("Uploading artifact version...") var av *atlas.ArtifactVersion doneCh := make(chan struct{}) errCh := make(chan error, 1) go func() { var err error av, err = p.client.UploadArtifact(opts) if err != nil { errCh <- err return } close(doneCh) }() select { case err := <-errCh: return nil, false, fmt.Errorf("Error uploading: %s", err) case <-doneCh: } return &Artifact{ Name: p.config.Artifact, Type: p.config.Type, Version: av.Version, }, true, nil }