func setup(t *testing.T) (packer.Ui, packer.Artifact, error) { // Create fake UI and Cache ui := packer.TestUi(t) cache := &packer.FileCache{CacheDir: os.TempDir()} // Create config for file builder const fileConfig = `{"builders":[{"type":"file","target":"package.txt","content":"Hello world!"}]}` tpl, err := template.Parse(strings.NewReader(fileConfig)) if err != nil { return nil, nil, fmt.Errorf("Unable to parse setup configuration: %s", err) } // Prepare the file builder builder := file.Builder{} warnings, err := builder.Prepare(tpl.Builders["file"].Config) if len(warnings) > 0 { for _, warn := range warnings { return nil, nil, fmt.Errorf("Configuration warning: %s", warn) } } if err != nil { return nil, nil, fmt.Errorf("Invalid configuration: %s", err) } // Run the file builder artifact, err := builder.Run(ui, nil, cache) if err != nil { return nil, nil, fmt.Errorf("Failed to build artifact: %s", err) } return ui, artifact, err }
func testChecksum(t *testing.T, config string) packer.Artifact { ui, artifact, err := setup(t) if err != nil { t.Fatalf("Error bootstrapping test: %s", err) } if artifact != nil { defer artifact.Destroy() } tpl, err := template.Parse(strings.NewReader(config)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } checksum := PostProcessor{} checksum.Configure(tpl.PostProcessors[0][0].Config) // I get the feeling these should be automatically available somewhere, but // some of the post-processors construct this manually. checksum.config.ctx.BuildName = "chocolate" checksum.config.PackerBuildName = "vanilla" checksum.config.PackerBuilderType = "file" artifactOut, _, err := checksum.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to checksum artifact: %s", err) } return artifactOut }
func testArchive(t *testing.T, config string) packer.Artifact { if os.Getenv(env.TestEnvVar) == "" { t.Skip(fmt.Sprintf( "Acceptance tests skipped unless env '%s' set", env.TestEnvVar)) } ui, artifact, err := setup(t) if err != nil { t.Fatalf("Error bootstrapping test: %s", err) } if artifact != nil { defer artifact.Destroy() } tpl, err := template.Parse(strings.NewReader(config)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } compressor := PostProcessor{} compressor.Configure(tpl.PostProcessors[0][0].Config) artifactOut, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to compress artifact: %s", err) } return artifactOut }
func TestCoreValidate(t *testing.T) { cases := []struct { File string Vars map[string]string Err bool }{ { "validate-dup-builder.json", nil, true, }, // Required variable not set { "validate-req-variable.json", nil, true, }, { "validate-req-variable.json", map[string]string{"foo": "bar"}, false, }, } for _, tc := range cases { f, err := os.Open(fixtureDir(tc.File)) if err != nil { t.Fatalf("err: %s", err) } tpl, err := template.Parse(f) f.Close() if err != nil { t.Fatalf("err: %s\n\n%s", tc.File, err) } _, err = NewCore(&CoreConfig{ Template: tpl, Variables: tc.Vars, }) if (err != nil) != tc.Err { t.Fatalf("err: %s\n\n%s", tc.File, err) } } }
func testArchive(t *testing.T, config string) packer.Artifact { ui, artifact, err := setup(t) if err != nil { t.Fatalf("Error bootstrapping test: %s", err) } if artifact != nil { defer artifact.Destroy() } tpl, err := template.Parse(strings.NewReader(config)) if err != nil { t.Fatalf("Unable to parse test config: %s", err) } compressor := PostProcessor{} compressor.Configure(tpl.PostProcessors[0][0].Config) artifactOut, _, err := compressor.PostProcess(ui, artifact) if err != nil { t.Fatalf("Failed to compress artifact: %s", err) } return artifactOut }
func (c *FixCommand) Run(args []string) int { var flagValidate bool flags := c.Meta.FlagSet("fix", FlagSetNone) flags.BoolVar(&flagValidate, "validate", true, "") flags.Usage = func() { c.Ui.Say(c.Help()) } if err := flags.Parse(args); err != nil { return 1 } args = flags.Args() if len(args) != 1 { flags.Usage() return 1 } // Read the file for decoding tplF, err := os.Open(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error opening template: %s", err)) return 1 } defer tplF.Close() // Decode the JSON into a generic map structure var templateData map[string]interface{} decoder := json.NewDecoder(tplF) if err := decoder.Decode(&templateData); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing template: %s", err)) return 1 } // Close the file since we're done with that tplF.Close() input := templateData for _, name := range fix.FixerOrder { var err error fixer, ok := fix.Fixers[name] if !ok { panic("fixer not found: " + name) } log.Printf("Running fixer: %s", name) input, err = fixer.Fix(input) if err != nil { c.Ui.Error(fmt.Sprintf("Error fixing: %s", err)) return 1 } } var output bytes.Buffer encoder := json.NewEncoder(&output) if err := encoder.Encode(input); err != nil { c.Ui.Error(fmt.Sprintf("Error encoding: %s", err)) return 1 } var indented bytes.Buffer if err := json.Indent(&indented, output.Bytes(), "", " "); err != nil { c.Ui.Error(fmt.Sprintf("Error encoding: %s", err)) return 1 } result := indented.String() result = strings.Replace(result, `\u003c`, "<", -1) result = strings.Replace(result, `\u003e`, ">", -1) c.Ui.Say(result) if flagValidate { // Attemot to parse and validate the template tpl, err := template.Parse(strings.NewReader(result)) if err != nil { c.Ui.Error(fmt.Sprintf( "Error! Fixed template fails to parse: %s\n\n"+ "This is usually caused by an error in the input template.\n"+ "Please fix the error and try again.", err)) return 1 } if err := tpl.Validate(); err != nil { c.Ui.Error(fmt.Sprintf( "Error! 
Fixed template failed to validate: %s\n\n"+ "This is usually caused by an error in the input template.\n"+ "Please fix the error and try again.", err)) return 1 } } return 0 }
func (c BuildCommand) Run(args []string) int { var cfgColor, cfgDebug, cfgForce, cfgParallel bool flags := c.Meta.FlagSet("build", FlagSetBuildFilter|FlagSetVars) flags.Usage = func() { c.Ui.Say(c.Help()) } flags.BoolVar(&cfgColor, "color", true, "") flags.BoolVar(&cfgDebug, "debug", false, "") flags.BoolVar(&cfgForce, "force", false, "") flags.BoolVar(&cfgParallel, "parallel", true, "") if err := flags.Parse(args); err != nil { return 1 } args = flags.Args() if len(args) != 1 { flags.Usage() return 1 } // Parse the template var tpl *template.Template var err error if args[0] == "-" { tpl, err = template.Parse(os.Stdin) } else { tpl, err = template.ParseFile(args[0]) } if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse template: %s", err)) return 1 } // Get the core core, err := c.Meta.Core(tpl) if err != nil { c.Ui.Error(err.Error()) return 1 } // Get the builds we care about buildNames := c.Meta.BuildNames(core) builds := make([]packer.Build, 0, len(buildNames)) for _, n := range buildNames { b, err := core.Build(n) if err != nil { c.Ui.Error(fmt.Sprintf( "Failed to initialize build '%s': %s", n, err)) continue } builds = append(builds, b) } if cfgDebug { c.Ui.Say("Debug mode enabled. 
Builds will not be parallelized.") } // Compile all the UIs for the builds colors := [5]packer.UiColor{ packer.UiColorGreen, packer.UiColorCyan, packer.UiColorMagenta, packer.UiColorYellow, packer.UiColorBlue, } buildUis := make(map[string]packer.Ui) for i, b := range buildNames { var ui packer.Ui ui = c.Ui if cfgColor { ui = &packer.ColoredUi{ Color: colors[i%len(colors)], Ui: ui, } } buildUis[b] = ui ui.Say(fmt.Sprintf("%s output will be in this color.", b)) } // Add a newline between the color output and the actual output c.Ui.Say("") log.Printf("Build debug mode: %v", cfgDebug) log.Printf("Force build: %v", cfgForce) // Set the debug and force mode and prepare all the builds for _, b := range builds { log.Printf("Preparing build: %s", b.Name()) b.SetDebug(cfgDebug) b.SetForce(cfgForce) warnings, err := b.Prepare() if err != nil { c.Ui.Error(err.Error()) return 1 } if len(warnings) > 0 { ui := buildUis[b.Name()] ui.Say(fmt.Sprintf("Warnings for build '%s':\n", b.Name())) for _, warning := range warnings { ui.Say(fmt.Sprintf("* %s", warning)) } ui.Say("") } } // Run all the builds in parallel and wait for them to complete var interruptWg, wg sync.WaitGroup interrupted := false artifacts := make(map[string][]packer.Artifact) errors := make(map[string]error) for _, b := range builds { // Increment the waitgroup so we wait for this item to finish properly wg.Add(1) // Handle interrupts for this build sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, os.Interrupt) defer signal.Stop(sigCh) go func(b packer.Build) { <-sigCh interruptWg.Add(1) defer interruptWg.Done() interrupted = true log.Printf("Stopping build: %s", b.Name()) b.Cancel() log.Printf("Build cancelled: %s", b.Name()) }(b) // Run the build in a goroutine go func(b packer.Build) { defer wg.Done() name := b.Name() log.Printf("Starting build run: %s", name) ui := buildUis[name] runArtifacts, err := b.Run(ui, c.Cache) if err != nil { ui.Error(fmt.Sprintf("Build '%s' errored: %s", name, err)) errors[name] 
= err } else { ui.Say(fmt.Sprintf("Build '%s' finished.", name)) artifacts[name] = runArtifacts } }(b) if cfgDebug { log.Printf("Debug enabled, so waiting for build to finish: %s", b.Name()) wg.Wait() } if !cfgParallel { log.Printf("Parallelization disabled, waiting for build to finish: %s", b.Name()) wg.Wait() } if interrupted { log.Println("Interrupted, not going to start any more builds.") break } } // Wait for both the builds to complete and the interrupt handler, // if it is interrupted. log.Printf("Waiting on builds to complete...") wg.Wait() log.Printf("Builds completed. Waiting on interrupt barrier...") interruptWg.Wait() if interrupted { c.Ui.Say("Cleanly cancelled builds after being interrupted.") return 1 } if len(errors) > 0 { c.Ui.Machine("error-count", strconv.FormatInt(int64(len(errors)), 10)) c.Ui.Error("\n==> Some builds didn't complete successfully and had errors:") for name, err := range errors { // Create a UI for the machine readable stuff to be targetted ui := &packer.TargettedUi{ Target: name, Ui: c.Ui, } ui.Machine("error", err.Error()) c.Ui.Error(fmt.Sprintf("--> %s: %s", name, err)) } } if len(artifacts) > 0 { c.Ui.Say("\n==> Builds finished. 
The artifacts of successful builds are:") for name, buildArtifacts := range artifacts { // Create a UI for the machine readable stuff to be targetted ui := &packer.TargettedUi{ Target: name, Ui: c.Ui, } // Machine-readable helpful ui.Machine("artifact-count", strconv.FormatInt(int64(len(buildArtifacts)), 10)) for i, artifact := range buildArtifacts { var message bytes.Buffer fmt.Fprintf(&message, "--> %s: ", name) if artifact != nil { fmt.Fprintf(&message, artifact.String()) } else { fmt.Fprint(&message, "<nothing>") } iStr := strconv.FormatInt(int64(i), 10) if artifact != nil { ui.Machine("artifact", iStr, "builder-id", artifact.BuilderId()) ui.Machine("artifact", iStr, "id", artifact.Id()) ui.Machine("artifact", iStr, "string", artifact.String()) files := artifact.Files() ui.Machine("artifact", iStr, "files-count", strconv.FormatInt(int64(len(files)), 10)) for fi, file := range files { fiStr := strconv.FormatInt(int64(fi), 10) ui.Machine("artifact", iStr, "file", fiStr, file) } } else { ui.Machine("artifact", iStr, "nil") } ui.Machine("artifact", iStr, "end") c.Ui.Say(message.String()) } } } else { c.Ui.Say("\n==> Builds finished but no artifacts were created.") } if len(errors) > 0 { // If any errors occurred, exit with a non-zero exit status return 1 } return 0 }
// TestUploadDownload verifies that basic upload / download functionality works func TestUploadDownload(t *testing.T) { ui := packer.TestUi(t) cache := &packer.FileCache{CacheDir: os.TempDir()} tpl, err := template.Parse(strings.NewReader(dockerBuilderConfig)) if err != nil { t.Fatalf("Unable to parse config: %s", err) } if os.Getenv("PACKER_ACC") == "" { t.Skip("This test is only run with PACKER_ACC=1") } cmd := exec.Command("docker", "-v") cmd.Run() if !cmd.ProcessState.Success() { t.Error("docker command not found; please make sure docker is installed") } // Setup the builder builder := &Builder{} warnings, err := builder.Prepare(tpl.Builders["docker"].Config) if err != nil { t.Fatalf("Error preparing configuration %s", err) } if len(warnings) > 0 { t.Fatal("Encountered configuration warnings; aborting") } // Setup the provisioners upload := &file.Provisioner{} err = upload.Prepare(tpl.Provisioners[0].Config) if err != nil { t.Fatalf("Error preparing upload: %s", err) } download := &file.Provisioner{} err = download.Prepare(tpl.Provisioners[1].Config) if err != nil { t.Fatalf("Error preparing download: %s", err) } // Preemptive cleanup. Honestly I don't know why you would want to get rid // of my strawberry cake. It's so tasty! Do you not like cake? Are you a // cake-hater? Or are you keeping all the cake all for yourself? So selfish! defer os.Remove("my-strawberry-cake") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} hooks[packer.HookProvision] = []packer.Hook{ &packer.ProvisionHook{ Provisioners: []packer.Provisioner{ upload, download, }, }, } hook := &packer.DispatchHook{Mapping: hooks} // Run things artifact, err := builder.Run(ui, hook, cache) if err != nil { t.Fatalf("Error running build %s", err) } // Preemptive cleanup defer artifact.Destroy() // Verify that the thing we downloaded is the same thing we sent up. // Complain loudly if it isn't. 
inputFile, err := ioutil.ReadFile("test-fixtures/onecakes/strawberry") if err != nil { t.Fatalf("Unable to read input file: %s", err) } outputFile, err := ioutil.ReadFile("my-strawberry-cake") if err != nil { t.Fatalf("Unable to read output file: %s", err) } if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { t.Fatalf("Input and output files do not match\n"+ "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile) } }
// TestLargeDownload verifies that files are the apporpriate size after being // downloaded. This is to identify and fix the race condition in #2793. You may // need to use github.com/cbednarski/rerun to verify since this problem occurs // only intermittently. func TestLargeDownload(t *testing.T) { ui := packer.TestUi(t) cache := &packer.FileCache{CacheDir: os.TempDir()} tpl, err := template.Parse(strings.NewReader(dockerLargeBuilderConfig)) if err != nil { t.Fatalf("Unable to parse config: %s", err) } if os.Getenv("PACKER_ACC") == "" { t.Skip("This test is only run with PACKER_ACC=1") } cmd := exec.Command("docker", "-v") cmd.Run() if !cmd.ProcessState.Success() { t.Error("docker command not found; please make sure docker is installed") } // Setup the builder builder := &Builder{} warnings, err := builder.Prepare(tpl.Builders["docker"].Config) if err != nil { t.Fatalf("Error preparing configuration %s", err) } if len(warnings) > 0 { t.Fatal("Encountered configuration warnings; aborting") } // Setup the provisioners shell := &shell.Provisioner{} err = shell.Prepare(tpl.Provisioners[0].Config) if err != nil { t.Fatalf("Error preparing shell provisioner: %s", err) } downloadCupcake := &file.Provisioner{} err = downloadCupcake.Prepare(tpl.Provisioners[1].Config) if err != nil { t.Fatalf("Error preparing downloadCupcake: %s", err) } downloadBigcake := &file.Provisioner{} err = downloadBigcake.Prepare(tpl.Provisioners[2].Config) if err != nil { t.Fatalf("Error preparing downloadBigcake: %s", err) } // Preemptive cleanup. 
defer os.Remove("cupcake") defer os.Remove("bigcake") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} hooks[packer.HookProvision] = []packer.Hook{ &packer.ProvisionHook{ Provisioners: []packer.Provisioner{ shell, downloadCupcake, downloadBigcake, }, }, } hook := &packer.DispatchHook{Mapping: hooks} // Run things artifact, err := builder.Run(ui, hook, cache) if err != nil { t.Fatalf("Error running build %s", err) } // Preemptive cleanup defer artifact.Destroy() // Verify that the things we downloaded are the right size. Complain loudly // if they are not. // // cupcake should be 2097152 bytes // bigcake should be 104857600 bytes cupcake, err := os.Stat("cupcake") if err != nil { t.Fatalf("Unable to stat cupcake file: %s", err) } cupcakeExpected := int64(2097152) if cupcake.Size() != cupcakeExpected { t.Errorf("Expected cupcake to be %d bytes; found %d", cupcakeExpected, cupcake.Size()) } bigcake, err := os.Stat("bigcake") if err != nil { t.Fatalf("Unable to stat bigcake file: %s", err) } bigcakeExpected := int64(104857600) if bigcake.Size() != bigcakeExpected { t.Errorf("Expected bigcake to be %d bytes; found %d", bigcakeExpected, bigcake.Size()) } // TODO if we can, calculate a sha inside the container and compare to the // one we get after we pull it down. We will probably have to parse the log // or ui output to do this because we use /dev/urandom to create the file. // if sha256.Sum256(inputFile) != sha256.Sum256(outputFile) { // t.Fatalf("Input and output files do not match\n"+ // "Input:\n%s\nOutput:\n%s\n", inputFile, outputFile) // } }
// Test performs an acceptance test on a backend with the given test case.
//
// Tests are not run unless an environmental variable "TF_ACC" is
// set to some non-empty value. This is to avoid test cases surprising
// a user by creating real resources.
//
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
// the "-test.v" flag) is set. Because some acceptance tests take quite
// long, we require the verbose flag so users are able to see progress
// output.
func Test(t TestT, c TestCase) {
	// We only run acceptance tests if an env var is set because they're
	// slow and generally require some outside configuration.
	if os.Getenv(TestEnvVar) == "" {
		t.Skip(fmt.Sprintf(
			"Acceptance tests skipped unless env '%s' set",
			TestEnvVar))
		// NOTE(review): the explicit return after Skip/Fatal throughout this
		// function suggests TestT implementations may not halt execution the
		// way *testing.T does — TODO confirm against the TestT interface.
		return
	}

	// We require verbose mode so that the user knows what is going on.
	if !testTesting && !testing.Verbose() {
		t.Fatal("Acceptance tests must be run with the -v flag on tests")
		return
	}

	// Run the PreCheck if we have it
	if c.PreCheck != nil {
		c.PreCheck()
	}

	// Parse the template
	log.Printf("[DEBUG] Parsing template...")
	tpl, err := template.Parse(strings.NewReader(c.Template))
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
		return
	}

	// Build the core: the component finder only resolves the builder named
	// "test" (to the case's Builder); any other name resolves to nil.
	log.Printf("[DEBUG] Initializing core...")
	core, err := packer.NewCore(&packer.CoreConfig{
		Components: packer.ComponentFinder{
			Builder: func(n string) (packer.Builder, error) {
				if n == "test" {
					return c.Builder, nil
				}

				return nil, nil
			},
		},
		Template: tpl,
	})
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
		return
	}

	// Get the build
	log.Printf("[DEBUG] Retrieving 'test' build")
	build, err := core.Build("test")
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err))
		return
	}

	// Prepare it
	log.Printf("[DEBUG] Preparing 'test' build")
	warnings, err := build.Prepare()
	if err != nil {
		t.Fatal(fmt.Sprintf("Prepare error: %s", err))
		return
	}
	if len(warnings) > 0 {
		// Warnings are treated as fatal for acceptance tests.
		t.Fatal(fmt.Sprintf(
			"Prepare warnings:\n\n%s",
			strings.Join(warnings, "\n")))
		return
	}

	// Run it! We use a temporary directory for caching and discard
	// any UI output. We discard since it shows up in logs anyways.
	log.Printf("[DEBUG] Running 'test' build")
	cache := &packer.FileCache{CacheDir: os.TempDir()}
	ui := &packer.BasicUi{
		Reader:      os.Stdin,
		Writer:      ioutil.Discard,
		ErrorWriter: ioutil.Discard,
	}
	artifacts, err := build.Run(ui, cache)
	if err != nil {
		// Jump to teardown so any artifacts produced before the failure
		// are still destroyed. (artifacts is declared before both gotos,
		// which keeps the jumps legal in Go.)
		t.Fatal(fmt.Sprintf("Run error:\n\n%s", err))
		goto TEARDOWN
	}

	// Check function
	if c.Check != nil {
		log.Printf("[DEBUG] Running check function")
		if err := c.Check(artifacts); err != nil {
			t.Fatal(fmt.Sprintf("Check error:\n\n%s", err))
			goto TEARDOWN
		}
	}

TEARDOWN:
	// Artifact teardown runs on both the success path (fall-through) and
	// the failure paths (goto) unless the case opts out.
	if !c.SkipArtifactTeardown {
		// Delete all artifacts
		for _, a := range artifacts {
			if err := a.Destroy(); err != nil {
				t.Error(fmt.Sprintf(
					"!!! ERROR REMOVING ARTIFACT '%s': %s !!!",
					a.String(), err))
			}
		}
	}

	// Teardown
	if c.Teardown != nil {
		log.Printf("[DEBUG] Running teardown function")
		if err := c.Teardown(); err != nil {
			t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err))
			return
		}
	}
}