// TestTrustedPushWithExpiredSnapshot verifies that pushing to a trusted repo
// fails once the snapshot metadata has expired (snapshots last three years).
// NOTE(review): skipped because runAtDifferentDate changes the system clock.
func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) {
	c.Skip("Currently changes system time, causing instability")
	repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	// Push with default passphrases
	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("trusted push failed: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// Snapshots last for three years. This should be expired
	fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4)

	runAtDifferentDate(fourYearsLater, func() {
		// Push with wrong passphrases
		pushCmd = exec.Command(dockerBinary, "push", repoName)
		s.trustedCmd(pushCmd)
		out, _, err = runCommandWithOutput(pushCmd)
		if err == nil {
			c.Fatalf("Error missing from trusted push with expired snapshot: \n%s", out)
		}

		if !strings.Contains(string(out), "repository out-of-date") {
			c.Fatalf("Missing expected output on trusted push with expired snapshot:\n%s", out)
		}
	})
}
func TestBuildCacheADD(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "1") buildCmd := exec.Command(dockerBinary, "build", "-t", "testcacheadd1", ".") buildCmd.Dir = buildDirectory exitCode, err := runCommand(buildCmd) errorOut(err, t, fmt.Sprintf("build failed to complete: %v", err)) if err != nil || exitCode != 0 { t.Fatal("failed to build the image") } buildDirectory = filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "2") buildCmd = exec.Command(dockerBinary, "build", "-t", "testcacheadd2", ".") buildCmd.Dir = buildDirectory out, exitCode, err := runCommandWithOutput(buildCmd) errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) if err != nil || exitCode != 0 { t.Fatal("failed to build the image") } if strings.Contains(out, "Using cache") { t.Fatal("2nd build used cache on ADD, it shouldn't") } deleteImages("testcacheadd1") deleteImages("testcacheadd2") logDone("build - build two images with ADD") }
// ForceUnmount attempts to forcibly unmount a given mount. // It does so by calling diskutil or fusermount directly. func ForceUnmount(m Mount) error { point := m.MountPoint() log.Warningf("Force-Unmounting %s...", point) var cmd *exec.Cmd switch runtime.GOOS { case "darwin": cmd = exec.Command("diskutil", "umount", "force", point) case "linux": cmd = exec.Command("fusermount", "-u", point) default: return fmt.Errorf("unmount: unimplemented") } errc := make(chan error, 1) go func() { defer close(errc) // try vanilla unmount first. if err := exec.Command("umount", point).Run(); err == nil { return } // retry to unmount with the fallback cmd errc <- cmd.Run() }() select { case <-time.After(7 * time.Second): return fmt.Errorf("umount timeout") case err := <-errc: return err } }
// TestStatStdin verifies that Stat on Stdin reports a named pipe when the
// process's stdin is connected to a pipe. The test re-execs itself as a
// helper process (GO_WANT_HELPER_PROCESS=1) on the receiving end of a shell
// pipeline and inspects the helper's printed mode bits.
func TestStatStdin(t *testing.T) {
	switch runtime.GOOS {
	case "android", "plan9":
		t.Skipf("%s doesn't have /bin/sh", runtime.GOOS)
	}
	testenv.MustHaveExec(t)

	// Helper-process branch: print Stdin's mode masked to the named-pipe bit.
	if Getenv("GO_WANT_HELPER_PROCESS") == "1" {
		st, err := Stdin.Stat()
		if err != nil {
			t.Fatalf("Stat failed: %v", err)
		}
		fmt.Println(st.Mode() & ModeNamedPipe)
		Exit(0)
	}

	// Parent branch: pipe "echo output" into a re-exec of this very test.
	var cmd *osexec.Cmd
	if runtime.GOOS == "windows" {
		cmd = osexec.Command("cmd", "/c", "echo output | "+Args[0]+" -test.run=TestStatStdin")
	} else {
		cmd = osexec.Command("/bin/sh", "-c", "echo output | "+Args[0]+" -test.run=TestStatStdin")
	}
	cmd.Env = append(Environ(), "GO_WANT_HELPER_PROCESS=1")

	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("Failed to spawn child process: %v %q", err, string(output))
	}

	// result will be like "prw-rw-rw"; a leading 'p' marks a named pipe.
	if len(output) < 1 || output[0] != 'p' {
		t.Fatalf("Child process reports stdin is not pipe '%v'", string(output))
	}
}
func TestPushRepoWithSingleTag(t *testing.T) { var cmd *exec.Cmd var err error var out string reponame := "busybox" repotag := "latest" repoBase := reponame + ":" + repotag repoDest := Domains + "/" + UserName + "/" + repoBase cmd = exec.Command(DockerBinary, "tag", "-f", repoBase, repoDest) if out, err = ParseCmdCtx(cmd); err != nil { t.Fatalf("Tag %v failed: [Info]%v, [Error]%v", repoBase, out, err) } //push the same repository with specified tag more than once to cover related code processing branch for i := 1; i <= 2; i++ { cmd = exec.Command(DockerBinary, "push", repoDest) if out, err = ParseCmdCtx(cmd); err != nil { t.Fatalf("Push %v failed: [Info]%v, [Error]%v", repoDest, out, err) } } cmd = exec.Command(DockerBinary, "rmi", repoDest) out, err = ParseCmdCtx(cmd) }
func verifyCertWithSystem(block *pem.Block, add func(*Certificate)) { data := pem.EncodeToMemory(block) var cmd *exec.Cmd if needsTmpFiles() { f, err := ioutil.TempFile("", "cert") if err != nil { fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err) return } defer os.Remove(f.Name()) if _, err := f.Write(data); err != nil { fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) return } if err := f.Close(); err != nil { fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) return } cmd = exec.Command("/usr/bin/security", "verify-cert", "-c", f.Name(), "-l") } else { cmd = exec.Command("/usr/bin/security", "verify-cert", "-c", "/dev/stdin", "-l") cmd.Stdin = bytes.NewReader(data) } if cmd.Run() == nil { // Non-zero exit means untrusted cert, err := ParseCertificate(block.Bytes) if err != nil { return } add(cert) } }
// TestTags verifies that the -tags argument controls which files to check. func TestTags(t *testing.T) { // go build cmd := exec.Command("go", "build", "-o", binary) run(cmd, t) // defer removal of vet defer os.Remove(binary) args := []string{ "-tags=testtag", "-v", // We're going to look at the files it examines. "testdata/tagtest", } cmd = exec.Command("./"+binary, args...) output, err := cmd.CombinedOutput() if err != nil { t.Fatal(err) } // file1 has testtag and file2 has !testtag. if !bytes.Contains(output, []byte(filepath.Join("tagtest", "file1.go"))) { t.Error("file1 was excluded, should be included") } if bytes.Contains(output, []byte(filepath.Join("tagtest", "file2.go"))) { t.Error("file2 was included, should be excluded") } }
// runInstall installs the given image onto device. When installType is
// "generic" it first partitions the disk via the image's set-disk-partitions
// script, then runs the image's installer with the chosen install type and
// cloud config. Unless force is set, the user is prompted first and
// os.Exit(1) is called on refusal. When reboot is set (and confirmed), the
// machine is rebooted after a successful install.
func runInstall(image, installType, cloudConfig, device string, force, reboot bool) error {
	in := bufio.NewReader(os.Stdin)

	fmt.Printf("Installing from %s\n", image)

	if !force {
		if !yes(in, "Continue") {
			os.Exit(1)
		}
	}

	if installType == "generic" {
		// Partition the target disk before the main installer runs.
		cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=all-volumes",
			"--entrypoint=/scripts/set-disk-partitions", image, device)
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		if err := cmd.Run(); err != nil {
			return err
		}
	}

	cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=user-volumes", image,
		"-d", device, "-t", installType, "-c", cloudConfig)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}

	if reboot && yes(in, "Continue with reboot") {
		log.Info("Rebooting")
		power.Reboot()
	}

	return nil
}
// setupRamDisk creates a 1 MB HFS ram disk (darwin-only: hdiutil/newfs_hfs)
// and mounts it at path, creating the mount point directory if needed.
//
// Fix: the function is declared to return an error but previously called
// log.Fatal on every failure and could only ever return nil, making the
// error return dead and preventing callers from handling failures. Failures
// are now returned to the caller.
func setupRamDisk(path string) error {
	// 1 MB (2048 512-byte sectors)
	diskPath, err := exec.Command("hdiutil", "attach", "-nomount", "ram://2048").Output()
	if err != nil {
		return fmt.Errorf("creating ramdisk: %v", err)
	}
	diskPathStr := strings.TrimSpace(string(diskPath))
	log.Printf("Created ramdisk at %s", diskPathStr)

	if err := exec.Command("newfs_hfs", diskPathStr).Run(); err != nil {
		return fmt.Errorf("formatting ramdisk: %v", err)
	}
	log.Printf("Formatted ramdisk as HFS.")

	if _, err := os.Stat(path); os.IsNotExist(err) {
		if err := os.Mkdir(path, 0700); err != nil {
			return fmt.Errorf("making dir for ramdisk: %v", err)
		}
	}

	if err := exec.Command("mount", "-t", "hfs", diskPathStr, path).Run(); err != nil {
		return fmt.Errorf("mounting ramdisk: %v", err)
	}
	log.Printf("Ramdisk mounted at %s", path)
	return nil
}
func (spec SpecPackage) checkPackage(defaults PlatformDefaults) (err error) { var cmd *exec.Cmd switch spec.Type { case "rpm": rpm, err := exec.LookPath("rpm") if err != nil { return err } cmd = exec.Command(rpm, "-q", spec.Name) case "dpkg": dpkg, err := exec.LookPath("dpkg") if err != nil { return err } cmd = exec.Command(dpkg, "-L", spec.Name) case "pacman": // TODO case "ebuild": // TODO case "homebrew": cmd = exec.Command("/usr/local/bin/brew", "ls", spec.Name) case "gem": cmd = exec.Command("/bin/bash", "-ic", "gem contents "+spec.Name) default: return errors.New("Unknown package manager type " + spec.Type) } err = cmd.Run() if err != nil && !spec.Absent { return err } else { return nil } }
// main emulates the shell pipeline `df -h | sort`, wiring df's stdout into
// sort's stdin and writing the sorted result to this process's stdout.
func main() {
	producer := exec.Command("df", "-h")
	consumer := exec.Command("sort")

	pipe, err := producer.StdoutPipe()
	if err != nil {
		fmt.Println("failed to create pipe:", err)
		return
	}
	defer pipe.Close()

	producer.Stdin = os.Stdin
	producer.Stderr = os.Stderr

	// the stdout of producer feeds the stdin of consumer
	consumer.Stdin = pipe
	consumer.Stdout = os.Stdout
	consumer.Stderr = os.Stderr

	if err = producer.Start(); err != nil {
		fmt.Println("process #1 failed to start:", err)
	}
	// we Wait on producer to clean up its process entry when done
	// (otherwise, it'll remain as a zombie process until this process exits)
	defer producer.Wait()

	if err = consumer.Run(); err != nil {
		fmt.Println("process #2 failed:", err)
	}
}
func TestCommitAfterContainerIsDone(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) _, _, err = runCommandWithOutput(waitCmd) errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) out, _, err = runCommandWithOutput(inspectCmd) errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) logDone("commit - echo foo and commit the image") }
// TestPushInterrupt kills an in-flight push and checks that a subsequent
// push reports the first one as still in progress on the daemon side.
func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	pushCmd := exec.Command(dockerBinary, "push", repoName)
	if err := pushCmd.Start(); err != nil {
		c.Fatalf("Failed to start pushing to private registry: %v", err)
	}

	// Interrupt push (yes, we have no idea at what point it will get killed).
	time.Sleep(50 * time.Millisecond) // dependent on race condition.
	if err := pushCmd.Process.Kill(); err != nil {
		c.Fatalf("Failed to kill push process: %v", err)
	}
	if out, _, err := dockerCmdWithError("push", repoName); err == nil {
		if !strings.Contains(out, "already in progress") {
			c.Fatalf("Push should be continued on daemon side, but seems ok: %v, %s", err, out)
		}
	}
	// now wait until all this pushes will complete
	// if it failed with timeout - there would be some error,
	// so no logic about it here
	// NOTE(review): this retry loop busy-waits with no backoff and no upper
	// bound; it relies entirely on the push eventually succeeding.
	for exec.Command(dockerBinary, "push", repoName).Run() != nil {
	}
}
// TestTrustedPushWithExpiredTimestamp verifies that a trusted push still
// succeeds after the timestamp metadata's ~two-week expiry, because the
// notary server transparently re-signs it.
// NOTE(review): skipped because runAtDifferentDate changes the system clock.
func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) {
	c.Skip("Currently changes system time, causing instability")
	repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	// Push with default passphrases
	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("trusted push failed: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// The timestamps expire in two weeks. Lets check three
	threeWeeksLater := time.Now().Add(time.Hour * 24 * 21)

	// Should succeed because the server transparently re-signs one
	runAtDifferentDate(threeWeeksLater, func() {
		pushCmd := exec.Command(dockerBinary, "push", repoName)
		s.trustedCmd(pushCmd)
		out, _, err := runCommandWithOutput(pushCmd)
		if err != nil {
			c.Fatalf("Error running trusted push: %s\n%s", err, out)
		}
		if !strings.Contains(string(out), "Signing and pushing trust metadata") {
			c.Fatalf("Missing expected output on trusted push with expired timestamp:\n%s", out)
		}
	})
}
func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil { c.Fatal(err) } type dstop struct { out []byte err error } ch := make(chan dstop) go func() { out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() ch <- dstop{out, err} close(ch) }() select { case <-time.After(3 * time.Second): c.Fatal("Container stop timed out") case s := <-ch: c.Assert(s.err, check.IsNil) } }
// teardownRamDisk force-detaches the ram disk mounted at path, retrying once
// after a second on failure (hdiutil sometimes reports resource-busy), then
// removes the mount point directory when it lives under /tmp.
func teardownRamDisk(path string) error {
	detach := func() ([]byte, error) {
		return exec.Command("hdiutil", "detach", "-force", path).CombinedOutput()
	}

	out, err := detach()
	if err != nil {
		// Sometimes there's a resource-busy error...sleep and retry
		time.Sleep(time.Second)
		out, err = detach()
	}
	if err != nil {
		log.Print("Umounting/ejecting ramdisk: ", err, " ", string(out))
		return err
	}
	log.Printf("Ramdisk %s unmounted and ejected.", path)

	// rm -r is dangerous... only clean up mount points that live under /tmp.
	if isMatch, _ := regexp.MatchString("\\A/tmp/[^/]+", path); isMatch {
		out, err = exec.Command("rm", "-r", path).CombinedOutput()
		if err != nil {
			log.Print("rm -r: ", err, " ", string(out))
			return err
		}
		log.Printf("Mountpoint folder %s removed.", path)
	}
	return nil
}
// buildBenchmark builds the benchmark binary.
// It runs `go get -d [-u] <benchPath>` with GOROOT/GOPATH pointed into
// workpath, then `go build -o <workpath>/benchbin <benchPath>`.
// On failure the accumulated build log is returned alongside the error;
// on success the log result is empty.
func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) {
	goroot := filepath.Join(workpath, "go")
	gobin := filepath.Join(goroot, "bin", "go") + exeExt
	gopath := filepath.Join(*buildroot, "gopath")
	env := append([]string{
		"GOROOT=" + goroot,
		"GOPATH=" + gopath},
		b.envv()...)

	// First, download without installing.
	args := []string{"get", "-d"}
	if update {
		args = append(args, "-u")
	}
	args = append(args, *benchPath)
	var buildlog bytes.Buffer
	runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)}
	err = run(exec.Command(gobin, args...), runOpts...)
	if err != nil {
		fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err)
		return "", buildlog.String(), err
	}

	// Then, build into workpath.
	benchBin = filepath.Join(workpath, "benchbin") + exeExt
	args = []string{"build", "-o", benchBin, *benchPath}
	// Reuse the same log buffer for the build step's output.
	buildlog.Reset()
	err = run(exec.Command(gobin, args...), runOpts...)
	if err != nil {
		fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err)
		return "", buildlog.String(), err
	}
	return benchBin, "", nil
}
func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { // TODO Windows CI: Requires some extra work. Consider copying the // runSleepingContainer helper to have an exec version. testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") err := exec.Command(dockerBinary, "exec", "testing", "top").Start() c.Assert(err, checker.IsNil) type dstop struct { out []byte err error } ch := make(chan dstop) go func() { out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() ch <- dstop{out, err} close(ch) }() select { case <-time.After(3 * time.Second): c.Fatal("Container stop timed out") case s := <-ch: c.Assert(s.err, check.IsNil) } }
// Run this shell script, but do it in Go so it can be run by "go test". // go build -o testvet // $(GOROOT)/test/errchk ./testvet -shadow -printfuncs='Warn:1,Warnf:1' testdata/*.go testdata/*.s // rm testvet // func TestVet(t *testing.T) { // Plan 9 and Windows systems can't be guaranteed to have Perl and so can't run errchk. switch runtime.GOOS { case "plan9", "windows": t.Skip("skipping test; no Perl on %q", runtime.GOOS) } // go build cmd := exec.Command("go", "build", "-o", binary) run(cmd, t) // defer removal of vet defer os.Remove(binary) // errchk ./testvet gos, err := filepath.Glob(filepath.Join(dataDir, "*.go")) if err != nil { t.Fatal(err) } asms, err := filepath.Glob(filepath.Join(dataDir, "*.s")) if err != nil { t.Fatal(err) } files := append(gos, asms...) errchk := filepath.Join(runtime.GOROOT(), "test", "errchk") flags := []string{ "./" + binary, "-printfuncs=Warn:1,Warnf:1", "-test", // TODO: Delete once -shadow is part of -all. } cmd = exec.Command(errchk, append(flags, files...)...) if !run(cmd, t) { t.Fatal("vet command failed") } }
// TestExecWithPrivileged verifies that --privileged on `docker exec` grants
// device-creation rights inside a --cap-drop=ALL container, and that the
// privilege does not leak into earlier or later unprivileged execs.
func (s *DockerSuite) TestExecWithPrivileged(c *check.C) {
	// Not applicable on Windows
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	// Start main loop which attempts mknod repeatedly
	dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`)

	// Check exec mknod doesn't work
	cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16")
	out, _, err := runCommandWithOutput(cmd)
	c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail"))
	c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail"))

	// Check exec mknod does work with --privileged
	cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`)
	out, _, err = runCommandWithOutput(cmd)
	c.Assert(err, checker.IsNil)

	actual := strings.TrimSpace(out)
	c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out))

	// Check subsequent unprivileged exec cannot mknod
	cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32")
	out, _, err = runCommandWithOutput(cmd)
	c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail"))
	c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail"))

	// Confirm at no point was mknod allowed: the background loop would have
	// echoed "Success" into the container logs.
	logCmd := exec.Command(dockerBinary, "logs", "parent")
	out, _, err = runCommandWithOutput(logCmd)
	c.Assert(err, checker.IsNil)
	c.Assert(out, checker.Not(checker.Contains), "Success")
}
func _spawn(command string) (*ExpectSubprocess, error) { wrapper := new(ExpectSubprocess) wrapper.outputBuffer = nil splitArgs, err := shell.Split(command) if err != nil { return nil, err } numArguments := len(splitArgs) - 1 if numArguments < 0 { return nil, errors.New("gexpect: No command given to spawn") } path, err := exec.LookPath(splitArgs[0]) if err != nil { return nil, err } if numArguments >= 1 { wrapper.Cmd = exec.Command(path, splitArgs[1:]...) } else { wrapper.Cmd = exec.Command(path) } wrapper.buf = new(buffer) return wrapper, nil }
// TestSaveMultipleNames saves two tags of emptyfs in a single `docker save`
// invocation and checks, via the archive's repositories file, that both
// names made it into the tar.
func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
	repoName := "foobar-save-multi-name-test"

	// Make one image
	tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName))
	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
		c.Fatalf("failed to tag repo: %s, %v", out, err)
	}

	// Make a second image
	tagCmd = exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName))
	out, _, err := runCommandWithOutput(tagCmd)
	if err != nil {
		c.Fatalf("failed to tag repo: %s, %v", out, err)
	}

	// Save both names, extract the repositories manifest from the tar, and
	// grep for either tag; grep -q exits non-zero when nothing matches.
	out, _, err = runCommandPipelineWithOutput(
		exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)),
		exec.Command("tar", "xO", "repositories"),
		exec.Command("grep", "-q", "-E", "(-one|-two)"),
	)
	if err != nil {
		c.Fatalf("failed to save multiple repos: %s, %v", out, err)
	}
}
func setBash() { findcmd := "which" if runtime.GOOS == "windows" { // Can't use paths returned from which even if it's on PATH in Windows // Because our Go binary is a separate Windows app & not MinGW, it // can't understand paths like '/usr/bin/bash', needs Windows version findcmd = "where" } out, err := exec.Command(findcmd, "bash").Output() if err != nil { fmt.Println("Unable to find bash:", err) os.Exit(1) } bashPath = strings.TrimSpace(string(out)) if debugging { fmt.Println("Using", bashPath) } // Test _, err = exec.Command(bashPath, "--version").CombinedOutput() if err != nil { fmt.Println("Error calling bash:", err) os.Exit(1) } }
// TestCreateWhenCertExpired checks `docker create` behavior on a trusted
// image after the signing certificates' 10-year lifetime has elapsed.
// NOTE(review): skipped because runAtDifferentDate changes the system clock.
func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) {
	c.Skip("Currently changes system time, causing instability")
	repoName := s.setupTrustedImage(c, "trusted-create-expired")

	// Certificates have 10 years of expiration
	elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11)

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try create
		createCmd := exec.Command(dockerBinary, "create", repoName)
		s.trustedCmd(createCmd)
		out, _, err := runCommandWithOutput(createCmd)
		c.Assert(err, check.Not(check.IsNil))
		c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out))
	})

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try create with content trust disabled.
		// NOTE(review): this still asserts err != nil even though
		// --disable-content-trust should bypass trust expiry — confirm the
		// expected failure mode before relying on this assertion.
		createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName)
		s.trustedCmd(createCmd)
		out, _, err := runCommandWithOutput(createCmd)
		c.Assert(err, check.Not(check.IsNil))
		c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out))
	})
}
func TestPushRepoWithMultipleTags(t *testing.T) { var cmd *exec.Cmd var err error var out string reponame := "busybox" repotags := []string{"latest", "1.0", "2.0"} repoBase := reponame + ":" + repotags[0] //pull busybox:latest from docker hub repoDest := Domains + "/" + UserName + "/" + reponame for _, v := range repotags { tag := repoDest + ":" + v cmd = exec.Command(DockerBinary, "tag", "-f", repoBase, tag) if out, err = ParseCmdCtx(cmd); err != nil { t.Fatalf("Tag %v failed: [Info]%v, [Error]%v", repoBase, out, err) } } //push the same repository with multiple tags more than once to cover related code processing branch for i := 1; i <= 2; i++ { cmd = exec.Command(DockerBinary, "push", repoDest) if out, err = ParseCmdCtx(cmd); err != nil { t.Fatalf("Push all tags %v failed: [Info]%v, [Error]%v", repoDest, out, err) } } for _, v := range repotags { tag := repoDest + ":" + v cmd = exec.Command(DockerBinary, "rmi", tag) out, err = ParseCmdCtx(cmd) } }
func updateGitFolder(path string) { fmt.Println() fmt.Println("Path: " + path) if _, err := os.Stat(path); os.IsNotExist(err) { fmt.Println("Not exists! Ignored") return } if err := os.Chdir(path); err != nil { fmt.Println("Access denied") return } if _, err := os.Stat(".git"); os.IsNotExist(err) { fmt.Println("Not a GIT folder! Ignored") return } fmt.Println("Revert all changes...") runCommand(exec.Command("git", "reset", "--hard", "HEAD")) fmt.Println("Pulling...") runCommand(exec.Command("git", "pull")) if _, err := os.Stat("deploy.sh"); err == nil { fmt.Println("Running external deploy.sh script...") runCommand(exec.Command("./deploy.sh")) } if _, err := os.Stat("build.sh"); err == nil { fmt.Println("Running external build.sh script...") runCommand(exec.Command("./build.sh")) } fmt.Println("Done") }
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
//
// Builds a three-generation image chain where the grandparent declares an
// ONBUILD trigger, and asserts the trigger fires in the child build but not
// again in the grandchild build.
func TestBuildOnBuildLimitedInheritence(t *testing.T) {
	// Generation 1: declares the ONBUILD trigger.
	name1 := "testonbuildtrigger1"
	dockerfile1 := `
	FROM busybox
	RUN echo "GRANDPARENT"
	ONBUILD RUN echo "ONBUILD PARENT"
	`
	ctx1, err := fakeContext(dockerfile1, nil)
	if err != nil {
		t.Fatal(err)
	}

	buildCmd := exec.Command(dockerBinary, "build", "-t", name1, ".")
	buildCmd.Dir = ctx1.Dir
	out1, _, err := runCommandWithOutput(buildCmd)
	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err))
	defer deleteImages(name1)

	// Generation 2: the trigger should fire here.
	name2 := "testonbuildtrigger2"
	dockerfile2 := `
	FROM testonbuildtrigger1
	`
	ctx2, err := fakeContext(dockerfile2, nil)
	if err != nil {
		t.Fatal(err)
	}

	buildCmd = exec.Command(dockerBinary, "build", "-t", name2, ".")
	buildCmd.Dir = ctx2.Dir
	out2, _, err := runCommandWithOutput(buildCmd)
	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err))
	defer deleteImages(name2)

	// Generation 3: the trigger must NOT fire again.
	name3 := "testonbuildtrigger3"
	dockerfile3 := `
	FROM testonbuildtrigger2
	`
	ctx3, err := fakeContext(dockerfile3, nil)
	if err != nil {
		t.Fatal(err)
	}

	buildCmd = exec.Command(dockerBinary, "build", "-t", name3, ".")
	buildCmd.Dir = ctx3.Dir
	out3, _, err := runCommandWithOutput(buildCmd)
	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err))
	defer deleteImages(name3)

	// ONBUILD should be run in second build.
	if !strings.Contains(out2, "ONBUILD PARENT") {
		t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent")
	}
	// ONBUILD should *not* be run in third build.
	if strings.Contains(out3, "ONBUILD PARENT") {
		t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
	}

	logDone("build - onbuild")
}
// RenderLocal creates or reconfigures a local user account.
// When a is nil the user does not yet exist and is created with useradd;
// otherwise usermod is invoked with only the fields marked in changes
// (shell, home directory), applying the desired values from e. If nothing
// changed, no command is run.
func (_ *UserTask) RenderLocal(t *local.LocalTarget, a, e, changes *UserTask) error {
	if a == nil {
		args := buildUseraddArgs(e)
		glog.Infof("Creating user %q", e.Name)
		cmd := exec.Command("useradd", args...)
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("error creating user: %v\nOutput: %s", err, output)
		}
	} else {
		var args []string

		// changes marks which fields differ; the new values come from e.
		if changes.Shell != "" {
			args = append(args, "-s", e.Shell)
		}
		if changes.Home != "" {
			args = append(args, "-d", e.Home)
		}

		if len(args) != 0 {
			args = append(args, e.Name)
			glog.Infof("Reconfiguring user %q", e.Name)
			cmd := exec.Command("usermod", args...)
			output, err := cmd.CombinedOutput()
			if err != nil {
				return fmt.Errorf("error reconfiguring user: %v\nOutput: %s", err, output)
			}
		}
	}

	return nil
}
// CpuInfo retrieves the CPU information of the system like number of physical and logical cores // in the system and processor model information. func CpuInfo() map[string]string { cpuMap := make(map[string]string) // Read the CPU information // Total number of logical cores in the system numLogicalCores := strconv.Itoa(runtime.NumCPU()) if numLogicalCores != "" { cpuMap["logicalCores"] = numLogicalCores } else { cpuMap["logicalCores"] = "Error: The number of logical cores for the current system could not be retrieved." } // Total number of physical cores in the system cmdNumProcessors := exec.Command("/bin/sh", "-c", "cat /proc/cpuinfo | grep processor | wc -l") outputNumProcessors, err := cmdNumProcessors.Output() if err != nil { cpuMap["physicalCores"] = "Error: The number of physical cores for the current system could not be retrieved." } else { cpuMap["physicalCores"] = string(outputNumProcessors) } // Processor Model Name cmdProcessorModel := exec.Command("/bin/sh", "-c", "cat /proc/cpuinfo | grep 'model name' | uniq | awk '{print substr($0, index($0,$4))}'") outputProcessorModel, err := cmdProcessorModel.Output() if err != nil { cpuMap["processorModel"] = "Error: The processor model for the current system could not be retrieved." } else { cpuMap["processorModel"] = string(outputProcessorModel) } return cpuMap }
// TestTrustedPushWithIncorrectPassphraseForNonRoot verifies that after a
// successful trusted push, a second push supplying wrong signing passphrases
// fails with a password-invalid error.
func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	// Push with default passphrases
	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("trusted push failed: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// Push with wrong passphrases
	pushCmd = exec.Command(dockerBinary, "push", repoName)
	s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321")
	out, _, err = runCommandWithOutput(pushCmd)
	if err == nil {
		c.Fatalf("Error missing from trusted push with short targets passphrase: \n%s", out)
	}

	if !strings.Contains(string(out), "password invalid, operation has failed") {
		c.Fatalf("Missing expected output on trusted push with short targets/snapsnot passphrase:\n%s", out)
	}
}