// VmMountLayer mounts the diff directory of layer `id` into a VDI disk
// image under the driver root by running a one-shot "puller" pod on the
// driver's dedicated pull VM (d.pullVm).
//
// Flow: lazily Setup() the daemon handle, build a mount-pod spec via
// MakeMountPod, then — only when the pull VM is idle — start the pod,
// block until the VM reports E_POD_FINISHED, and stop/clean the pod so
// the VM can be reused.
func (d *Driver) VmMountLayer(id string) error {
	// Lazy initialization: the daemon handle may not be set up yet.
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return err
		}
	}
	var (
		diffSrc = fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
		volDst  = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	podstring, err := MakeMountPod("mac-vm-disk-mount-layer", "puller:latest", id, diffSrc, volDst)
	if err != nil {
		return err
	}
	podId := fmt.Sprintf("pull-%s", utils.RandStr(10, "alpha"))
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := d.daemon.StartPod(podId, podstring, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			// On failure the pull VM is killed so it is not left in a bad state.
			d.daemon.KillVm(d.pullVm)
			return err
		}
		// NOTE(review): this `:=` shadows the outer `vm` — intentional re-read
		// of the (possibly replaced) VM entry after StartPod, presumably.
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		// Spin on the VM response channel until the pull VM reports the
		// pod finished; responses for other VMs are discarded.
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}
		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		// NOTE(review): CleanPod's return value is ignored here — confirm
		// whether a cleanup failure should surface to the caller.
		d.daemon.CleanPod(podId)
	} else {
		// A non-idle pull VM means it is already running something; the
		// mount is silently skipped (only logged).
		glog.Errorf("pull vm should not be associated")
	}
	return nil
}
func (p *Pod) startLogging(daemon *Daemon) (err error) { err = nil if err = p.getLogger(daemon); err != nil { return } if p.spec.LogConfig.Type == "none" { return nil } for _, c := range p.status.Containers { var stdout, stderr io.Reader tag := "log-" + utils.RandStr(8, "alphanum") if stdout, stderr, err = p.vm.GetLogOutput(c.Id, tag, nil); err != nil { return } c.Logs.Copier = logger.NewCopier(c.Id, map[string]io.Reader{"stdout": stdout, "stderr": stderr}, c.Logs.Driver) c.Logs.Copier.Run() if jl, ok := c.Logs.Driver.(*jsonfilelog.JSONFileLogger); ok { c.Logs.LogPath = jl.LogPath() } } return nil }
func imageToName(image string) string { name := image fields := strings.Split(image, "/") if len(fields) > 1 { name = fields[len(fields)-1] } fields = strings.Split(name, ":") if len(fields) < 2 { name = name + "-" + utils.RandStr(10, "number") } else { name = fields[0] + "-" + fields[1] + "-" + utils.RandStr(10, "number") } validContainerNameChars := `[a-zA-Z0-9][a-zA-Z0-9_.-]` validContainerNamePattern := regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) if !validContainerNamePattern.MatchString(name) { name = namesgenerator.GetRandomName(0) } return name }
// create builds and registers a throwaway pod for one build step and
// returns the daemon container that will run the step's command. The
// pod and container are recorded in b.TmpPods / b.TmpContainers so they
// can be cleaned up later.
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	// Snapshot the config; the snapshot's Cmd drives Path/Args below.
	config := *b.Config

	// Create the Pod
	podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha"))
	podString, err := MakeBasicPod(podId, b.image, b.Config.Cmd.Slice())
	if err != nil {
		return nil, err
	}
	err = b.Hyperdaemon.CreatePod(podId, podString, false)
	if err != nil {
		return nil, err
	}
	// Get the container
	var (
		containerId = ""
		c           *daemon.Container
	)
	ps, ok := b.Hyperdaemon.PodList.GetStatus(podId)
	if !ok {
		return nil, fmt.Errorf("Cannot find pod %s", podId)
	}
	// The loop keeps the last container id — assumes the build pod holds
	// a single container (TODO confirm).
	for _, i := range ps.Containers {
		containerId = i.Id
	}
	c, err = b.Daemon.Get(containerId)
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	b.TmpContainers[c.ID] = struct{}{}
	b.TmpPods[podId] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}
func (d Docker) ContainerStart(cId string, hostConfig *containertypes.HostConfig) (err error) { var vm *hypervisor.Vm podId := "" if _, ok := d.hyper.CopyPods[cId]; ok { podId = d.hyper.CopyPods[cId] } else if _, ok := d.hyper.BasicPods[cId]; ok { podId = d.hyper.BasicPods[cId] } else { return fmt.Errorf("container %s doesn't belong to pod", cId) } defer func() { if err != nil && d.hyper.Vm != nil { if d.hyper.Status != nil { d.hyper.Vm.ReleaseResponseChan(d.hyper.Status) d.hyper.Status = nil } glog.Infof("ContainerStart failed, KillVm") d.Daemon.KillVm(d.hyper.Vm.Id) d.hyper.Vm = nil } }() if d.hyper.Vm == nil { vmId := "buildevm-" + utils.RandStr(10, "number") d.hyper.Vm, err = d.Daemon.StartVm(vmId, 1, 512, false, false, hypertypes.VM_KEEP_AFTER_FINISH) if err != nil { return } d.hyper.Status, err = d.hyper.Vm.GetResponseChan() if err != nil { return } } vm = d.hyper.Vm if vm.Status == hypertypes.S_VM_IDLE { _, _, err = d.Daemon.StartPod(nil, nil, podId, vm.Id, "") if err != nil { glog.Errorf("start pod failed %s", err.Error()) return } return } glog.Errorf("Vm is not IDLE") return fmt.Errorf("Vm is not IDLE") }
// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName,
// will be read from the Context passed to Build().
func NewBuilder(d *daemon.Daemon, config *Config, docker builder.Docker, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) {
	// Default the config and its build-args map so callers may pass nil.
	if config == nil {
		config = new(Config)
	}
	if config.BuildArgs == nil {
		config.BuildArgs = make(map[string]string)
	}
	hyper, err := GetDaemon()
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}
	// The builder VM gets a random name; it is used as b.Name below.
	vmId := "buildervm-" + rand.RandStr(10, "number")
	// NOTE(review): this defer fires when NewBuilder itself returns —
	// i.e. before the builder is ever used — not when the build finishes.
	// It looks like it was meant to run after Build(); confirm intent.
	defer func() {
		glog.V(1).Infof("Kill VM(%s)...", vmId)
		hyper.KillVm(vmId)
	}()
	b = &Builder{
		Config:           config,
		Daemon:           d,
		Name:             vmId,
		Hyperdaemon:      hyper,
		Stdout:           os.Stdout,
		Stderr:           os.Stderr,
		docker:           docker,
		context:          context,
		runConfig:        new(runconfig.Config),
		tmpContainers:    map[string]struct{}{},
		tmpPods:          map[string]struct{}{},
		cancelled:        make(chan struct{}),
		id:               stringid.GenerateNonCryptoID(),
		allowedBuildArgs: make(map[string]bool),
	}
	// Pre-parse the provided Dockerfile, if any; otherwise Build() will
	// read it from the context later.
	if dockerfile != nil {
		b.dockerfile, err = parser.Parse(dockerfile)
		if err != nil {
			return nil, err
		}
	}
	return b, nil
}
func MountVFSVolume(src, sharedDir string) (string, error) { var flags uintptr = utils.MS_BIND mountSharedDir := utils.RandStr(10, "alpha") targetDir := path.Join(sharedDir, mountSharedDir) glog.V(1).Infof("trying to bind dir %s to %s", src, targetDir) stat, err := os.Stat(src) if err != nil { glog.Error("Cannot stat volume Source ", err.Error()) return "", err } if runtime.GOOS == "linux" { base := filepath.Dir(targetDir) if err := os.MkdirAll(base, 0755); err != nil && !os.IsExist(err) { glog.Errorf("error to create dir %s for volume %s", base, src) return "", err } if stat.IsDir() { if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) { glog.Errorf("error to create dir %s for volume %s", targetDir, src) return "", err } } else if f, err := os.Create(targetDir); err != nil && !os.IsExist(err) { glog.Errorf("error to create file %s for volume %s", targetDir, src) return "", err } else if err == nil { f.Close() } } if err := utils.Mount(src, targetDir, "none", flags, "--bind"); err != nil { glog.Errorf("bind dir %s failed: %s", src, err.Error()) return "", err } return mountSharedDir, nil }
// Override the Docker ContainerCreate interface, create pod to run command func (d Docker) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) { var podString string var err error if params.Config == nil { return types.ContainerCreateResponse{}, derr.ErrorCodeEmptyConfig } podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha")) // Hack here, container created by ADD/COPY only has Config if params.HostConfig != nil { podString, err = MakeBasicPod(podId, params.Config.Image, params.Config.WorkingDir) } else { podString, err = MakeCopyPod(podId, params.Config.Image, params.Config.WorkingDir) } if err != nil { return types.ContainerCreateResponse{}, err } pod, err := d.Daemon.CreatePod(podId, podString, false) if err != nil { return types.ContainerCreateResponse{}, err } if len(pod.Status().Containers) != 1 { return types.ContainerCreateResponse{}, fmt.Errorf("container count in pod is incorrect") } cId := pod.Status().Containers[0].Id if params.HostConfig != nil { d.hyper.BasicPods[cId] = podId glog.Infof("basic containerId %s, podId %s", cId, podId) } else { d.hyper.CopyPods[cId] = podId glog.Infof("copy containerId %s, podId %s", cId, podId) } return types.ContainerCreateResponse{ID: cId}, nil }
// GetTag returns a fresh 8-character alphanumeric tag, used to
// correlate an attach stream with its tty session (see HyperCmdRun).
func (cli *HyperClient) GetTag() string {
	return utils.RandStr(8, "alphanum")
}
// runContextCommand is the shared machinery behind ADD and COPY. It
// resolves every source (local context path or, when allowRemote, a
// URL) into copyInfo entries, computes a cache key from their hashes,
// and — on a cache miss — creates a throwaway "copy pod" that stages
// the sources into a temp dir and runs a generated exec-copy.sh inside
// the builder VM to place them at dest, finally committing the result.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}
	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			// Downloaded file lives in a temp dir; removed when this
			// function returns.
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			// Remote sources are never auto-decompressed.
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}
		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		// Combined cache key over all sources.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Temporarily swap in a no-op command whose text encodes the cache
	// key; restored by the deferred closure below.
	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	// Create the Pod
	podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha"))
	tempSrcDir := fmt.Sprintf("/var/run/hyper/temp/%s/", podId)
	if err := os.MkdirAll(tempSrcDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	if _, err := os.Stat(tempSrcDir); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	shellDir := fmt.Sprintf("/var/run/hyper/shell/%s/", podId)
	if err := os.MkdirAll(shellDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	// Generated script executed inside the pod to move staged files.
	copyshell, err1 := os.Create(shellDir + "/exec-copy.sh")
	if err1 != nil {
		glog.Errorf(err1.Error())
		return err1
	}
	fmt.Fprintf(copyshell, "#!/bin/sh\n")
	podString, err := MakeCopyPod(podId, b.image, b.runConfig.WorkingDir, tempSrcDir, dest, shellDir)
	if err != nil {
		return err
	}
	err = b.Hyperdaemon.CreatePod(podId, podString, false)
	if err != nil {
		return err
	}
	// Get the container
	var (
		containerId string = ""
		container   *daemon.Container
	)
	ps, ok := b.Hyperdaemon.PodList.GetStatus(podId)
	if !ok {
		return fmt.Errorf("Cannot find pod %s", podId)
	}
	// Keeps the last id — assumes a single-container pod (TODO confirm).
	for _, i := range ps.Containers {
		containerId = i.Id
	}
	container, err = b.Daemon.Get(containerId)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	/*
		container, _, err := b.docker.Create(b.runConfig, nil)
		if err != nil {
			return err
		}
		defer b.docker.Unmount(container)
	*/
	b.tmpPods[podId] = struct{}{}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when its a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}
	// Stage each source into tempSrcDir on the host and emit the matching
	// cp line into the in-pod script.
	for _, info := range infos {
		if err := b.docker.Copy(container, tempSrcDir, info.FileInfo, info.decompress); err != nil {
			return err
		}
		if strings.HasSuffix(dest, string(os.PathSeparator)) == true {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", info.FileInfo.Name(), filepath.Join(dest, info.FileInfo.Name())))
		} else {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", info.FileInfo.Name(), dest))
		}
	}
	fmt.Fprintf(copyshell, "umount /tmp/src/\n")
	fmt.Fprintf(copyshell, "umount /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/src/\n")
	copyshell.Close()
	// start or replace pod
	vm, ok := b.Hyperdaemon.VmList[b.Name]
	if !ok {
		glog.Warningf("can not find VM(%s)", b.Name)
		bo := &hypervisor.BootConfig{
			CPU:    1,
			Memory: 512,
			Kernel: b.Hyperdaemon.Kernel,
			Initrd: b.Hyperdaemon.Initrd,
			Bios:   b.Hyperdaemon.Bios,
			Cbfs:   b.Hyperdaemon.Cbfs,
			Vbox:   b.Hyperdaemon.VboxImage,
		}
		vm = b.Hyperdaemon.NewVm(b.Name, 1, 512, false, types.VM_KEEP_AFTER_FINISH)
		err = vm.Launch(bo)
		if err != nil {
			return err
		}
		b.Hyperdaemon.AddVm(vm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := b.Hyperdaemon.StartPod(podId, "", b.Name, nil, false, false, types.VM_KEEP_AFTER_FINISH, []*hypervisor.TtyIO{})
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		vm = b.Hyperdaemon.VmList[b.Name]
		// wait for cmd finish
		Status, err := vm.GetResponseChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer vm.ReleaseResponseChan(Status)
		var vmResponse *types.VmResponse
		// Block until the builder VM reports the copy pod finished.
		for {
			vmResponse = <-Status
			if vmResponse.VmId == b.Name {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}
		pod, ok := b.Hyperdaemon.PodList.Get(podId)
		if !ok {
			return fmt.Errorf("Cannot find pod %s", podId)
		}
		pod.SetVM(b.Name, vm)

		// release pod from VM
		glog.Warningf("start stop pod")
		code, cause, err = b.Hyperdaemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		glog.Warningf("stop pod finish")
	} else {
		glog.Errorf("Vm is not IDLE")
		return fmt.Errorf("Vm is not IDLE")
	}

	if err := b.commit(container.ID, cmd, comment); err != nil {
		return err
	}

	return nil
}
// hyper build [OPTIONS] PATH
//
// HyperCmdBuild implements the CLI `build` subcommand: it locates and
// validates the Dockerfile within the build context at PATH, tars the
// context (honoring .dockerignore), and streams it to the daemon's
// /image/build endpoint.
func (cli *HyperClient) HyperCmdBuild(args ...string) error {
	var opts struct {
		ImageName      string `long:"tag" short:"t" default:"" value-name:"\"\"" default-mask:"-" description:"Repository name (and optionally a tag) to be applied to the resulting image in case of success"`
		DockerfileName string `long:"file" short:"f" default:"" value-name:"\"\"" default-mask:"-" description:"Customized docker file"`
	}

	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "build [OPTIONS] PATH\n\nBuild a new image from the source code at PATH"
	args, err := parser.ParseArgs(args)
	if err != nil {
		// gflag reports --help as an error containing "Usage"; treat it
		// as a clean exit.
		if !strings.Contains(err.Error(), "Usage") {
			return err
		} else {
			return nil
		}
	}
	if len(args) == 0 {
		return fmt.Errorf("%s: \"build\" requires a minimum of 1 argument, See 'hyper build --help'.", os.Args[0])
	}
	var (
		filename = ""
		context  archive.Archive
		name     = ""
	)
	root := args[0]
	if _, err := os.Stat(root); err != nil {
		return err
	}
	absRoot, err := filepath.Abs(root)
	if err != nil {
		return err
	}
	filename = opts.DockerfileName // path to Dockerfile

	if opts.DockerfileName == "" {
		// No -f/--file was specified so use the default
		opts.DockerfileName = api.DefaultDockerfileName
		filename = filepath.Join(absRoot, opts.DockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absRoot, strings.ToLower(opts.DockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				opts.DockerfileName = strings.ToLower(opts.DockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := opts.DockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return err
	}

	// Verify that 'filename' is within the build context
	filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
	if err != nil {
		return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
	}

	// Now reset the dockerfileName to be relative to the build context
	opts.DockerfileName, err = filepath.Rel(absRoot, filename)
	if err != nil {
		return err
	}
	// And canonicalize dockerfile name to a platform-independent one
	opts.DockerfileName, err = archive.CanonicalTarNameForPath(opts.DockerfileName)
	if err != nil {
		return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", opts.DockerfileName, err)
	}

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}

	// NOTE(review): when .dockerignore does not exist, f is nil here and
	// the deferred Close relies on (*os.File).Close tolerating a nil
	// receiver (it returns ErrInvalid, which is discarded).
	f, err := os.Open(filepath.Join(root, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	defer f.Close()

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed.  The deamon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(opts.DockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", opts.DockerfileName)
	}

	// NOTE(review): this repeats the ValidateContextDirectory call made
	// just above with identical arguments — appears redundant.
	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}
	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}
	context, err = archive.TarWithOptions(root, options)
	if err != nil {
		return err
	}
	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter()
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       os.Stdout,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}
	if opts.ImageName == "" {
		// set a image name
		name = rand.RandStr(10, "alphanum")
	} else {
		name = opts.ImageName
		repository, tag := parsers.ParseRepositoryTag(name)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}
	v := url.Values{}
	v.Set("name", name)
	headers := http.Header(make(map[string][]string))
	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	err = cli.stream("POST", "/image/build?"+v.Encode(), body, cli.out, headers)
	if err != nil {
		return err
	}
	return nil
}
// runContextCommand is the shared machinery behind ADD and COPY
// (copyInfo-pointer variant). It resolves each source into a copyInfo
// via calcCopyInfo, derives a cache key from the source hashes, and on
// a cache miss creates a throwaway "copy pod" that stages sources into
// a temp dir, runs a generated exec-copy.sh inside the builder VM to
// place them at dest, then commits the container.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}
	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest
	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	// Remove any temp dirs calcCopyInfo created, once we're done.
	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			glog.Error(err.Error())
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		// Combined cache key over all sources.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Temporarily swap in a no-op command encoding the cache key;
	// restored by the deferred closure.
	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	b.Config.Image = b.image
	//	config := *b.Config

	// Create the Pod
	podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha"))
	tempSrcDir := fmt.Sprintf("/var/run/hyper/temp/%s/", podId)
	if err := os.MkdirAll(tempSrcDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	if _, err := os.Stat(tempSrcDir); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	shellDir := fmt.Sprintf("/var/run/hyper/shell/%s/", podId)
	if err := os.MkdirAll(shellDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	// Generated script run inside the pod to move the staged files.
	copyshell, err1 := os.Create(shellDir + "/exec-copy.sh")
	if err1 != nil {
		glog.Errorf(err1.Error())
		return err1
	}
	fmt.Fprintf(copyshell, "#!/bin/sh\n")
	podString, err := MakeCopyPod(podId, b.image, b.Config.WorkingDir, tempSrcDir, dest, shellDir)
	if err != nil {
		return err
	}
	err = b.Hyperdaemon.CreatePod(podId, podString, false)
	if err != nil {
		return err
	}
	// Get the container
	var (
		containerId = ""
		container   *daemon.Container
	)
	ps, ok := b.Hyperdaemon.PodList.GetStatus(podId)
	if !ok {
		return fmt.Errorf("Cannot find pod %s", podId)
	}
	// Keeps the last id — assumes a single-container pod (TODO confirm).
	for _, i := range ps.Containers {
		containerId = i.Id
	}
	container, err = b.Daemon.Get(containerId)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}
	b.TmpPods[podId] = struct{}{}

	// Stage each source and emit the matching cp line into the script.
	// When dest is not a directory only the first source is copied
	// (note the break).
	for _, ci := range copyInfos {
		glog.V(1).Infof("container %s, origPath %s, destPath %s", container.ID, ci.origPath, ci.destPath)
		// Copy the source files to tempSrcDir
		if err := b.addContext(container, ci.origPath, tempSrcDir, ci.decompress); err != nil {
			glog.Error(err.Error())
			return err
		}
		if strings.HasSuffix(dest, "/") == true {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", ci.origPath, ci.destPath))
		} else {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", ci.origPath, dest))
			break
		}
	}
	fmt.Fprintf(copyshell, "umount /tmp/src/\n")
	fmt.Fprintf(copyshell, "umount /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/src/\n")
	copyshell.Close()
	// start or replace pod
	vm, ok := b.Hyperdaemon.VmList[b.Name]
	if !ok {
		glog.Warningf("can not find VM(%s)", b.Name)
		bo := &hypervisor.BootConfig{
			CPU:    1,
			Memory: 512,
			Kernel: b.Hyperdaemon.Kernel,
			Initrd: b.Hyperdaemon.Initrd,
			Bios:   b.Hyperdaemon.Bios,
			Cbfs:   b.Hyperdaemon.Cbfs,
			Vbox:   b.Hyperdaemon.VboxImage,
		}
		vm = b.Hyperdaemon.NewVm(b.Name, 1, 512, false, types.VM_KEEP_AFTER_FINISH)
		err = vm.Launch(bo)
		if err != nil {
			return err
		}
		b.Hyperdaemon.AddVm(vm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := b.Hyperdaemon.StartPod(podId, "", b.Name, nil, false, false, types.VM_KEEP_AFTER_FINISH, []*hypervisor.TtyIO{})
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		vm = b.Hyperdaemon.VmList[b.Name]
		// wait for cmd finish
		Status, err := vm.GetResponseChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer vm.ReleaseResponseChan(Status)
		var vmResponse *types.VmResponse
		// Block until the builder VM reports the copy pod finished.
		for {
			vmResponse = <-Status
			if vmResponse.VmId == b.Name {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}
		pod, ok := b.Hyperdaemon.PodList.Get(podId)
		if !ok {
			return fmt.Errorf("Cannot find pod %s", podId)
		}
		pod.SetVM(b.Name, vm)

		// release pod from VM
		glog.Warningf("start stop pod")
		code, cause, err = b.Hyperdaemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		glog.Warningf("stop pod finish")
	} else {
		glog.Errorf("Vm is not IDLE")
		return fmt.Errorf("Vm is not IDLE")
	}
	glog.Warningf("begin commit")
	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
// hyper run [OPTIONS] image [COMMAND] [ARGS...]
//
// HyperCmdRun implements the CLI `run` subcommand. Three modes:
//   - -p/--podfile: run a pod straight from a pod-spec file;
//   - -k/--kubernetes: convert a kubernetes pod file and run it;
//   - otherwise: synthesize a single-container pod from IMAGE/COMMAND
//     and the flags, run it, then attach to the container's tty.
func (cli *HyperClient) HyperCmdRun(args ...string) error {
	if len(args) == 0 {
		return fmt.Errorf("%s ERROR: Can not accept the 'run' command without argument!\n", os.Args[0])
	}
	var opts struct {
		PodFile       string   `short:"p" long:"podfile" value-name:"\"\"" description:"Create and Run a pod based on the pod file"`
		K8s           string   `short:"k" long:"kubernetes" value-name:"\"\"" description:"Create and Run a pod based on the kubernetes pod file"`
		Yaml          bool     `short:"y" long:"yaml" default:"false" default-mask:"-" description:"Create a pod based on Yaml file"`
		Name          string   `long:"name" value-name:"\"\"" description:"Assign a name to the container"`
		Attach        bool     `long:"attach" default:"true" default-mask:"-" description:"Attach the stdin, stdout and stderr to the container"`
		Workdir       string   `long:"workdir" default:"/" value-name:"\"\"" default-mask:"-" description:"Working directory inside the container"`
		Tty           bool     `long:"tty" default:"true" default-mask:"-" description:"Allocate a pseudo-TTY"`
		Cpu           int      `long:"cpu" default:"1" value-name:"1" default-mask:"-" description:"CPU number for the VM"`
		Memory        int      `long:"memory" default:"128" value-name:"128" default-mask:"-" description:"Memory size (MB) for the VM"`
		Env           []string `long:"env" value-name:"[]" default-mask:"-" description:"Set environment variables"`
		EntryPoint    string   `long:"entrypoint" value-name:"\"\"" default-mask:"-" description:"Overwrite the default ENTRYPOINT of the image"`
		RestartPolicy string   `long:"restart" default:"never" value-name:"\"\"" default-mask:"-" description:"Restart policy to apply when a container exits (never, onFailure, always)"`
		Remove        bool     `long:"rm" default:"false" value-name:"" default-mask:"-" description:"Automatically remove the pod when it exits"`
	}
	var parser = gflag.NewParser(&opts, gflag.Default|gflag.IgnoreUnknown)
	parser.Usage = "run [OPTIONS] IMAGE [COMMAND] [ARG...]\n\nCreate a pod, and launch a new VM to run the pod"
	// NOTE(review): Parse() reads the process arguments, not the `args`
	// parameter passed to this function — presumably intentional given
	// the indexing below, but confirm (ParseArgs(args) is used elsewhere).
	args, err := parser.Parse()
	if err != nil {
		// gflag reports --help as an error containing "Usage".
		if !strings.Contains(err.Error(), "Usage") {
			return err
		} else {
			return nil
		}
	}
	// Mode 1: run an explicit pod file.
	if opts.PodFile != "" {
		if _, err := os.Stat(opts.PodFile); err != nil {
			return err
		}
		jsonbody, err := ioutil.ReadFile(opts.PodFile)
		if err != nil {
			return err
		}
		if opts.Yaml == true {
			jsonbody, err = cli.ConvertYamlToJson(jsonbody)
			if err != nil {
				return err
			}
		}
		t1 := time.Now()
		podId, err := cli.RunPod(string(jsonbody), opts.Remove)
		if err != nil {
			return err
		}
		fmt.Printf("POD id is %s\n", podId)
		t2 := time.Now()
		fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000)
		return nil
	}
	// Mode 2: convert a kubernetes pod file, then run it.
	if opts.K8s != "" {
		var (
			kpod    pod.KPod
			userpod *pod.UserPod
		)
		if _, err := os.Stat(opts.K8s); err != nil {
			return err
		}
		jsonbody, err := ioutil.ReadFile(opts.K8s)
		if err != nil {
			return err
		}
		if opts.Yaml == true {
			jsonbody, err = cli.ConvertYamlToJson(jsonbody)
			if err != nil {
				return err
			}
		}
		if err := json.Unmarshal(jsonbody, &kpod); err != nil {
			return err
		}
		userpod, err = kpod.Convert()
		if err != nil {
			return err
		}
		jsonbody, err = json.Marshal(*userpod)
		if err != nil {
			return err
		}
		t1 := time.Now()
		podId, err := cli.RunPod(string(jsonbody), opts.Remove)
		if err != nil {
			return err
		}
		fmt.Printf("POD id is %s\n", podId)
		t2 := time.Now()
		fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000)
		return nil
	}
	// Mode 3: build a pod spec from IMAGE [COMMAND] [ARG...].
	if len(args) == 0 {
		return fmt.Errorf("%s: \"run\" requires a minimum of 1 argument, please provide the image.", os.Args[0])
	}
	// NOTE(review): args[1] is indexed but only len(args) == 0 was
	// checked above — a one-element args slice would panic here.
	// Looks like the guard should require len(args) >= 2; confirm.
	var (
		image   = args[1]
		command = []string{}
		env     = []pod.UserEnvironmentVar{}
	)
	if len(args) > 1 {
		command = args[2:]
	}
	// Default the container name from image (and tag when present).
	if opts.Name == "" {
		fields := strings.Split(image, ":")
		if len(fields) < 2 {
			opts.Name = image + "-" + utils.RandStr(10, "number")
		} else {
			opts.Name = fields[0] + "-" + fields[1] + "-" + utils.RandStr(10, "number")
		}
	}
	if opts.Memory == 0 {
		opts.Memory = 128
	}
	if opts.Cpu == 0 {
		opts.Cpu = 1
	}
	// Parse KEY=VALUE env flags; malformed entries are skipped silently.
	for _, v := range opts.Env {
		if v == "" || !strings.Contains(v, "=") {
			continue
		}
		userEnv := pod.UserEnvironmentVar{
			Env:   v[:strings.Index(v, "=")],
			Value: v[strings.Index(v, "=")+1:],
		}
		env = append(env, userEnv)
	}

	var containerList = []pod.UserContainer{}
	var container = pod.UserContainer{
		Name:          opts.Name,
		Image:         image,
		Command:       command,
		Workdir:       opts.Workdir,
		Entrypoint:    []string{},
		Ports:         []pod.UserContainerPort{},
		Envs:          env,
		Volumes:       []pod.UserVolumeReference{},
		Files:         []pod.UserFileReference{},
		RestartPolicy: opts.RestartPolicy,
	}
	containerList = append(containerList, container)

	var userPod = &pod.UserPod{
		Name:       opts.Name,
		Containers: containerList,
		Resource:   pod.UserResource{Vcpu: opts.Cpu, Memory: opts.Memory},
		Files:      []pod.UserFile{},
		Volumes:    []pod.UserVolume{},
		Tty:        opts.Tty,
	}

	jsonString, _ := json.Marshal(userPod)
	podId, err := cli.RunPod(string(jsonString), opts.Remove)
	if err != nil {
		return err
	}
	fmt.Printf("POD id is %s\n", podId)
	// Get the container ID of this POD
	containerId, err := cli.GetContainerByPod(podId)
	if err != nil {
		return err
	}
	var (
		tag      = cli.GetTag()
		hijacked = make(chan io.Closer)
		errCh    chan error
	)
	v := url.Values{}
	v.Set("type", "container")
	v.Set("value", containerId)
	v.Set("tag", tag)
	// Block the return until the chan gets closed
	defer func() {
		// fmt.Printf("End of CmdExec(), Waiting for hijack to finish.\n")
		if _, ok := <-hijacked; ok {
			fmt.Printf("Hijack did not finish (chan still open)\n")
		}
	}()
	// Attach runs in a goroutine; errCh yields its final error.
	errCh = promise.Go(func() error {
		return cli.hijack("POST", "/attach?"+v.Encode(), true, cli.in, cli.out, cli.out, hijacked, nil, "")
	})
	if err := cli.monitorTtySize(podId, tag); err != nil {
		fmt.Printf("Monitor tty size fail for %s!\n", podId)
	}
	// Acknowledge the hijack before starting
	select {
	case closer := <-hijacked:
		// Make sure that hijack gets closed when returning. (result
		// in closing hijack chan and freeing server's goroutines.
		if closer != nil {
			defer closer.Close()
		}
	case err := <-errCh:
		if err != nil {
			fmt.Printf("Error hijack: %s", err.Error())
			return err
		}
	}
	// Wait for the attach goroutine to finish.
	if err := <-errCh; err != nil {
		fmt.Printf("Error hijack: %s", err.Error())
		return err
	}
	// fmt.Printf("Success to exec the command %s for POD %s!\n", command, podId)
	return nil
}
func Build(d *daemon.Daemon, buildConfig *Config) error { var ( repoName string tag string context io.ReadCloser ) repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) if repoName != "" { if err := registry.ValidateRepositoryName(repoName); err != nil { glog.Error(err.Error()) return err } if len(tag) > 0 { if err := tags.ValidateTagName(tag); err != nil { glog.Error(err.Error()) return err } } } if buildConfig.RemoteURL == "" { context = ioutil.NopCloser(buildConfig.Context) } else if urlutil.IsGitURL(buildConfig.RemoteURL) { root, err := utils.GitClone(buildConfig.RemoteURL) if err != nil { glog.Error(err.Error()) return err } defer os.RemoveAll(root) c, err := archive.Tar(root, archive.Uncompressed) if err != nil { glog.Error(err.Error()) return err } context = c } else if urlutil.IsURL(buildConfig.RemoteURL) { f, err := httputils.Download(buildConfig.RemoteURL) if err != nil { glog.Error(err.Error()) return err } defer f.Body.Close() dockerFile, err := ioutil.ReadAll(f.Body) if err != nil { glog.Error(err.Error()) return err } // When we're downloading just a Dockerfile put it in // the default name - don't allow the client to move/specify it buildConfig.DockerfileName = api.DefaultDockerfileName c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) if err != nil { return err } context = c } defer context.Close() sf := streamformatter.NewJSONStreamFormatter() hyper, err := GetDaemon() if err != nil { glog.Error(err.Error()) return err } vmId := "buildervm-" + rand.RandStr(10, "number") defer func() { glog.V(1).Infof("Kill VM(%s)...", vmId) hyper.KillVm(vmId) }() builder := &Builder{ Daemon: d, Name: vmId, Hyperdaemon: hyper, OutStream: &streamformatter.StdoutFormater{ Writer: buildConfig.Stdout, StreamFormatter: sf, }, ErrStream: &streamformatter.StderrFormater{ Writer: buildConfig.Stdout, StreamFormatter: sf, }, Verbose: !buildConfig.SuppressOutput, UtilizeCache: !buildConfig.NoCache, Remove: buildConfig.Remove, ForceRemove: 
buildConfig.ForceRemove, Pull: buildConfig.Pull, OutOld: buildConfig.Stdout, StreamFormatter: sf, AuthConfig: buildConfig.AuthConfig, ConfigFile: buildConfig.ConfigFile, dockerfileName: buildConfig.DockerfileName, cpuShares: buildConfig.CpuShares, cpuPeriod: buildConfig.CpuPeriod, cpuQuota: buildConfig.CpuQuota, cpuSetCpus: buildConfig.CpuSetCpus, cpuSetMems: buildConfig.CpuSetMems, cgroupParent: buildConfig.CgroupParent, memory: buildConfig.Memory, memorySwap: buildConfig.MemorySwap, cancelled: buildConfig.WaitCancelled(), } id, err := builder.Run(context) if err != nil { glog.Error(err.Error()) return err } if repoName != "" { return d.Repositories().Tag(repoName, tag, id, true) } return nil }
// hyper run [OPTIONS] image [COMMAND] [ARGS...] func (cli *HyperClient) HyperCmdRun(args ...string) error { if len(args) == 0 { return fmt.Errorf("%s ERROR: Can not accept the 'run' command without argument!\n", os.Args[0]) } var opts struct { PodFile string `short:"p" long:"podfile" value-name:"\"\"" description:"Create and Run a pod based on the pod file"` K8s string `short:"k" long:"kubernetes" value-name:"\"\"" description:"Create and Run a pod based on the kubernetes pod file"` Yaml bool `short:"y" long:"yaml" default:"false" default-mask:"-" description:"Create a pod based on Yaml file"` Name string `long:"name" value-name:"\"\"" description:"Assign a name to the container"` Attach bool `long:"attach" default:"true" default-mask:"-" description:"Attach the stdin, stdout and stderr to the container"` Workdir string `long:"workdir" default:"/" value-name:"\"\"" default-mask:"-" description:"Working directory inside the container"` Tty bool `long:"tty" default:"true" default-mask:"-" description:"Allocate a pseudo-TTY"` Cpu int `long:"cpu" default:"1" value-name:"1" default-mask:"-" description:"CPU number for the VM"` Memory int `long:"memory" default:"128" value-name:"128" default-mask:"-" description:"Memory size (MB) for the VM"` Env []string `long:"env" value-name:"[]" default-mask:"-" description:"Set environment variables"` EntryPoint string `long:"entrypoint" value-name:"\"\"" default-mask:"-" description:"Overwrite the default ENTRYPOINT of the image"` RestartPolicy string `long:"restart" default:"never" value-name:"\"\"" default-mask:"-" description:"Restart policy to apply when a container exits (never, onFailure, always)"` Remove bool `long:"rm" default:"false" value-name:"" default-mask:"-" description:"Automatically remove the pod when it exits"` Portmap []string `long:"publish" value-name:"[]" default-mask:"-" description:"Publish a container's port to the host, format: --publish [tcp/udp:]hostPort:containerPort"` } var parser = 
gflag.NewParser(&opts, gflag.Default|gflag.IgnoreUnknown) parser.Usage = "run [OPTIONS] IMAGE [COMMAND] [ARG...]\n\nCreate a pod, and launch a new VM to run the pod" args, err := parser.Parse() if err != nil { if !strings.Contains(err.Error(), "Usage") { return err } else { return nil } } if opts.PodFile != "" { if _, err := os.Stat(opts.PodFile); err != nil { return err } jsonbody, err := ioutil.ReadFile(opts.PodFile) if err != nil { return err } if opts.Yaml == true { jsonbody, err = cli.ConvertYamlToJson(jsonbody) if err != nil { return err } } t1 := time.Now() podId, err := cli.RunPod(string(jsonbody), opts.Remove) if err != nil { return err } fmt.Printf("POD id is %s\n", podId) t2 := time.Now() fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000) return nil } if opts.K8s != "" { var ( kpod pod.KPod userpod *pod.UserPod ) if _, err := os.Stat(opts.K8s); err != nil { return err } jsonbody, err := ioutil.ReadFile(opts.K8s) if err != nil { return err } if opts.Yaml == true { jsonbody, err = cli.ConvertYamlToJson(jsonbody) if err != nil { return err } } if err := json.Unmarshal(jsonbody, &kpod); err != nil { return err } userpod, err = kpod.Convert() if err != nil { return err } jsonbody, err = json.Marshal(*userpod) if err != nil { return err } t1 := time.Now() podId, err := cli.RunPod(string(jsonbody), opts.Remove) if err != nil { return err } fmt.Printf("POD id is %s\n", podId) t2 := time.Now() fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000) return nil } if len(args) == 0 { return fmt.Errorf("%s: \"run\" requires a minimum of 1 argument, please provide the image.", os.Args[0]) } var ( image = args[1] command = []string{} env = []pod.UserEnvironmentVar{} ports = []pod.UserContainerPort{} proto string hPort string cPort string ) if len(args) > 1 { command = args[2:] } if opts.Name == "" { opts.Name = image fields := strings.Split(image, "/") if len(fields) > 1 { opts.Name = fields[len(fields)-1] } 
fields = strings.Split(opts.Name, ":") if len(fields) < 2 { opts.Name = opts.Name + "-" + utils.RandStr(10, "number") } else { opts.Name = fields[0] + "-" + fields[1] + "-" + utils.RandStr(10, "number") } validContainerNameChars := `[a-zA-Z0-9][a-zA-Z0-9_.-]` validContainerNamePattern := regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) if !validContainerNamePattern.MatchString(opts.Name) { opts.Name = namesgenerator.GetRandomName(0) } } if opts.Memory == 0 { opts.Memory = 128 } if opts.Cpu == 0 { opts.Cpu = 1 } for _, v := range opts.Env { if v == "" || !strings.Contains(v, "=") { continue } userEnv := pod.UserEnvironmentVar{ Env: v[:strings.Index(v, "=")], Value: v[strings.Index(v, "=")+1:], } env = append(env, userEnv) } for _, v := range opts.Portmap { port := pod.UserContainerPort{} fields := strings.Split(v, ":") if len(fields) < 2 { return fmt.Errorf("flag needs host port and container port: --publish") } else if len(fields) == 2 { proto = "tcp" hPort = fields[0] cPort = fields[1] } else { proto = fields[0] if proto != "tcp" && proto != "udp" { return fmt.Errorf("flag needs protocol(tcp or udp): --publish") } hPort = fields[1] cPort = fields[2] } port.Protocol = proto port.HostPort, err = strconv.Atoi(hPort) if err != nil { return fmt.Errorf("flag needs host port and container port: --publish") } port.ContainerPort, err = strconv.Atoi(cPort) if err != nil { return fmt.Errorf("flag needs host port and container port: --publish") } ports = append(ports, port) } var containerList = []pod.UserContainer{} var container = pod.UserContainer{ Name: opts.Name, Image: image, Command: command, Workdir: opts.Workdir, Entrypoint: []string{}, Ports: ports, Envs: env, Volumes: []pod.UserVolumeReference{}, Files: []pod.UserFileReference{}, RestartPolicy: opts.RestartPolicy, } containerList = append(containerList, container) var userPod = &pod.UserPod{ Name: opts.Name, Containers: containerList, Resource: pod.UserResource{Vcpu: opts.Cpu, Memory: opts.Memory}, Files: 
[]pod.UserFile{}, Volumes: []pod.UserVolume{}, Tty: opts.Tty, } jsonString, _ := json.Marshal(userPod) podId, err := cli.CreatePod(string(jsonString)) if err != nil { return err } if opts.Remove { defer func() { cli.HyperCmdRm(podId) }() } _, err = cli.StartPod(podId, "", true) if err != nil { return err } fmt.Printf("POD id is %s\n", podId) return nil }