// CmdPodRun creates a new pod from the spec in job.Args[0] and immediately starts it in a freshly created VM.
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]

	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	code, cause, err := daemon.StartPod(podId, vmId, podArgs)
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}

	if err := daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saving the Containers info")
		return err
	}

	data, err := daemon.GetPodByName(podId)
	if err != nil {
		return err
	}
	userPod, err := pod.ProcessPodBytes(data)
	if err != nil {
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[podId],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[podId].Vm = vmId
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
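// CmdPodCreate handles the "pod create" job: it checks the running-pod limit,
// creates the pod described by job.Args[0] under a fresh pod-XXXX id, persists
// the pod and container info, and writes the result back to the client without
// starting a VM.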
func (daemon *Daemon) CmdPodCreate(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]

	wg := new(sync.WaitGroup)
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	err := daemon.CreatePod(podArgs, podId, wg)
	if err != nil {
		return err
	}
	if err := daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saving the Containers info")
		return err
	}

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
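// CmdVmCreate launches a standalone VM with the requested vcpu count
// (job.Args[0]) and memory size in MB (job.Args[1]), defaulting to 1 vcpu and
// 128 MB, registers its event/status channels, and records it as an idle VM
// that a pod can later be associated with.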
func (daemon *Daemon) CmdVmCreate(job *engine.Job) (err error) {
	var (
		vmId          = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
		qemuPodEvent  = make(chan qemu.QemuEvent, 128)
		qemuStatus    = make(chan *types.QemuResponse, 128)
		subQemuStatus = make(chan *types.QemuResponse, 128)
		cpu           = 1
		mem           = 128
	)
	if job.Args[0] != "" {
		cpu, err = strconv.Atoi(job.Args[0])
		if err != nil {
			return err
		}
	}
	if job.Args[1] != "" {
		mem, err = strconv.Atoi(job.Args[1])
		if err != nil {
			return err
		}
	}
	b := &qemu.BootConfig{
		CPU:    cpu,
		Memory: mem,
		Kernel: daemon.kernel,
		Initrd: daemon.initrd,
		Bios:   daemon.bios,
		Cbfs:   daemon.cbfs,
	}
	go qemu.QemuLoop(vmId, qemuPodEvent, qemuStatus, b)
	if err := daemon.SetQemuChan(vmId, qemuPodEvent, qemuStatus, subQemuStatus); err != nil {
		glog.V(1).Infof("SetQemuChan error: %s", err.Error())
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    nil,
		Status: types.S_VM_IDLE,
		Cpu:    cpu,
		Mem:    mem,
	}
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
// The caller must make sure that the restart policy and the status are right to restart the pod
func (daemon *Daemon) RestartPod(mypod *Pod) error {
	// Remove the pod
	// The pod is stopped, the vm is gone
	for _, c := range mypod.Containers {
		glog.V(1).Infof("Ready to rm container: %s", c.Id)
		if _, _, err := daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
			glog.V(1).Infof("Error to rm container: %s", err.Error())
		}
	}
	daemon.RemovePod(mypod.Id)
	daemon.DeletePodContainerFromDB(mypod.Id)
	daemon.DeleteVolumeId(mypod.Id)

	podData, err := daemon.GetPodByName(mypod.Id)
	if err != nil {
		return err
	}
	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	// Start the pod
	_, _, err = daemon.StartPod(mypod.Id, vmId, string(podData))
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}
	if err := daemon.WritePodAndContainers(mypod.Id); err != nil {
		glog.Error("Found an error while saving the Containers info")
		return err
	}

	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[mypod.Id],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[mypod.Id].Vm = vmId
	daemon.AddVm(vm)

	return nil
}
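// CmdPodStart starts an already-created pod (job.Args[0]) either in a freshly
// created VM or in the existing idle VM named by job.Args[1]; when reusing a
// VM, the pod's vcpu and memory settings must match the VM's.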
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podId := job.Args[0]
	vmId := job.Args[1]
	glog.Infof("pod:%s, vm:%s", podId, vmId)
	// Do the status check for the given pod
	if pod, ok := daemon.podList[podId]; ok {
		if pod.Status == types.S_POD_RUNNING {
			return fmt.Errorf("The pod(%s) is running, can not start it", podId)
		} else {
			if pod.Type == "kubernetes" && pod.Status != types.S_POD_CREATED {
				return fmt.Errorf("The pod(%s) is finished with kubernetes type, can not start it again", podId)
			}
		}
	} else {
		return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}

	data, err := daemon.GetPodByName(podId)
	if err != nil {
		return err
	}
	userPod, err := pod.ProcessPodBytes(data)
	if err != nil {
		return err
	}

	if vmId == "" {
		vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	} else {
		if _, ok := daemon.vmList[vmId]; !ok {
			return fmt.Errorf("The VM %s doesn't exist", vmId)
		}
		if userPod.Resource.Vcpu != daemon.vmList[vmId].Cpu {
			return fmt.Errorf("The new pod's cpu setting is different from the current VM's cpu")
		}
		if userPod.Resource.Memory != daemon.vmList[vmId].Mem {
			return fmt.Errorf("The new pod's memory setting is different from the current VM's memory")
		}
	}

	code, cause, err := daemon.StartPod(podId, vmId, "")
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[podId],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[podId].Vm = vmId
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
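// StartPod boots (or reuses) the VM for a pod and launches the pod inside it:
// it prepares the container root filesystems with the configured storage
// driver (devicemapper, aufs or overlay), injects any attached files, sets up
// the volumes, sends the RunPodCommand to the VM loop and waits for the
// response. It returns the response code and cause reported by the VM.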
func (daemon *Daemon) StartPod(podId, vmId, podArgs string) (int, string, error) {
	var (
		fstype            string
		poolName          string
		volPoolName       string
		devPrefix         string
		storageDriver     string
		rootPath          string
		devFullName       string
		rootfs            string
		containerInfoList = []*hypervisor.ContainerInfo{}
		volumeInfoList    = []*hypervisor.VolumeInfo{}
		cli               = daemon.dockerCli
		qemuPodEvent      = make(chan hypervisor.VmEvent, 128)
		qemuStatus        = make(chan *types.QemuResponse, 128)
		subQemuStatus     = make(chan *types.QemuResponse, 128)
		sharedDir         = path.Join(hypervisor.BaseDir, vmId, hypervisor.ShareDirTag)
		podData           []byte
		mypod             *Pod
		wg                *sync.WaitGroup
		err               error
		uid               string
		gid               string
	)
	if podArgs == "" {
		mypod = daemon.podList[podId]
		if mypod == nil {
			return -1, "", fmt.Errorf("Can not find the POD instance of %s", podId)
		}
		podData, err = daemon.GetPodByName(podId)
		if err != nil {
			return -1, "", err
		}
		wg = mypod.Wg
	} else {
		podData = []byte(podArgs)
	}

	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return -1, "", err
	}

	vm := daemon.vmList[vmId]
	if vm == nil {
		glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.kernel, daemon.initrd)
		var (
			cpu = 1
			mem = 128
		)
		if userPod.Resource.Vcpu > 0 {
			cpu = userPod.Resource.Vcpu
		}
		if userPod.Resource.Memory > 0 {
			mem = userPod.Resource.Memory
		}
		b := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: daemon.kernel,
			Initrd: daemon.initrd,
			Bios:   daemon.bios,
			Cbfs:   daemon.cbfs,
		}
		go hypervisor.VmLoop(hypervisorDriver, vmId, qemuPodEvent, qemuStatus, b)
		if err := daemon.SetQemuChan(vmId, qemuPodEvent, qemuStatus, subQemuStatus); err != nil {
			glog.V(1).Infof("SetQemuChan error: %s", err.Error())
			return -1, "", err
		}
	} else {
		ret1, ret2, ret3, err := daemon.GetQemuChan(vmId)
		if err != nil {
			return -1, "", err
		}
		qemuPodEvent = ret1.(chan hypervisor.VmEvent)
		qemuStatus = ret2.(chan *types.QemuResponse)
		subQemuStatus = ret3.(chan *types.QemuResponse)
	}

	if podArgs != "" {
		wg = new(sync.WaitGroup)
		if err := daemon.CreatePod(podArgs, podId, wg); err != nil {
			glog.Error(err.Error())
			return -1, "", err
		}
		mypod = daemon.podList[podId]
	}

	storageDriver = daemon.Storage.StorageType
	if storageDriver == "devicemapper" {
		poolName = daemon.Storage.PoolName
		fstype = daemon.Storage.Fstype
		volPoolName = "hyper-volume-pool"
		devPrefix = poolName[:strings.Index(poolName, "-pool")]
		rootPath = "/var/lib/docker/devicemapper"
		rootfs = "/rootfs"
	} else if storageDriver == "aufs" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	} else if storageDriver == "overlay" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	}

	// Process the 'Files' section
	files := make(map[string](pod.UserFile))
	for _, v := range userPod.Files {
		files[v.Name] = v
	}

	for i, c := range mypod.Containers {
		var jsonResponse *docker.ConfigJSON
		if jsonResponse, err = cli.GetContainerInfo(c.Id); err != nil {
			glog.Error("got error when getting container info ", err.Error())
			return -1, "", err
		}

		if storageDriver == "devicemapper" {
			if err := dm.CreateNewDevice(c.Id, devPrefix, rootPath); err != nil {
				return -1, "", err
			}
			devFullName, err = dm.MountContainerToSharedDir(c.Id, sharedDir, devPrefix)
			if err != nil {
				glog.Error("got error when mounting container to shared dir ", err.Error())
				return -1, "", err
			}
			fstype, err = dm.ProbeFsType(devFullName)
			if err != nil {
				fstype = "ext4"
			}
		} else if storageDriver == "aufs" {
			devFullName, err = aufs.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mounting container to shared dir ", err.Error())
				return -1, "", err
			}
			devFullName = "/" + c.Id + "/rootfs"
		} else if storageDriver == "overlay" {
			devFullName, err = overlay.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mounting container to shared dir ", err.Error())
				return -1, "", err
			}
			devFullName = "/" + c.Id + "/rootfs"
		}

		for _, f := range userPod.Containers[i].Files {
			targetPath := f.Path
			file, ok := files[f.Filename]
			if !ok {
				continue
			}
			var fromFile = "/tmp/" + file.Name
			defer os.RemoveAll(fromFile)
			if file.Uri != "" {
				err = utils.DownloadFile(file.Uri, fromFile)
				if err != nil {
					return -1, "", err
				}
			} else if file.Contents != "" {
				err = ioutil.WriteFile(fromFile, []byte(file.Contents), 0666)
				if err != nil {
					return -1, "", err
				}
			} else {
				continue
			}
			// we need to decode the content
			fi, err := os.Open(fromFile)
			if err != nil {
				return -1, "", err
			}
			defer fi.Close()
			fileContent, err := ioutil.ReadAll(fi)
			if err != nil {
				return -1, "", err
			}
			if file.Encoding == "base64" {
				newContent, err := utils.Base64Decode(string(fileContent))
				if err != nil {
					return -1, "", err
				}
				err = ioutil.WriteFile(fromFile, []byte(newContent), 0666)
				if err != nil {
					return -1, "", err
				}
			}
			// for non-base64 files, fromFile already holds the raw content, nothing to rewrite

			// get the uid and gid for that attached file
			fileUser := f.User
			fileGroup := f.Group
			u, _ := user.Current()
			if fileUser == "" {
				uid = u.Uid
			} else {
				u, err = user.Lookup(fileUser)
				if err != nil {
					return -1, "", err
				}
				uid = u.Uid
				gid = u.Gid
			}
			if fileGroup == "" {
				gid = u.Gid
			}

			if storageDriver == "devicemapper" {
				err := dm.AttachFiles(c.Id, devPrefix, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attaching files ", err.Error())
					return -1, "", err
				}
			} else if storageDriver == "aufs" {
				err := aufs.AttachFiles(c.Id, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attaching files ", err.Error())
					return -1, "", err
				}
			} else if storageDriver == "overlay" {
				err := overlay.AttachFiles(c.Id, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attaching files ", err.Error())
					return -1, "", err
				}
			}
		}

		env := make(map[string]string)
		for _, v := range jsonResponse.Config.Env {
			env[v[:strings.Index(v, "=")]] = v[strings.Index(v, "=")+1:]
		}
		for _, e := range userPod.Containers[i].Envs {
			env[e.Env] = e.Value
		}
		glog.V(1).Infof("Parsing envs for container %d: %d Envs", i, len(env))
		glog.V(1).Infof("The fs type is %s", fstype)
		glog.V(1).Infof("WorkingDir is %s", string(jsonResponse.Config.WorkingDir))
		glog.V(1).Infof("Image is %s", string(devFullName))
		containerInfo := &hypervisor.ContainerInfo{
			Id:         c.Id,
			Rootfs:     rootfs,
			Image:      devFullName,
			Fstype:     fstype,
			Workdir:    jsonResponse.Config.WorkingDir,
			Entrypoint: jsonResponse.Config.Entrypoint,
			Cmd:        jsonResponse.Config.Cmd,
			Envs:       env,
		}
		glog.V(1).Infof("Container Info is \n%v", containerInfo)
		containerInfoList = append(containerInfoList, containerInfo)
		glog.V(1).Infof("container %d created %s, workdir %s, env: %v", i, c.Id, jsonResponse.Config.WorkingDir, env)
	}

	// Process the 'Volumes' section
	for _, v := range userPod.Volumes {
		if v.Source == "" {
			if storageDriver == "devicemapper" {
				volName := fmt.Sprintf("%s-%s-%s", volPoolName, podId, v.Name)
				dev_id, _ := daemon.GetVolumeId(podId, volName)
				glog.Errorf("DeviceID is %d", dev_id)
				if dev_id < 1 {
					dev_id, _ = daemon.GetMaxDeviceId()
					err := daemon.CreateVolume(podId, volName, fmt.Sprintf("%d", dev_id+1), false)
					if err != nil {
						return -1, "", err
					}
				} else {
					err := daemon.CreateVolume(podId, volName, fmt.Sprintf("%d", dev_id), true)
					if err != nil {
						return -1, "", err
					}
				}

				fstype, err = dm.ProbeFsType("/dev/mapper/" + volName)
				if err != nil {
					fstype = "ext4"
				}
				myVol := &hypervisor.VolumeInfo{
					Name:     v.Name,
					Filepath: path.Join("/dev/mapper/", volName),
					Fstype:   fstype,
					Format:   "raw",
				}
				volumeInfoList = append(volumeInfoList, myVol)
				glog.V(1).Infof("volume %s created with dm as %s", v.Name, volName)
				continue
			} else {
				// Make sure the v.Name is given
				v.Source = path.Join("/var/tmp/hyper/", v.Name)
				if _, err := os.Stat(v.Source); err != nil && os.IsNotExist(err) {
					if err := os.MkdirAll(v.Source, os.FileMode(0777)); err != nil {
						return -1, "", err
					}
				}
				v.Driver = "vfs"
			}
		}

		if v.Driver != "vfs" {
			glog.V(1).Infof("bypass %s volume %s", v.Driver, v.Name)
			continue
		}

		// Process the situation if the source is not NULL, we need to bind that dir to sharedDir
		var flags uintptr = syscall.MS_BIND

		mountSharedDir := pod.RandStr(10, "alpha")
		targetDir := path.Join(sharedDir, mountSharedDir)
		glog.V(1).Infof("trying to bind dir %s to %s", v.Source, targetDir)

		if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) {
			glog.Errorf("error to create dir %s for volume %s", targetDir, v.Name)
			return -1, "", err
		}

		if err := syscall.Mount(v.Source, targetDir, "dir", flags, "--bind"); err != nil {
			glog.Errorf("bind dir %s failed: %s", v.Source, err.Error())
			return -1, "", err
		}
		myVol := &hypervisor.VolumeInfo{
			Name:     v.Name,
			Filepath: mountSharedDir,
			Fstype:   "dir",
			Format:   "",
		}
		glog.V(1).Infof("dir %s is bound to %s", v.Source, targetDir)
		volumeInfoList = append(volumeInfoList, myVol)
	}

	go func(interface{}) {
		for {
			qemuResponse := <-qemuStatus
			subQemuStatus <- qemuResponse
			if qemuResponse.Code == types.E_POD_FINISHED {
				data := qemuResponse.Data.([]uint32)
				daemon.SetPodContainerStatus(podId, data)
				daemon.podList[podId].Vm = ""
			} else if qemuResponse.Code == types.E_VM_SHUTDOWN {
				if daemon.podList[podId].Status == types.S_POD_RUNNING {
					daemon.podList[podId].Status = types.S_POD_SUCCEEDED
					daemon.SetContainerStatus(podId, types.S_POD_SUCCEEDED)
				}
				daemon.podList[podId].Vm = ""
				daemon.RemoveVm(vmId)
				daemon.DeleteQemuChan(vmId)
				mypod = daemon.podList[podId]
				if mypod.Type == "kubernetes" {
					switch mypod.Status {
					case types.S_POD_SUCCEEDED:
						if mypod.RestartPolicy == "always" {
							daemon.RestartPod(mypod)
						} else {
							daemon.DeletePodFromDB(podId)
							for _, c := range daemon.podList[podId].Containers {
								glog.V(1).Infof("Ready to rm container: %s", c.Id)
								if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
									glog.V(1).Infof("Error to rm container: %s", err.Error())
								}
							}
							// daemon.RemovePod(podId)
							daemon.DeletePodContainerFromDB(podId)
							daemon.DeleteVolumeId(podId)
						}
					case types.S_POD_FAILED:
						if mypod.RestartPolicy != "never" {
							daemon.RestartPod(mypod)
						} else {
							daemon.DeletePodFromDB(podId)
							for _, c := range daemon.podList[podId].Containers {
								glog.V(1).Infof("Ready to rm container: %s", c.Id)
								if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
									glog.V(1).Infof("Error to rm container: %s", err.Error())
								}
							}
							// daemon.RemovePod(podId)
							daemon.DeletePodContainerFromDB(podId)
							daemon.DeleteVolumeId(podId)
						}
					default:
					}
				}
				break
			}
		}
	}(subQemuStatus)

	if daemon.podList[podId].Type == "kubernetes" {
		for _, c := range userPod.Containers {
			c.RestartPolicy = "never"
		}
	}

	fmt.Printf("POD id is %s\n", podId)
	runPodEvent := &hypervisor.RunPodCommand{
		Spec:       userPod,
		Containers: containerInfoList,
		Volumes:    volumeInfoList,
		Wg:         wg,
	}
	qemuPodEvent <- runPodEvent

	daemon.podList[podId].Status = types.S_POD_RUNNING
	// Set the container status to online
	daemon.SetContainerStatus(podId, types.S_POD_RUNNING)

	// wait for the qemu response
	var qemuResponse *types.QemuResponse
	for {
		qemuResponse = <-subQemuStatus
		glog.V(1).Infof("Get the response from QEMU, VM id is %s!", qemuResponse.VmId)
		if qemuResponse.Code == types.E_VM_RUNNING {
			continue
		}
		if qemuResponse.VmId == vmId {
			break
		}
	}
	if qemuResponse.Data == nil {
		return qemuResponse.Code, qemuResponse.Cause, fmt.Errorf("QEMU response data is nil")
	}
	data := qemuResponse.Data.([]byte)
	daemon.UpdateVmData(vmId, data)
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vmId); err != nil {
		glog.Error(err.Error())
	}

	// XXX we should not close qemuStatus chan, it will be closed in shutdown process
	return qemuResponse.Code, qemuResponse.Cause, nil
}
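// HyperCmdRun implements the "run" sub-command: it builds a pod spec from a
// pod file (-p), a kubernetes pod file (-k), or from the image and flags on
// the command line, submits it via RunPod, and then attaches to the created
// container's tty.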
// hyper run [OPTIONS] image [COMMAND] [ARGS...]
func (cli *HyperClient) HyperCmdRun(args ...string) error {
	if len(args) == 0 {
		return fmt.Errorf("%s ERROR: Can not accept the 'run' command without argument!\n", os.Args[0])
	}
	var opts struct {
		PodFile       string   `short:"p" long:"podfile" value-name:"\"\"" description:"Create and Run a pod based on the pod file"`
		K8s           string   `short:"k" long:"kubernetes" value-name:"\"\"" description:"Create and Run a pod based on the kubernetes pod file"`
		Yaml          bool     `short:"y" long:"yaml" default:"false" default-mask:"-" description:"Create a pod based on Yaml file"`
		Name          string   `long:"name" value-name:"\"\"" description:"Assign a name to the container"`
		Attach        bool     `long:"attach" default:"true" default-mask:"-" description:"Attach the stdin, stdout and stderr to the container"`
		Workdir       string   `long:"workdir" default:"/" value-name:"\"\"" default-mask:"-" description:"Working directory inside the container"`
		Tty           bool     `long:"tty" default:"true" default-mask:"-" description:"Allocate a pseudo-TTY"`
		Cpu           int      `long:"cpu" default:"1" value-name:"1" default-mask:"-" description:"CPU number for the VM"`
		Memory        int      `long:"memory" default:"128" value-name:"128" default-mask:"-" description:"Memory size (MB) for the VM"`
		Env           []string `long:"env" value-name:"[]" default-mask:"-" description:"Set environment variables"`
		EntryPoint    string   `long:"entrypoint" value-name:"\"\"" default-mask:"-" description:"Overwrite the default ENTRYPOINT of the image"`
		RestartPolicy string   `long:"restart" default:"never" value-name:"\"\"" default-mask:"-" description:"Restart policy to apply when a container exits (never, onFailure, always)"`
	}
	var parser = gflag.NewParser(&opts, gflag.Default|gflag.IgnoreUnknown)
	parser.Usage = "run [OPTIONS] IMAGE [COMMAND] [ARG...]\n\ncreate a pod, and launch a new VM to run the pod"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		} else {
			return nil
		}
	}

	if opts.PodFile != "" {
		if _, err := os.Stat(opts.PodFile); err != nil {
			return err
		}
		jsonbody, err := ioutil.ReadFile(opts.PodFile)
		if err != nil {
			return err
		}
		if opts.Yaml == true {
			jsonbody, err = cli.ConvertYamlToJson(jsonbody)
			if err != nil {
				return err
			}
		}
		t1 := time.Now()
		podId, err := cli.RunPod(string(jsonbody))
		if err != nil {
			return err
		}
		fmt.Printf("POD id is %s\n", podId)
		t2 := time.Now()
		fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000)
		return nil
	}

	if opts.K8s != "" {
		var (
			kpod pod.KPod
			pod  *pod.UserPod
		)
		if _, err := os.Stat(opts.K8s); err != nil {
			return err
		}
		jsonbody, err := ioutil.ReadFile(opts.K8s)
		if err != nil {
			return err
		}
		if opts.Yaml == true {
			jsonbody, err = cli.ConvertYamlToJson(jsonbody)
			if err != nil {
				return err
			}
		}
		if err := json.Unmarshal(jsonbody, &kpod); err != nil {
			return err
		}
		pod, err = kpod.Convert()
		if err != nil {
			return err
		}
		jsonbody, err = json.Marshal(*pod)
		if err != nil {
			return err
		}
		t1 := time.Now()
		podId, err := cli.RunPod(string(jsonbody))
		if err != nil {
			return err
		}
		fmt.Printf("POD id is %s\n", podId)
		t2 := time.Now()
		fmt.Printf("Time to run a POD is %d ms\n", (t2.UnixNano()-t1.UnixNano())/1000000)
		return nil
	}

	// args still contains the "run" sub-command itself, so the image is args[1]
	if len(args) < 2 {
		return fmt.Errorf("%s: \"run\" requires a minimum of 1 argument, please provide the image.", os.Args[0])
	}
	var (
		image   = args[1]
		command = []string{}
		env     = []pod.UserEnvironmentVar{}
	)
	if len(args) > 2 {
		command = args[2:]
	}
	if opts.Name == "" {
		opts.Name = image + "-" + pod.RandStr(10, "number")
	}
	if opts.Memory == 0 {
		opts.Memory = 128
	}
	if opts.Cpu == 0 {
		opts.Cpu = 1
	}
	for _, v := range opts.Env {
		if v == "" || !strings.Contains(v, "=") {
			continue
		}
		userEnv := pod.UserEnvironmentVar{
			Env:   v[:strings.Index(v, "=")],
			Value: v[strings.Index(v, "=")+1:],
		}
		env = append(env, userEnv)
	}

	var containerList = []pod.UserContainer{}
	var container = pod.UserContainer{
		Name:          opts.Name,
		Image:         image,
		Command:       command,
		Workdir:       opts.Workdir,
		Entrypoint:    []string{},
		Ports:         []pod.UserContainerPort{},
		Envs:          env,
		Volumes:       []pod.UserVolumeReference{},
		Files:         []pod.UserFileReference{},
		RestartPolicy: opts.RestartPolicy,
	}
	containerList = append(containerList, container)

	var userPod = &pod.UserPod{
		Name:       opts.Name,
		Containers: containerList,
		Resource:   pod.UserResource{Vcpu: opts.Cpu, Memory: opts.Memory},
		Files:      []pod.UserFile{},
		Volumes:    []pod.UserVolume{},
		Tty:        opts.Tty,
	}
	/*
		if err := userPod.Validate(); err != nil {
			return err
		}
	*/
	jsonString, _ := json.Marshal(userPod)

	podId, err := cli.RunPod(string(jsonString))
	if err != nil {
		return err
	}
	fmt.Printf("POD id is %s\n", podId)
	// Get the container ID of this POD
	containerId, err := cli.GetContainerByPod(podId)
	if err != nil {
		return err
	}
	var (
		tag      = cli.GetTag()
		hijacked = make(chan io.Closer)
		errCh    chan error
	)
	v := url.Values{}
	v.Set("type", "container")
	v.Set("value", containerId)
	v.Set("tag", tag)

	// Block the return until the chan gets closed
	defer func() {
		// fmt.Printf("End of CmdExec(), Waiting for hijack to finish.\n")
		if _, ok := <-hijacked; ok {
			fmt.Printf("Hijack did not finish (chan still open)\n")
		}
	}()

	errCh = promise.Go(func() error {
		return cli.hijack("POST", "/attach?"+v.Encode(), true, cli.in, cli.out, cli.out, hijacked, nil, "")
	})

	if err := cli.monitorTtySize(podId, tag); err != nil {
		fmt.Printf("Monitor tty size fail for %s!\n", podId)
	}

	// Acknowledge the hijack before starting
	select {
	case closer := <-hijacked:
		// Make sure that the hijack gets closed when returning (this results
		// in closing the hijack chan and freeing the server's goroutines).
		if closer != nil {
			defer closer.Close()
		}
	case err := <-errCh:
		if err != nil {
			fmt.Printf("Error hijack: %s", err.Error())
			return err
		}
	}

	if err := <-errCh; err != nil {
		fmt.Printf("Error hijack: %s", err.Error())
		return err
	}
	// fmt.Printf("Success to exec the command %s for POD %s!\n", command, podId)
	return nil
}
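// GetTag returns a random 8-character alphanumeric tag used to identify an
// attach session.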
func (cli *HyperClient) GetTag() string {
	return pod.RandStr(8, "alphanum")
}