func (p *Pod) init(data interface{}, autoremove bool) error {
	if err := p.spec.Validate(); err != nil {
		return err
	}
	if err := p.preprocess(); err != nil {
		return err
	}

	resPath := filepath.Join(DefaultResourcePath, p.id)
	if err := os.MkdirAll(resPath, os.FileMode(0755)); err != nil {
		glog.Error("cannot create resource dir ", resPath)
		return err
	}

	status := hypervisor.NewPod(p.id, p.spec)
	status.Handler.Handle = hyperHandlePodEvent
	status.Handler.Data = data
	status.Autoremove = autoremove
	status.ResourcePath = resPath

	p.status = status
	return nil
}
func (p *Pod) init(data interface{}) error {
	if err := p.Spec.Validate(); err != nil {
		return err
	}
	if err := p.preprocess(); err != nil {
		return err
	}

	p.PodStatus = hypervisor.NewPod(p.Id, p.Spec, &hypervisor.HandleEvent{hyperHandlePodEvent, data})
	return nil
}
func CreatePod(daemon *Daemon, dclient DockerInterface, podId, podArgs string, autoremove bool) (*Pod, error) {
	glog.V(1).Infof("podArgs: %s", podArgs)

	resPath := filepath.Join(DefaultResourcePath, podId)
	if err := os.MkdirAll(resPath, os.FileMode(0755)); err != nil {
		glog.Error("cannot create resource dir ", resPath)
		return nil, err
	}

	spec, err := ProcessPodBytes([]byte(podArgs), podId)
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return nil, err
	}

	if err = spec.Validate(); err != nil {
		return nil, err
	}

	status := hypervisor.NewPod(podId, spec)
	status.Handler.Handle = hyperHandlePodEvent
	status.Autoremove = autoremove
	status.ResourcePath = resPath

	pod := &Pod{
		id:     podId,
		status: status,
		spec:   spec,
	}

	if err = pod.InitContainers(daemon, dclient); err != nil {
		return nil, err
	}

	return pod, nil
}
func (daemon *Daemon) CreatePod(podId, podArgs string, config interface{}, autoremove bool) error {
	glog.V(1).Infof("podArgs: %s", podArgs)

	userPod, err := pod.ProcessPodBytes([]byte(podArgs))
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return err
	}

	if err := userPod.Validate(); err != nil {
		return err
	}

	mypod := hypervisor.NewPod(podId, userPod)
	mypod.Handler.Handle = hyperHandlePodEvent
	mypod.Handler.Data = daemon
	mypod.Autoremove = autoremove

	// Store the UserPod into the db.
	if err := daemon.WritePodToDB(podId, []byte(podArgs)); err != nil {
		glog.V(1).Info("Found an error while saving the POD file")
		return err
	}

	containerIds, err := daemon.GetPodContainersByName(podId)
	if err != nil {
		glog.V(1).Info(err.Error())
	}

	if containerIds != nil {
		// Reuse the containers already recorded for this pod name.
		for _, id := range containerIds {
			var (
				name  string
				image string
			)
			if jsonResponse, err := daemon.DockerCli.GetContainerInfo(id); err == nil {
				name = jsonResponse.Name
				image = jsonResponse.Config.Image
			}
			mypod.AddContainer(id, name, image, []string{}, types.S_POD_CREATED)
		}
	} else {
		// Process the 'Containers' section of the pod spec.
		glog.V(1).Info("Process the Containers section in POD SPEC\n")
		for _, c := range userPod.Containers {
			imgName := c.Image
			cId, _, err := daemon.DockerCli.SendCmdCreate(c.Name, imgName, []string{}, nil)
			if err != nil {
				glog.Error(err.Error())
				daemon.DeletePodFromDB(podId)
				return err
			}
			mypod.AddContainer(string(cId), c.Name, imgName, []string{}, types.S_POD_CREATED)
		}
	}

	daemon.AddPod(mypod)

	if err = daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saving the Containers info")
		return err
	}

	return nil
}
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	userPod := pod.ConvertOCF2PureUserPod(spec)
	podStatus := hypervisor.NewPod(podId, userPod, nil)

	cpu := defaultCpus
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	mem := defaultMemory
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// If the user specifies a kernel, they must specify an initrd as well.
		return nil, fmt.Errorf("You must specify an initrd if you specify a kernel, or vice-versa")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}
		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	Response := vm.StartPod(podStatus, userPod, nil, nil)
	if Response.Data == nil {
		vm.Kill()
		glog.V(1).Infof("StartPod fail: QEMU response data is nil\n")
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("result: code %d %s\n", Response.Code, Response.Cause)

	hp := &HyperPod{
		userPod:    userPod,
		podStatus:  podStatus,
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// Create the listener process running in its own netns.
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
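// chooseKernel and chooseInitrd are called above but not included in this
// listing. A minimal sketch, assuming the custom kernel and initrd paths are
// carried as OCI spec annotations; the annotation keys below are hypothetical,
// not the actual keys used by the source:
func chooseKernel(spec *specs.Spec) string {
	// specs.Spec.Annotations is a map[string]string in the OCI runtime spec;
	// a missing key yields "", which selects the default kernel path above.
	return spec.Annotations["example.runv.kernel"] // hypothetical key
}

func chooseInitrd(spec *specs.Spec) string {
	return spec.Annotations["example.runv.initrd"] // hypothetical key
}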
func main() {
	hypervisor.InterfaceCount = 0

	var containerInfoList []*hypervisor.ContainerInfo
	var roots []string
	var containerId string
	var err error

	ocffile := flag.String("config", "", "ocf configure file")
	kernel := flag.String("kernel", "", "hyper kernel")
	initrd := flag.String("initrd", "", "hyper initrd")
	vbox := flag.String("vbox", "", "vbox boot iso")
	driver := flag.String("driver", "", "hypervisor driver")
	flag.Parse()

	if *ocffile == "" {
		*ocffile = "config.json"
	}
	if _, err = os.Stat(*ocffile); os.IsNotExist(err) {
		fmt.Printf("Please specify ocffile or put config.json under current working directory\n")
		return
	}

	if *vbox == "" {
		*vbox = "./vbox.iso"
	}
	if _, err = os.Stat(*vbox); err == nil {
		*vbox, err = filepath.Abs(*vbox)
		if err != nil {
			fmt.Printf("Cannot get abs path for vbox: %s\n", err.Error())
			return
		}
	}

	if *kernel == "" {
		*kernel = "./kernel"
	}
	if _, err = os.Stat(*kernel); err == nil {
		*kernel, err = filepath.Abs(*kernel)
		if err != nil {
			fmt.Printf("Cannot get abs path for kernel: %s\n", err.Error())
			return
		}
	}

	if *initrd == "" {
		*initrd = "./initrd.img"
	}
	if _, err = os.Stat(*initrd); err == nil {
		*initrd, err = filepath.Abs(*initrd)
		if err != nil {
			fmt.Printf("Cannot get abs path for initrd: %s\n", err.Error())
			return
		}
	}

	if *driver == "" {
		*driver = "kvm"
		fmt.Printf("Use default hypervisor KVM\n")
	}
	if hypervisor.HDriver, err = driverloader.Probe(*driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))

	ocfData, err := ioutil.ReadFile(*ocffile)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	userPod, err := pod.OCFConvert2Pod(ocfData)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	mypod := hypervisor.NewPod(podId, userPod)

	var (
		cpu = 1
		mem = 128
	)
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	b := &hypervisor.BootConfig{
		Kernel: *kernel,
		Initrd: *initrd,
		Bios:   "",
		Cbfs:   "",
		Vbox:   *vbox,
		CPU:    cpu,
		Memory: mem,
	}

	vm := hypervisor.NewVm(vmId, cpu, mem, false, types.VM_KEEP_NONE)
	err = vm.Launch(b)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	sharedDir := path.Join(hypervisor.BaseDir, vm.Id, hypervisor.ShareDirTag)

	for _, c := range userPod.Containers {
		var root string
		var err error

		containerId = GenerateRandomID()
		rootDir := path.Join(sharedDir, containerId)
		os.MkdirAll(rootDir, 0755)
		rootDir = path.Join(rootDir, "rootfs")

		if !filepath.IsAbs(c.Image) {
			root, err = filepath.Abs(c.Image)
			if err != nil {
				fmt.Printf("%s\n", err.Error())
				return
			}
		} else {
			root = c.Image
		}

		err = mount(root, rootDir)
		if err != nil {
			fmt.Printf("mount %s to %s failed: %s\n", root, rootDir, err.Error())
			return
		}
		roots = append(roots, rootDir)

		containerInfo := &hypervisor.ContainerInfo{
			Id:     containerId,
			Rootfs: "rootfs",
			Image:  containerId,
			Fstype: "dir",
		}
		containerInfoList = append(containerInfoList, containerInfo)

		mypod.AddContainer(containerId, podId, "", []string{}, types.S_POD_CREATED)
	}

	qemuResponse := vm.StartPod(mypod, userPod, containerInfoList, nil)
	if qemuResponse.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)

	inFd, _ := term.GetFdInfo(os.Stdin)
	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)
	oldState, err := term.SetRawTerminal(inFd)
	if err != nil {
		return
	}

	height, width := getTtySize(outFd, isTerminalOut)
	winSize := &hypervisor.WindowSize{
		Row:    uint16(height),
		Column: uint16(width),
	}

	tag := pod.RandStr(8, "alphanum")
	monitorTtySize(vm, tag, outFd, isTerminalOut)

	vm.Attach(os.Stdin, os.Stdout, tag, containerId, winSize)

	qemuResponse = vm.StopPod(mypod, "yes")

	term.RestoreTerminal(inFd, oldState)

	for _, root := range roots {
		umount(root)
	}

	if qemuResponse.Data == nil {
		fmt.Printf("StopPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)
}
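// The mount and umount helpers used by main are not part of this listing.
// A minimal sketch, assuming mount bind-mounts the container image directory
// into the VM's shared directory; the bodies below are assumptions, not the
// original implementation:
func mount(src, dst string) error {
	if err := os.MkdirAll(dst, 0755); err != nil {
		return err
	}
	// MS_BIND|MS_REC makes src (and its submounts) visible at dst without copying.
	return syscall.Mount(src, dst, "", syscall.MS_BIND|syscall.MS_REC, "")
}

func umount(root string) {
	// MNT_DETACH detaches lazily, so teardown does not fail while the path is busy.
	syscall.Unmount(root, syscall.MNT_DETACH)
}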
func startRunvPod(context *nsContext, config *startConfig) (err error) {
	context.lock.Lock()
	defer context.lock.Unlock()

	if context.firstConfig == nil {
		context.firstConfig = config
	} else {
		// Check whether the namespace service has been stopped.
		if len(context.actives) == 0 {
			return fmt.Errorf("The namespace service was stopped")
		}
		// Check that this config matches the first one.
		if config.Root != "" && config.Root != context.firstConfig.Root {
			return fmt.Errorf("The root does not match")
		}
		if config.Driver != "" && config.Driver != context.firstConfig.Driver {
			return fmt.Errorf("The driver does not match")
		}
		if config.Kernel != "" && config.Kernel != context.firstConfig.Kernel {
			return fmt.Errorf("The kernel does not match")
		}
		if config.Initrd != "" && config.Initrd != context.firstConfig.Initrd {
			return fmt.Errorf("The initrd does not match")
		}
		if config.Vbox != "" && config.Vbox != context.firstConfig.Vbox {
			return fmt.Errorf("The vbox does not match")
		}
		// Check the shared namespaces.
		for _, ns := range config.LinuxRuntimeSpec.Linux.Namespaces {
			if ns.Path == "" {
				continue
			}
			if _, ok := context.actives[ns.Path]; !ok {
				return fmt.Errorf("Cannot share namespace with: %s", ns.Path)
			}
		}
		// OK, the pod has been started; register this config and return.
		context.actives[config.Name] = config
		return nil
	}

	hypervisor.InterfaceCount = 0

	driver := config.Driver
	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	context.podId = fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	context.vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	context.userPod = pod.ConvertOCF2PureUserPod(&config.LinuxSpec, &config.LinuxRuntimeSpec)
	context.podStatus = hypervisor.NewPod(context.podId, context.userPod)

	context.vm, err = startVm(config, context.userPod, context.vmId)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	Response := context.vm.StartPod(context.podStatus, context.userPod, nil, nil)
	if Response.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return fmt.Errorf("StartPod fail")
	}
	fmt.Printf("result: code %d %s\n", Response.Code, Response.Cause)

	context.actives[config.Name] = config
	return nil
}
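// startVm is referenced above but not included in this listing. A minimal
// sketch, assuming it follows the same BootConfig pattern used by
// createHyperPod and main above; the body, the *pod.UserPod parameter type,
// and the default CPU/memory values are assumptions for illustration:
func startVm(config *startConfig, userPod *pod.UserPod, vmId string) (*hypervisor.Vm, error) {
	cpu, mem := 1, 128 // illustrative defaults, as in main above
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	b := &hypervisor.BootConfig{
		Kernel: config.Kernel,
		Initrd: config.Initrd,
		Vbox:   config.Vbox,
		CPU:    cpu,
		Memory: mem,
	}

	vm := hypervisor.NewVm(vmId, cpu, mem, false, types.VM_KEEP_NONE)
	if err := vm.Launch(b); err != nil {
		return nil, err
	}
	return vm, nil
}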