Example #1
0
File: vm.go Project: juito/hyper
func (daemon *Daemon) StartVm(vmId string, cpu, mem int, lazy bool) (vm *hypervisor.Vm, err error) {
	var (
		DEFAULT_CPU = 1
		DEFAULT_MEM = 128
	)

	if cpu <= 0 {
		cpu = DEFAULT_CPU
	}
	if mem <= 0 {
		mem = DEFAULT_MEM
	}

	b := &hypervisor.BootConfig{
		CPU:    cpu,
		Memory: mem,
		Kernel: daemon.Kernel,
		Initrd: daemon.Initrd,
		Bios:   daemon.Bios,
		Cbfs:   daemon.Cbfs,
		Vbox:   daemon.VboxImage,
	}

	glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.Kernel, daemon.Initrd)
	if vmId != "" || daemon.Bios != "" || daemon.Cbfs != "" || runtime.GOOS == "darwin" || lazy {
		vm, err = hypervisor.GetVm(vmId, b, false, lazy)
	} else {
		vm, err = daemon.Factory.GetVm(cpu, mem)
	}
	if err == nil {
		daemon.VmList.Add(vm)
	}
	return vm, err
}
Example #2
0
func startSandbox(f factory.Factory, cpu, mem int, kernel, initrd string) (vm *hypervisor.Vm, err error) {
	var (
		DEFAULT_CPU = 1
		DEFAULT_MEM = 128
	)

	if cpu <= 0 {
		cpu = DEFAULT_CPU
	}
	if mem <= 0 {
		mem = DEFAULT_MEM
	}

	if kernel == "" {
		hlog.Log(DEBUG, "get sandbox from factory: CPU: %d, Memory %d", cpu, mem)
		vm, err = f.GetVm(cpu, mem)
	} else {
		hlog.Log(DEBUG, "The create sandbox with: kernel=%s, initrd=%s, cpu=%d, memory=%d", kernel, initrd, cpu, mem)
		config := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}
		vm, err = hypervisor.GetVm("", config, false, hypervisor.HDriver.SupportLazyMode())
	}
	if err != nil {
		hlog.Log(ERROR, "failed to create a sandbox (cpu=%d, mem=%d kernel=%s initrd=%d): %v", cpu, mem, kernel, initrd, err)
	}

	return vm, err
}
Example #3
0
func (f Factory) GetVm(cpu, mem int) (*hypervisor.Vm, error) {
	// check if match the base
	config := f.Config()
	if config.CPU > cpu || config.Memory > mem {
		// also strip unrelated option from @config
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: config.Kernel,
			Initrd: config.Initrd,
		}
		return hypervisor.GetVm("", boot, false, false)
	}

	vm, err := f.GetBaseVm()
	if err != nil {
		return nil, err
	}

	// unpause
	vm.Pause(false)

	// hotplug add cpu and memory
	var needOnline bool = false
	if vm.Cpu < cpu {
		needOnline = true
		glog.Info("HotAddCpu for cached Vm")
		err = vm.SetCpus(cpu)
		glog.Info("HotAddCpu result %v", err)
	}
	if vm.Mem < mem {
		needOnline = true
		glog.Info("HotAddMem for cached Vm")
		err = vm.AddMem(mem)
		glog.Info("HotAddMem result %v", err)
	}
	if needOnline {
		glog.Info("OnlineCpuMem for cached Vm")
		vm.OnlineCpuMem()
	}
	if err != nil {
		vm.Kill()
		vm = nil
	}
	return vm, err
}
Example #4
0
func (d *directFactory) GetBaseVm() (*hypervisor.Vm, error) {
	glog.V(2).Infof("direct factory start create vm")
	vm, err := hypervisor.GetVm("", d.Config(), true, false, 0)
	if err == nil {
		err = vm.Pause(true)
		if err != nil {
			vm.Kill()
			vm = nil
		}
	}
	if err == nil {
		glog.V(2).Infof("direct factory created vm:%s", vm.Id)
	} else {
		glog.V(2).Infof("direct factory failed to create vm")

	}
	return vm, err
}
Example #5
0
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	userPod := pod.ConvertOCF2PureUserPod(spec)
	podStatus := hypervisor.NewPod(podId, userPod, nil)

	cpu := defaultCpus
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	mem := defaultMemory
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// if user specify a kernel, they must specify an initrd at the same time
		return nil, fmt.Errorf("You must specify an initrd if you specify a kernel, or vice-versa")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}

		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	Response := vm.StartPod(podStatus, userPod, nil, nil)
	if Response.Data == nil {
		vm.Kill()
		glog.V(1).Infof("StartPod fail: QEMU response data is nil\n")
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("result: code %d %s\n", Response.Code, Response.Cause)

	hp := &HyperPod{
		userPod:    userPod,
		podStatus:  podStatus,
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// create Listener process running in its own netns
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
Example #6
0
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	cpu := defaultCpus
	mem := defaultMemory
	if spec.Linux != nil && spec.Linux.Resources != nil && spec.Linux.Resources.Memory != nil && spec.Linux.Resources.Memory.Limit != nil {
		mem = int(*spec.Linux.Resources.Memory.Limit >> 20)
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// if user specify a kernel, they must specify an initrd at the same time
		return nil, fmt.Errorf("You must specify an initrd if you specify a kernel, or vice-versa")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}

		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	r := make(chan api.Result, 1)
	go func() {
		r <- vm.WaitInit()
	}()

	sandbox := api.SandboxInfoFromOCF(spec)
	vm.InitSandbox(sandbox)

	rsp := <-r

	if !rsp.IsSuccess() {
		vm.Kill()
		glog.V(1).Infof("StartPod fail, response: %v", rsp)
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("%s init sandbox successfully", rsp.ResultId())

	hp := &HyperPod{
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// create Listener process running in its own netns
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
Example #7
0
func CreateTemplateVM(statePath, vmName string, cpu, mem int, kernel, initrd string) (t *TemplateVmConfig, err error) {
	defer func() {
		if err != nil {
			(&TemplateVmConfig{StatePath: statePath}).Destroy()
		}
	}()

	// prepare statePath
	if err := os.MkdirAll(statePath, 0700); err != nil {
		glog.Infof("create template state path failed: %v", err)
		return nil, err
	}
	flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV)
	opts := fmt.Sprintf("size=%dM", mem+8)
	if err = syscall.Mount("tmpfs", statePath, "tmpfs", flags, opts); err != nil {
		glog.Infof("mount template state path failed: %v", err)
		return nil, err
	}
	if f, err := os.Create(statePath + "/memory"); err != nil {
		glog.Infof("create memory path failed: %v", err)
		return nil, err
	} else {
		f.Close()
	}

	// launch vm
	b := &hypervisor.BootConfig{
		CPU:              cpu,
		Memory:           mem,
		HotAddCpuMem:     true,
		BootToBeTemplate: true,
		BootFromTemplate: false,
		MemoryPath:       statePath + "/memory",
		DevicesStatePath: statePath + "/state",
		Kernel:           kernel,
		Initrd:           initrd,
	}
	vm, err := hypervisor.GetVm(vmName, b, true, false)
	if err != nil {
		return nil, err
	}
	defer vm.Kill()

	// pasue and save devices state
	if err = vm.Pause(true); err != nil {
		glog.Infof("failed to pause template vm:%v", err)
		return nil, err
	}
	if err = vm.Save(statePath + "/state"); err != nil {
		glog.Infof("failed to save template vm states: %v", err)
		return nil, err
	}

	// TODO: qemu driver's qmp doesn't wait migration finish.
	// so we wait here. We should fix it in the qemu driver side.
	time.Sleep(1 * time.Second)

	config := &TemplateVmConfig{
		StatePath: statePath,
		Driver:    hypervisor.HDriver.Name(),
		Cpu:       cpu,
		Memory:    mem,
		Kernel:    kernel,
		Initrd:    initrd,
	}

	configData, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		glog.V(1).Infof("%s\n", err.Error())
		return nil, err
	}
	configFile := filepath.Join(statePath, "config.json")
	err = ioutil.WriteFile(configFile, configData, 0644)
	if err != nil {
		glog.V(1).Infof("%s\n", err.Error())
		return nil, err
	}

	return config, nil
}
Example #8
0
// boot vm from template, the returned vm is paused
func (t *TemplateVmConfig) NewVmFromTemplate(vmName string) (*hypervisor.Vm, error) {
	return hypervisor.GetVm(vmName, t.BootConfigFromTemplate(), true, false)
}