Example #1
func (daemon *Daemon) initRunV(c *apitypes.HyperConfig) error {
	var (
		err error
	)

	if hypervisor.HDriver, err = driverloader.Probe(c.Driver); err != nil {
		glog.Warningf("%s", err.Error())
		glog.Errorf("Please specify the correct and available hypervisor, such as 'kvm', 'qemu-kvm',  'libvirt', 'xen', 'qemu', or ''")
		return err
	}

	daemon.Hypervisor = c.Driver
	glog.Infof("The hypervisor's driver is %s", c.Driver)
	daemon.Factory = factory.NewFromPolicy(c.Kernel, c.Initrd, c.VmFactoryPolicy)

	return nil
}
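
Every example on this page follows the same pattern: driverloader.Probe resolves a hypervisor driver name ("kvm", "libvirt", "qemu", and so on, or an empty string, which the error messages above also list as valid) into a concrete backend, and the result is stored in the global hypervisor.HDriver before any VM is created. Below is a minimal, self-contained sketch of that pattern; the import paths are assumed from the hyperhq/runv layout used by these examples, and the HYPER_DRIVER environment variable is purely illustrative.

package main

import (
	"fmt"
	"os"

	// Import paths assumed from the hyperhq/runv project these examples come from.
	"github.com/hyperhq/runv/driverloader"
	"github.com/hyperhq/runv/hypervisor"
)

func main() {
	// HYPER_DRIVER is a hypothetical knob for this sketch, e.g. "kvm" or "libvirt".
	driver := os.Getenv("HYPER_DRIVER")

	// Probe validates the driver name and returns the matching backend;
	// the examples on this page store the result in the global hypervisor.HDriver.
	hd, err := driverloader.Probe(driver)
	if err != nil {
		fmt.Printf("failed to set up the %q driver: %v\n", driver, err)
		os.Exit(1)
	}
	hypervisor.HDriver = hd

	fmt.Printf("hypervisor driver %q is ready\n", driver)
}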
Example #2
			if (driver != "" && driver != tconfig.Driver) ||
				(kernel != "" && kernel != tconfig.Kernel) ||
				(initrd != "" && initrd != tconfig.Initrd) {
				glog.Infof("template config is not match the driver, kernel or initrd argument, disable template")
				template = ""
			} else if driver == "" {
				driver = tconfig.Driver
			}
		} else if kernel == "" || initrd == "" {
			glog.Infof("argument kernel and initrd must be set")
			os.Exit(1)
		}

		hypervisor.InterfaceCount = 0
		var err error
		if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
			glog.V(1).Infof("%s\n", err.Error())
			os.Exit(1)
		}

		var f factory.Factory
		if template != "" {
			f = singlefactory.New(templatefactory.NewFromExisted(tconfig))
		} else {
			f = factory.NewFromConfigs(kernel, initrd, nil)
		}
		sv, err := supervisor.New(stateDir, containerdDir, f,
			context.GlobalInt("default_cpus"), context.GlobalInt("default_memory"))
		if err != nil {
			glog.Infof("%v", err)
			os.Exit(1)
Example #3
func mainDaemon(opts *Options) {
	config := opts.Config
	glog.V(1).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	if _, err := os.Stat(config); err != nil {
		if os.IsNotExist(err) {
			glog.Errorf("Can not find config file(%s)", config)
			return
		}
		glog.Errorf(err.Error())
		return
	}

	os.Setenv("HYPER_CONFIG", config)
	cfg, err := goconfig.LoadConfigFile(config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", config, err.Error())
		return
	}

	hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root")

	if hyperRoot == "" {
		hyperRoot = "/var/lib/hyper"
	}
	utils.HYPER_ROOT = hyperRoot
	if _, err := os.Stat(hyperRoot); err != nil {
		if err := os.MkdirAll(hyperRoot, 0755); err != nil {
			glog.Errorf(err.Error())
			return
		}
	}

	storageDriver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "StorageDriver")
	if storageDriver != "" {
		graphdriver.DefaultDriver = storageDriver
	}

	docker.Init(strings.Split(opts.Mirrors, ","), strings.Split(opts.InsecureRegistries, ","))
	eng := engine.New(config)

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s", err.Error())
		return
	}

	driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor")
	driver = strings.ToLower(driver)
	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		glog.Warningf("%s", err.Error())
		glog.Errorf("Please specify the correct and available hypervisor, such as 'kvm', 'qemu-kvm',  'libvirt', 'xen', 'qemu', 'vbox' or ''")
		return
	} else {
		d.Hypervisor = driver
		glog.Infof("The hypervisor's driver is %s", driver)
	}

	disableIptables := cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false)
	if err = hypervisor.InitNetwork(d.BridgeIface, d.BridgeIP, disableIptables || opts.DisableIptables); err != nil {
		glog.Errorf("InitNetwork failed, %s", err.Error())
		return
	}

	defaultLog, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Logger")
	defaultLogCfg, _ := cfg.GetSection("Log")
	d.DefaultLogCfg(defaultLog, defaultLogCfg)

	// Set the daemon object as the global variable,
	// which will be used by the puller and builder
	utils.SetDaemon(d)
	if err := d.DockerCli.Setup(); err != nil {
		glog.Error(err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!")
		return
	}
	defaultHost := []string{}
	if opts.Hosts != "" {
		defaultHost = append(defaultHost, opts.Hosts)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)

	// The serve API job never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization")

	if err := d.Restore(); err != nil {
		glog.Warningf("Fail to restore the previous VM")
		return
	}

	vmCachePolicy, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "VmCachePolicy")
	d.InitVmCache(vmCachePolicy)

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v", errAPI)
		}
		break
	case <-stop:
		d.DestroyAndKeepVm()
		eng.Shutdown()
		break
	case <-stopAll:
		d.DestroyAllVm()
		eng.Shutdown()
		break
	}
}
Example #4
func mainDaemon(config, host string, flDisableIptables bool) {
	glog.V(1).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	if _, err := os.Stat(config); err != nil {
		if os.IsNotExist(err) {
			glog.Errorf("Can not find config file(%s)", config)
			return
		}
		glog.Errorf(err.Error())
		return
	}

	os.Setenv("HYPER_CONFIG", config)
	cfg, err := goconfig.LoadConfigFile(config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", config, err.Error())
		return
	}
	driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor")
	if runtime.GOOS == "darwin" {
		driver = "vbox"
	} else {
		if driver == "" {
			// We need to provide a default exec driver
			var drivers = []string{"kvm", "xen", "vbox"}
			driver = utils.GetAvailableDriver(drivers)
		}
		driver = strings.ToLower(driver)
	}
	if driver == "" || (driver != "kvm" && driver != "xen" && driver != "vbox") {
		fmt.Printf("Please specify the exec driver, such as 'kvm', 'xen' or 'vbox'\n")
		return
	}
	hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root")
	iso, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Vbox")

	if hyperRoot == "" {
		hyperRoot = "/var/lib/hyper"
	}
	if _, err := os.Stat(hyperRoot); err != nil {
		if err := os.MkdirAll(hyperRoot, 0755); err != nil {
			glog.Errorf(err.Error())
			return
		}
	}

	utils.SetHyperEnv(fmt.Sprintf("%s/.hyperconfig", os.Getenv("HOME")), hyperRoot, iso)
	eng := engine.New(config)
	docker.Init()

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s\n", err.Error())
		return
	}

	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		glog.Errorf("%s\n", err.Error())
		return
	}
	d.Hypervisor = driver

	disableIptables := cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false)
	if err = hypervisor.InitNetwork(d.BridgeIface, d.BridgeIP, disableIptables || flDisableIptables); err != nil {
		glog.Errorf("InitNetwork failed, %s\n", err.Error())
		return
	}

	// Set the daemon object as the global variable,
	// which will be used by the puller and builder
	utils.SetDaemon(d)
	if err := d.DockerCli.Setup(); err != nil {
		glog.Error(err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s\n", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s\n",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!\n")
		return
	}
	defaultHost := []string{}
	if host != "" {
		defaultHost = append(defaultHost, host)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)

	// The serve API job never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v\n", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization\n")

	if err := d.Restore(); err != nil {
		glog.Warningf("Fail to restore the previous VM")
		return
	}

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v\n", errAPI)
		}
		break
	case <-stop:
		d.DestroyAndKeepVm()
		eng.Shutdown()
		break
	case <-stopAll:
		d.DestroyAllVm()
		eng.Shutdown()
		break
	}
}
Example #5
func main() {
	hypervisor.InterfaceCount = 0

	var containerInfoList []*hypervisor.ContainerInfo
	var roots []string
	var containerId string
	var err error

	ocffile := flag.String("config", "", "ocf configuration file")
	kernel := flag.String("kernel", "", "hyper kernel")
	initrd := flag.String("initrd", "", "hyper initrd")
	vbox := flag.String("vbox", "", "vbox boot iso")
	driver := flag.String("driver", "", "hypervisor driver")

	flag.Parse()

	if *ocffile == "" {
		*ocffile = "config.json"
	}

	if _, err = os.Stat(*ocffile); os.IsNotExist(err) {
		fmt.Printf("Please specify ocffile or put config.json under current working directory\n")
		return
	}

	if *vbox == "" {
		*vbox = "./vbox.iso"
	}

	if _, err = os.Stat(*vbox); err == nil {
		*vbox, err = filepath.Abs(*vbox)
		if err != nil {
			fmt.Printf("Cannot get abs path for vbox: %s\n", err.Error())
			return
		}
	}

	if *kernel == "" {
		*kernel = "./kernel"
	}

	if _, err = os.Stat(*kernel); err == nil {
		*kernel, err = filepath.Abs(*kernel)
		if err != nil {
			fmt.Printf("Cannot get abs path for kernel: %s\n", err.Error())
			return
		}
	}

	if *initrd == "" {
		*initrd = "./initrd.img"
	}

	if _, err = os.Stat(*initrd); err == nil {
		*initrd, err = filepath.Abs(*initrd)
		if err != nil {
			fmt.Printf("Cannot get abs path for initrd: %s\n", err.Error())
			return
		}
	}

	if *driver == "" {
		*driver = "kvm"
		fmt.Printf("Use default hypervisor KVM\n")
	}

	if hypervisor.HDriver, err = driverloader.Probe(*driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))

	ocfData, err := ioutil.ReadFile(*ocffile)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	userPod, err := pod.OCFConvert2Pod(ocfData)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	mypod := hypervisor.NewPod(podId, userPod)

	var (
		cpu = 1
		mem = 128
	)

	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}

	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	b := &hypervisor.BootConfig{
		Kernel: *kernel,
		Initrd: *initrd,
		Bios:   "",
		Cbfs:   "",
		Vbox:   *vbox,
		CPU:    cpu,
		Memory: mem,
	}

	vm := hypervisor.NewVm(vmId, cpu, mem, false, types.VM_KEEP_NONE)
	err = vm.Launch(b)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	sharedDir := path.Join(hypervisor.BaseDir, vm.Id, hypervisor.ShareDirTag)

	for _, c := range userPod.Containers {
		var root string
		var err error

		containerId = GenerateRandomID()
		rootDir := path.Join(sharedDir, containerId)
		os.MkdirAll(rootDir, 0755)

		rootDir = path.Join(rootDir, "rootfs")

		if !filepath.IsAbs(c.Image) {
			root, err = filepath.Abs(c.Image)
			if err != nil {
				fmt.Printf("%s\n", err.Error())
				return
			}
		} else {
			root = c.Image
		}

		err = mount(root, rootDir)
		if err != nil {
			fmt.Printf("mount %s to %s failed: %s\n", root, rootDir, err.Error())
			return
		}
		roots = append(roots, rootDir)

		containerInfo := &hypervisor.ContainerInfo{
			Id:     containerId,
			Rootfs: "rootfs",
			Image:  containerId,
			Fstype: "dir",
		}

		containerInfoList = append(containerInfoList, containerInfo)
		mypod.AddContainer(containerId, podId, "", []string{}, types.S_POD_CREATED)
	}

	qemuResponse := vm.StartPod(mypod, userPod, containerInfoList, nil)
	if qemuResponse.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)

	inFd, _ := term.GetFdInfo(os.Stdin)
	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)

	oldState, err := term.SetRawTerminal(inFd)
	if err != nil {
		return
	}

	height, width := getTtySize(outFd, isTerminalOut)
	winSize := &hypervisor.WindowSize{
		Row:    uint16(height),
		Column: uint16(width),
	}

	tag := pod.RandStr(8, "alphanum")

	monitorTtySize(vm, tag, outFd, isTerminalOut)

	vm.Attach(os.Stdin, os.Stdout, tag, containerId, winSize)

	qemuResponse = vm.StopPod(mypod, "yes")

	term.RestoreTerminal(inFd, oldState)

	for _, root := range roots {
		umount(root)
	}

	if qemuResponse.Data == nil {
		fmt.Printf("StopPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)
}
Example #6
func mainDaemon(opt *Options) {
	config := opt.Config
	glog.V(1).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	if _, err := os.Stat(config); err != nil {
		if os.IsNotExist(err) {
			glog.Errorf("Can not find config file(%s)", config)
			return
		}
		glog.Errorf(err.Error())
		return
	}

	os.Setenv("HYPER_CONFIG", config)
	cfg, err := goconfig.LoadConfigFile(config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", config, err.Error())
		return
	}

	hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root")

	if hyperRoot == "" {
		hyperRoot = "/var/lib/hyper"
	}
	utils.HYPER_ROOT = hyperRoot
	if _, err := os.Stat(hyperRoot); err != nil {
		if err := os.MkdirAll(hyperRoot, 0755); err != nil {
			glog.Errorf(err.Error())
			return
		}
	}

	storageDriver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "StorageDriver")
	daemon.InitDockerCfg(strings.Split(opt.Mirrors, ","), strings.Split(opt.InsecureRegistries, ","), storageDriver, hyperRoot)
	d, err := daemon.NewDaemon(cfg)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s", err.Error())
		return
	}

	vbox.Register(d)

	serverConfig := &server.Config{}

	defaultHost := "unix:///var/run/hyper.sock"
	Hosts := []string{defaultHost}

	if opt.Hosts != "" {
		Hosts = append(Hosts, opt.Hosts)
	}
	if d.Host != "" {
		Hosts = append(Hosts, d.Host)
	}

	for i := 0; i < len(Hosts); i++ {
		var err error
		if Hosts[i], err = opts.ParseHost(defaultHost, Hosts[i]); err != nil {
			glog.Errorf("error parsing -H %s : %v", Hosts[i], err)
			return
		}

		protoAddr := Hosts[i]
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			glog.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
			return
		}
		serverConfig.Addrs = append(serverConfig.Addrs, server.Addr{Proto: protoAddrParts[0], Addr: protoAddrParts[1]})
	}

	api, err := server.New(serverConfig)
	if err != nil {
		glog.Errorf(err.Error())
		return
	}

	api.InitRouters(d)

	driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor")
	driver = strings.ToLower(driver)
	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		glog.Warningf("%s", err.Error())
		glog.Errorf("Please specify the correct and available hypervisor, such as 'kvm', 'qemu-kvm',  'libvirt', 'xen', 'qemu', 'vbox' or ''")
		return
	} else {
		d.Hypervisor = driver
		glog.Infof("The hypervisor's driver is %s", driver)
	}

	disableIptables := cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false)
	if err = hypervisor.InitNetwork(d.BridgeIface, d.BridgeIP, disableIptables || opt.DisableIptables); err != nil {
		glog.Errorf("InitNetwork failed, %s", err.Error())
		return
	}

	defaultLog, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Logger")
	defaultLogCfg, _ := cfg.GetSection("Log")
	d.DefaultLogCfg(defaultLog, defaultLogCfg)

	// Set the daemon object as the global variable,
	// which will be used by the puller and builder
	utils.SetDaemon(d)

	if err := d.Restore(); err != nil {
		glog.Warningf("Fail to restore the previous VM")
		return
	}

	vmFactoryPolicy, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "VmFactoryPolicy")
	d.Factory = factory.NewFromPolicy(d.Kernel, d.Initrd, vmFactoryPolicy)

	rpcHost, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "gRPCHost")
	if rpcHost != "" {
		rpcServer := serverrpc.NewServerRPC(d)
		defer rpcServer.Stop()

		go func() {
			err := rpcServer.Serve(rpcHost)
			if err != nil {
				glog.Fatalf("Hyper serve RPC error: %v", err)
			}
		}()
	}

	// The serve API routine never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go api.Wait(serveAPIWait)

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	glog.V(0).Infof("Hyper daemon: %s %s",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v", errAPI)
		}
		break
	case <-stop:
		d.DestroyAndKeepVm()
		break
	case <-stopAll:
		d.DestroyAllVm()
		break
	}
	d.Factory.CloseFactory()
	api.Close()
	d.Shutdown()
}
Example #7
		initrd := absOption("initrd")
		template := absOption("template")

		if err := os.MkdirAll(template, 0700); err != nil {
			fmt.Printf("Failed to create the template directory: %v\n", err)
			os.Exit(-1)
		}

		if context.GlobalBool("debug") {
			flag.CommandLine.Parse([]string{"-v", "3", "--log_dir", context.GlobalString("log_dir"), "--alsologtostderr"})
		} else {
			flag.CommandLine.Parse([]string{"-v", "1", "--log_dir", context.GlobalString("log_dir")})
		}

		var err error
		if hypervisor.HDriver, err = driverloader.Probe(context.GlobalString("driver")); err != nil {
			glog.V(1).Infof("%v\n", err)
			fmt.Printf("Failed to setup the driver: %v\n", err)
			os.Exit(-1)
		}

		if _, err := templatecore.CreateTemplateVM(template, "", context.Int("cpu"), context.Int("mem"), kernel, initrd); err != nil {
			fmt.Printf("Failed to create the template: %v\n", err)
			os.Exit(-1)
		}
	},
}

var removeTemplateCommand = cli.Command{
	Name:  "remove-template",
	Usage: "remove the template VM on the directory specified by the global option --template",
Example #8
func startRunvPod(context *nsContext, config *startConfig) (err error) {
	context.lock.Lock()
	defer context.lock.Unlock()
	if context.firstConfig == nil {
		context.firstConfig = config
	} else {
		// check stopped
		if len(context.actives) == 0 {
			return fmt.Errorf("The namespace service was stopped")
		}
		// check match
		if config.Root != "" && config.Root != context.firstConfig.Root {
			return fmt.Errorf("The root does not match")
		}
		if config.Driver != "" && config.Driver != context.firstConfig.Driver {
			return fmt.Errorf("The driver does not match")
		}
		if config.Kernel != "" && config.Kernel != context.firstConfig.Kernel {
			return fmt.Errorf("The kernel does not match")
		}
		if config.Initrd != "" && config.Initrd != context.firstConfig.Initrd {
			return fmt.Errorf("The initrd does not match")
		}
		if config.Vbox != "" && config.Vbox != context.firstConfig.Vbox {
			return fmt.Errorf("The vbox does not match")
		}
		// check shared namespace
		for _, ns := range config.LinuxRuntimeSpec.Linux.Namespaces {
			if ns.Path == "" {
				continue
			}
			_, ok := context.actives[ns.Path]
			if !ok {
				return fmt.Errorf("Cann't share namespace with: %s", ns.Path)
			}
		}
		// OK, the pod has been started, add this config and return
		context.actives[config.Name] = config
		return nil
	}

	hypervisor.InterfaceCount = 0

	driver := config.Driver
	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	context.podId = fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	context.vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))

	context.userPod = pod.ConvertOCF2PureUserPod(&config.LinuxSpec, &config.LinuxRuntimeSpec)
	context.podStatus = hypervisor.NewPod(context.podId, context.userPod)
	context.vm, err = startVm(config, context.userPod, context.vmId)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	Response := context.vm.StartPod(context.podStatus, context.userPod, nil, nil)
	if Response.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return fmt.Errorf("StartPod fail")
	}
	fmt.Printf("result: code %d %s\n", Response.Code, Response.Cause)

	context.actives[config.Name] = config
	return nil
}