Example #1
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if !restore {
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}

	if !restore {
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
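These dmsetup calls are built as shell strings and run through /bin/sh -c, so a pool or volume name containing shell metacharacters would break or subvert them. A minimal sketch of the same create_thin message passed as an argument vector instead (createThin is a hypothetical helper, not part of the original code):

// createThin sends the create_thin message to a dm thin pool without a shell.
// Hypothetical sketch; assumes the same dmsetup semantics as CreateVolume above.
func createThin(poolName, devID string) error {
	out, err := exec.Command("dmsetup", "message",
		"/dev/mapper/"+poolName, "0", "create_thin "+devID).CombinedOutput()
	if err != nil {
		return fmt.Errorf("create_thin %s on pool %s: %v: %s", devID, poolName, err, out)
	}
	return nil
}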
Example #2
func waitConsoleOutput(ctx *VmContext) {

	conn, err := unixSocketConnect(ctx.consoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.consoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.consoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.consoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.consoleSockName)

	cout := make(chan string, 128)
	go ttyLiner(tc, cout)

	for line := range cout {
		glog.V(1).Info("[console] ", line)
	}
	glog.Info("console output end")
}
Example #3
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Wating for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
Example #4
func InitNetwork(bIface, bIP string) error {
	if bIface == "" {
		BridgeIface = defaultBridgeIface
	} else {
		BridgeIface = bIface
	}

	if bIP == "" {
		BridgeIP = defaultBridgeIP
	} else {
		BridgeIP = bIP
	}

	addr, err := GetIfaceAddr(BridgeIface)
	if err != nil {
		glog.V(1).Infof("create bridge %s, ip %s", BridgeIface, BridgeIP)
		// No bridge exists, create one

		// If the iface is not found, try to create it
		if err := configureBridge(BridgeIP, BridgeIface); err != nil {
			glog.Error("create bridge failed")
			return err
		}

		addr, err = GetIfaceAddr(BridgeIface)
		if err != nil {
			glog.Error("get iface addr failed\n")
			return err
		}

		bridgeIPv4Net = addr.(*net.IPNet)
	} else {
		glog.V(1).Info("bridge exist\n")
		// Validate that the bridge ip matches the ip specified by BridgeIP
		bridgeIPv4Net = addr.(*net.IPNet)

		if BridgeIP != "" {
			bip, _, err := net.ParseCIDR(BridgeIP)
			if err != nil {
				return err
			}
			if !bridgeIPv4Net.Contains(bip) {
				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", addr, bip)
			}
		}
	}

	err = setupIPTables(addr)
	if err != nil {
		return err
	}

	ipAllocator.RequestIP(bridgeIPv4Net, bridgeIPv4Net.IP)
	return nil
}
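The validation branch boils down to net.ParseCIDR plus (*net.IPNet).Contains. Isolated, with a hypothetical helper name, the check reads:

// bridgeIPMatches reports whether the configured bridge address (in CIDR form)
// falls inside the network of the already-existing bridge. Hypothetical helper
// mirroring the check in InitNetwork above.
func bridgeIPMatches(bridgeIP string, bridgeNet *net.IPNet) (bool, error) {
	ip, _, err := net.ParseCIDR(bridgeIP)
	if err != nil {
		return false, err
	}
	return bridgeNet.Contains(ip), nil
}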
Example #5
// launchQemu runs qemu and waits until it quits
func launchQemu(ctx *VmContext) {
	qemu, err := exec.LookPath("qemu-system-x86_64")
	if err != nil {
		ctx.hub <- &QemuExitEvent{message: "can not find qemu executable"}
		return
	}

	args := ctx.QemuArguments()

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	go waitConsoleOutput(ctx)

	pipe := make([]int, 2)
	err = syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.hub <- &QemuExitEvent{message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.watchPid(int(pid))
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.hub <- &QemuExitEvent{message: "watch qemu process failed"}
		return
	}
}
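launchQemu learns the daemonized qemu pid by reading exactly four bytes from the pipe and decoding them as a big-endian uint32. The daemon() helper is not shown here, but the writer side of that handshake presumably amounts to the following sketch (reportPid is a hypothetical name):

// reportPid writes a child pid as a big-endian uint32, matching the
// four-byte read in launchQemu above. Hypothetical sketch; the real
// daemon() helper is not part of this example.
func reportPid(fd int, pid int) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(pid))
	_, err := syscall.Write(fd, buf)
	return err
}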
Example #6
// launchQemu runs qemu and waits until it quits
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
Example #7
func CreateNewDevice(containerId, devPrefix, rootPath string) error {
	var metadataPath = fmt.Sprintf("%s/metadata/", rootPath)
	// Get device id from the metadata file
	idMetadataFile := path.Join(metadataPath, containerId)
	if _, err := os.Stat(idMetadataFile); err != nil && os.IsNotExist(err) {
		return err
	}
	jsonData, err := ioutil.ReadFile(idMetadataFile)
	if err != nil {
		return err
	}
	var dat jsonMetadata
	if err := json.Unmarshal(jsonData, &dat); err != nil {
		return err
	}
	deviceId := dat.Device_id
	deviceSize := dat.Size
	// Activate the device for that device ID
	devName := fmt.Sprintf("%s-%s", devPrefix, containerId)
	poolName := fmt.Sprintf("/dev/mapper/%s-pool", devPrefix)
	createDeviceCmd := fmt.Sprintf("dmsetup create %s --table \"0 %d thin %s %d\"", devName, deviceSize/512, poolName, deviceId)
	createDeviceCommand := exec.Command("/bin/sh", "-c", createDeviceCmd)
	output, err := createDeviceCommand.Output()
	if err != nil {
		glog.Error(string(output))
		return err
	}
	return nil
}
Example #8
func (daemon *Daemon) DeleteVolumeId(podId string) error {
	key := fmt.Sprintf("vol-%s", podId)
	iter := daemon.db.NewIterator(util.BytesPrefix([]byte(key)), nil)
	defer iter.Release()
	for iter.Next() {
		k := iter.Key()
		if string(k)[4:18] == podId {
			fields := strings.Split(string(iter.Value()), ":")
			dev_id, _ := strconv.Atoi(fields[1])
			if err := dm.DeleteVolume(daemon.Storage.DmPoolData, dev_id); err != nil {
				glog.Error(err.Error())
				return err
			}
		}
		if err := daemon.db.Delete(k, nil); err != nil {
			return err
		}
	}
	err := iter.Error()
	if err != nil {
		return err
	}
	return nil
}
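The parsing implies a record layout: keys begin with "vol-" followed by the 14-character pod id (bytes 4:18), and values are colon-separated with the device id in the second field. A hedged sketch of writing a record this function would match; the key suffix and the first value field are assumptions, not confirmed by the original code:

// putVolumeRecord stores a volume entry in the shape DeleteVolumeId expects.
// Hypothetical: only the "vol-" + podId key prefix and the ":<dev_id>" value
// field are inferred from the parser above.
func putVolumeRecord(db *leveldb.DB, podId, volName string, devId int) error {
	key := fmt.Sprintf("vol-%s-%s", podId, volName) // podId occupies bytes 4:18
	val := fmt.Sprintf("%s:%d", volName, devId)     // fields[1] is parsed as dev_id
	return db.Put([]byte(key), []byte(val), nil)
}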
Example #9
func initContext(id string, hub chan QemuEvent, client chan *types.QemuResponse, boot *BootConfig) (*VmContext, error) {

	var err error

	qmpChannel := make(chan QmpInteraction, 128)
	vmChannel := make(chan *DecodedMessage, 128)
	defer func() {
		if err != nil {
			close(qmpChannel)
			close(vmChannel)
		}
	}()

	//dir and sockets:
	homeDir := BaseDir + "/" + id + "/"
	qmpSockName := homeDir + QmpSockName
	hyperSockName := homeDir + HyperSockName
	ttySockName := homeDir + TtySockName
	consoleSockName := homeDir + ConsoleSockName
	shareDir := homeDir + ShareDirTag

	err = os.MkdirAll(shareDir, 0755)
	if err != nil {
		glog.Error("cannot make dir", shareDir, err.Error())
		return nil, err
	}
	defer func() {
		if err != nil {
			os.RemoveAll(homeDir)
		}
	}()

	return &VmContext{
		Id:              id,
		Boot:            boot,
		pciAddr:         PciAddrFrom,
		scsiId:          0,
		attachId:        1,
		hub:             hub,
		client:          client,
		qmp:             qmpChannel,
		vm:              vmChannel,
		wdt:             make(chan string, 16),
		ptys:            newPts(),
		ttySessions:     make(map[string]uint64),
		qmpSockName:     qmpSockName,
		hyperSockName:   hyperSockName,
		ttySockName:     ttySockName,
		consoleSockName: consoleSockName,
		shareDir:        shareDir,
		timer:           nil,
		process:         nil,
		handler:         stateInit,
		userSpec:        nil,
		vmSpec:          nil,
		devices:         newDeviceMap(),
		progress:        newProcessingList(),
		lock:            &sync.Mutex{},
	}, nil
}
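The two deferred closures read the local err at function exit, so the channels and the home directory are torn down only when a later step fails; on the successful return they are no-ops. The idiom in isolation (openA and Thing are hypothetical):

// Cleanup-on-error sketch: the deferred closure observes the final value of
// err, so partial setup is undone only on failure paths.
func newThing() (*Thing, error) {
	a, err := openA()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			a.Close() // undo partial setup only if a later step failed
		}
	}()
	// ... later steps assign to err and return (nil, err) on failure ...
	return &Thing{a: a}, nil
}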
Example #10
func initFailureHandler(ctx *VmContext, ev QemuEvent) bool {
	processed := true
	switch ev.Event() {
	case ERROR_INIT_FAIL: // Qemu connection Failure
		reason := ev.(*InitFailedEvent).reason
		glog.Error(reason)
	case ERROR_QMP_FAIL: // Device allocate and insert Failure
		reason := "QMP protocol exception"
		if ev.(*DeviceFailed).session != nil {
			reason = "QMP protocol exception: failed while waiting " + EventString(ev.(*DeviceFailed).session.Event())
		}
		glog.Error(reason)
	default:
		processed = false
	}
	return processed
}
Example #11
func (ctx *VmContext) poweroffVM(err bool, msg string) {
	if err {
		ctx.reportVmFault(msg)
		glog.Error("Shutting down because of an exception: ", msg)
	}
	qmpQemuQuit(ctx)
	ctx.timedKill(10)
}
Example #12
func (ctx *VmContext) shutdownVM(err bool, msg string) {
	if err {
		ctx.reportVmFault(msg)
		glog.Error("Shutting down because of an exception: ", msg)
	}
	ctx.setTimeout(10)
	ctx.vm <- &DecodedMessage{code: INIT_DESTROYPOD, message: []byte{}}
}
Example #13
func DeleteVolume(dm *DeviceMapper, dev_id int) error {
	// Delete the thin volume from the pool
	parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"delete %d\"", dm.PoolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	return nil
}
Example #14
func InitContext(dr HypervisorDriver, id string, hub chan VmEvent, client chan *types.QemuResponse, dc DriverContext, boot *BootConfig) (*VmContext, error) {

	var err error

	vmChannel := make(chan *DecodedMessage, 128)

	//dir and sockets:
	homeDir := BaseDir + "/" + id + "/"
	hyperSockName := homeDir + HyperSockName
	ttySockName := homeDir + TtySockName
	consoleSockName := homeDir + ConsoleSockName
	shareDir := homeDir + ShareDirTag

	if dc == nil {
		dc = dr.InitContext(homeDir)
	}

	err = os.MkdirAll(shareDir, 0755)
	if err != nil {
		glog.Error("cannot make dir", shareDir, err.Error())
		return nil, err
	}
	defer func() {
		if err != nil {
			os.RemoveAll(homeDir)
		}
	}()

	return &VmContext{
		Id:              id,
		Boot:            boot,
		pciAddr:         PciAddrFrom,
		scsiId:          0,
		attachId:        1,
		Hub:             hub,
		client:          client,
		DCtx:            dc,
		vm:              vmChannel,
		ptys:            newPts(),
		ttySessions:     make(map[string]uint64),
		HomeDir:         homeDir,
		HyperSockName:   hyperSockName,
		TtySockName:     ttySockName,
		ConsoleSockName: consoleSockName,
		ShareDir:        shareDir,
		timer:           nil,
		handler:         stateInit,
		userSpec:        nil,
		vmSpec:          nil,
		devices:         newDeviceMap(),
		progress:        newProcessingList(),
		lock:            &sync.Mutex{},
		wait:            false,
	}, nil
}
Example #15
// The caller must make sure that the restart policy and the status is right to restart
func (daemon *Daemon) RestartPod(mypod *Pod) error {
	// Remove the pod
	// The pod is stopped, the vm is gone
	for _, c := range mypod.Containers {
		glog.V(1).Infof("Ready to rm container: %s", c.Id)
		if _, _, err := daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
			glog.V(1).Infof("Error to rm container: %s", err.Error())
		}
	}
	daemon.RemovePod(mypod.Id)
	daemon.DeletePodContainerFromDB(mypod.Id)
	daemon.DeleteVolumeId(mypod.Id)
	podData, err := daemon.GetPodByName(mypod.Id)
	if err != nil {
		return err
	}
	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	// Start the pod
	_, _, err = daemon.StartPod(mypod.Id, vmId, string(podData))
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}
	if err := daemon.WritePodAndContainers(mypod.Id); err != nil {
		glog.Error("Found an error while saving the Containers info")
		return err
	}
	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[mypod.Id],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[mypod.Id].Vm = vmId
	daemon.AddVm(vm)

	return nil
}
Example #16
func CreateInterface(index int, pciAddr int, name string, isDefault bool, callback chan QemuEvent) {
	inf, err := network.Allocate("")
	if err != nil {
		glog.Error("interface creating failed", err.Error())
		callback <- &DeviceFailed{
			session: &InterfaceCreated{Index: index, PCIAddr: pciAddr, DeviceName: name},
		}
		return
	}

	interfaceGot(index, pciAddr, name, isDefault, callback, inf)
}
Example #17
// Delete the pool which is created in 'Init' function
func DMCleanup(dm *DeviceMapper) error {
	// Delete the thin pool
	parms := fmt.Sprintf("dmsetup remove \"/dev/mapper/%s\"", dm.PoolName)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	// Delete the loop device
	parms = fmt.Sprintf("losetup -d %s", dm.MetadataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	parms = fmt.Sprintf("losetup -d %s", dm.DataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	return nil
}
Example #18
func connectToInit(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot re-connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot re-connect to hyper socket " + err.Error(),
		}
		return
	}

	go waitCmdToInit(ctx, conn.(*net.UnixConn))
}
Example #19
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]

	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	code, cause, err := daemon.StartPod(podId, vmId, podArgs)
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}
	if err := daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saveing the Containers info")
		return err
	}
	data, err := daemon.GetPodByName(podId)
	if err != nil {
		return err
	}
	userPod, err := pod.ProcessPodBytes(data)
	if err != nil {
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[podId],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[podId].Vm = vmId
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #20
func (ctx *VmContext) loop() {
	for ctx.handler != nil {
		ev, ok := <-ctx.hub
		if !ok {
			glog.Error("hub chan has already been closed")
			break
		} else if ev == nil {
			glog.V(1).Info("got nil event.")
			continue
		}
		glog.V(1).Infof("main event loop got message %d(%s)", ev.Event(), EventString(ev.Event()))
		ctx.handler(ctx, ev)
	}
}
Example #21
func UpAndAddToBridge(name string) error {
	inf, err := net.InterfaceByName(name)
	if err != nil {
		glog.Error("cannot find network interface ", name)
		return err
	}
	brg, err := net.InterfaceByName(BridgeIface)
	if err != nil {
		glog.Error("cannot find bridge interface ", BridgeIface)
		return err
	}
	err = AddToBridge(inf, brg)
	if err != nil {
		glog.Errorf("cannot add %s to %s ", name, BridgeIface)
		return err
	}
	err = NetworkLinkUp(inf)
	if err != nil {
		glog.Error("cannot up interface ", name)
		return err
	}

	return nil
}
Example #22
func statePodStopping(ctx *VmContext, ev QemuEvent) {
	if commonStateHandler(ctx, ev, true) {
		return
	}
	switch ev.Event() {
	case COMMAND_RELEASE:
		glog.Info("pod stopping, got release, quit.")
		ctx.unsetTimeout()
		ctx.shutdownVM(false, "got release, quit")
		ctx.Become(stateTerminating, "TERMINATING")
		ctx.reportVmShutdown()
	case COMMAND_ACK:
		ack := ev.(*CommandAck)
		glog.V(1).Infof("[Stopping] got init ack to %d", ack.reply)
		if ack.reply == INIT_STOPPOD {
			glog.Info("POD stopped ", string(ack.msg))
			ctx.detatchDevice()
			ctx.Become(stateCleaning, "CLEANING")
		}
	case ERROR_CMD_FAIL:
		ack := ev.(*CommandError)
		if ack.context.code == INIT_STOPPOD {
			ctx.unsetTimeout()
			ctx.shutdownVM(true, "Stop pod failed as init report")
			ctx.Become(stateTerminating, "TERMINATING")
			glog.Error("Stop pod failed as init report")
		}
	case EVENT_QEMU_TIMEOUT:
		reason := "stopping POD timeout"
		ctx.shutdownVM(true, reason)
		ctx.Become(stateTerminating, "TERMINATING")
		glog.Error(reason)
	default:
		glog.Warning("got unexpected event during pod stopping")
	}
}
Example #23
func (xc *XenContext) AddNic(ctx *hypervisor.VmContext, host *hypervisor.HostNicInfo, guest *hypervisor.GuestNicInfo) {
	go func() {
		callback := &hypervisor.NetDevInsertedEvent{
			Index:      guest.Index,
			DeviceName: guest.Device,
			Address:    guest.Busaddr,
		}

		glog.V(1).Infof("allocate nic %s for dom %d", host.Mac, xc.domId)
		hw, err := net.ParseMAC(host.Mac)
		if err == nil {
			//dev := fmt.Sprintf("vif%d.%d", xc.domId, guest.Index)
			dev := host.Device
			glog.V(1).Infof("add network for %d - ip: %s, br: %s, gw: %s, dev: %s, hw: %s", xc.domId, guest.Ipaddr,
				host.Bridge, host.Bridge, dev, hw.String())

			res := HyperxlNicAdd(xc.driver.Ctx, (uint32)(xc.domId), guest.Ipaddr, host.Bridge, host.Bridge, dev, []byte(hw))
			if res == 0 {

				glog.V(1).Infof("nic %s insert succeeded", guest.Device)

				err = network.UpAndAddToBridge(fmt.Sprintf("vif%d.%d", xc.domId, guest.Index))
				if err != nil {
					glog.Error("fail to add vif to bridge: ", err.Error())
					ctx.Hub <- &hypervisor.DeviceFailed{
						Session: callback,
					}
					HyperxlNicRemove(xc.driver.Ctx, (uint32)(xc.domId), host.Mac)
					return
				}

				ctx.Hub <- callback
				return
			}
			glog.V(1).Infof("nic %s insert succeeded [faked] ", guest.Device)
			ctx.Hub <- callback
			return
		}

		glog.Errorf("nic %s insert failed", guest.Device)
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: callback,
		}
	}()
}
Example #24
func DriversProbe() hypervisor.HypervisorDriver {
	xd := xen.InitDriver()
	if xd != nil {
		glog.Info("Xen Driver Loaded.")
		return xd
	}

	qd := &qemu.QemuDriver{}
	if err := qd.Initialize(); err == nil {
		glog.Info("Qemu Driver Loaded")
		return qd
	} else {
		glog.Info("Qemu Driver Load failed: ", err.Error())
	}

	glog.Error("No driver available")
	return nil
}
Example #25
func waitPts(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.TtySockName)
	if err != nil {
		glog.Error("Cannot connect to tty socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to tty socket " + err.Error(),
		}
		return
	}

	glog.V(1).Info("tty socket connected")

	go waitTtyMessage(ctx, conn.(*net.UnixConn))

	for {
		res, err := readTtyMessage(conn.(*net.UnixConn))
		if err != nil {
			glog.V(1).Info("tty socket closed, quit the reading goroutine ", err.Error())
			ctx.Hub <- &Interrupted{Reason: "tty socket failed " + err.Error()}
			close(ctx.ptys.channel)
			return
		}
		if ta, ok := ctx.ptys.ttys[res.session]; ok {
			if len(res.message) == 0 {
				glog.V(1).Infof("session %d closed by peer, close pty", res.session)
				ctx.ptys.Close(ctx, res.session)
			} else {
				for _, tty := range ta.attachments {
					if tty.Stdout != nil {
						_, err := tty.Stdout.Write(res.message)
						if err != nil {
							glog.V(1).Infof("fail to write session %d, close pty attachment", res.session)
							ctx.ptys.Detach(ctx, res.session, tty)
						}
					}
				}
			}
		}
	}
}
Example #26
func qmpReceiver(ch chan QmpInteraction, decoder *json.Decoder) {
	glog.V(0).Info("Begin receive QMP message")
	for {
		rsp := &QmpResponse{}
		if err := decoder.Decode(rsp); err == io.EOF {
			glog.Info("QMP exit as got EOF")
			ch <- &QmpInternalError{cause: err.Error()}
			return
		} else if err != nil {
			glog.Error("QMP receive and decode error: ", err.Error())
			ch <- &QmpInternalError{cause: err.Error()}
			return
		}
		msg := rsp.msg
		ch <- msg

		if msg.MessageType() == QMP_EVENT && msg.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
			glog.V(0).Info("Shutdown, quit QMP receiver")
			return
		}
	}
}
Example #27
func interfaceGot(index int, pciAddr int, name string, isDefault bool, callback chan QemuEvent, inf *network.Settings) {

	ip, nw, err := net.ParseCIDR(fmt.Sprintf("%s/%d", inf.IPAddress, inf.IPPrefixLen))
	if err != nil {
		glog.Error("can not parse cidr")
		callback <- &DeviceFailed{
			session: &InterfaceCreated{Index: index, PCIAddr: pciAddr, DeviceName: name},
		}
		return
	}
	mask := net.IP(nw.Mask)

	rt := []*RouteRule{
	//        &RouteRule{
	//            Destination: fmt.Sprintf("%s/%d", nw.IP.String(), inf.IPPrefixLen),
	//            Gateway:"", ViaThis:true,
	//        },
	}
	if isDefault {
		rt = append(rt, &RouteRule{
			Destination: "0.0.0.0/0",
			Gateway:     inf.Gateway, ViaThis: true,
		})
	}

	event := &InterfaceCreated{
		Index:      index,
		PCIAddr:    pciAddr,
		DeviceName: name,
		Fd:         inf.File,
		MacAddr:    inf.Mac,
		IpAddr:     ip.String(),
		NetMask:    mask.String(),
		RouteTable: rt,
	}

	callback <- event
}
Example #28
func (ctx *VmContext) setWindowSize(tag string, size *WindowSize) {
	if session, ok := ctx.ttySessions[tag]; ok {
		cmd := map[string]interface{}{
			"seq":    session,
			"row":    size.Row,
			"column": size.Column,
		}
		msg, err := json.Marshal(cmd)
		if err != nil {
			ctx.reportBadRequest("command window size parse failed")
			return
		}
		ctx.vm <- &DecodedMessage{
			code:    INIT_WINSIZE,
			message: msg,
		}
	} else {
		msg := fmt.Sprintf("cannot resolve client tag %s", tag)
		ctx.reportBadRequest(msg)
		glog.Error(msg)
	}
}
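For reference, json.Marshal sorts map keys, so a session 3 with a 24-row, 80-column window serializes as {"column":80,"row":24,"seq":3} before being wrapped in the INIT_WINSIZE message.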
Example #29
func readVmMessage(conn *net.UnixConn) (*DecodedMessage, error) {
	needRead := 8
	length := 0
	read := 0
	buf := make([]byte, 512)
	res := []byte{}
	for read < needRead {
		want := needRead - read
		if want > 512 {
			want = 512
		}
		glog.V(1).Infof("trying to read %d bytes", want)
		nr, err := conn.Read(buf[:want])
		if err != nil {
			glog.Error("read init data failed")
			return nil, err
		}

		res = append(res, buf[:nr]...)
		read = read + nr

		glog.V(1).Infof("read %d/%d [length = %d]", read, needRead, length)

		if length == 0 && read >= 8 {
			length = int(binary.BigEndian.Uint32(res[4:8]))
			glog.V(1).Infof("data length is %d", length)
			if length > 8 {
				needRead = length
			}
		}
	}

	return &DecodedMessage{
		code:    binary.BigEndian.Uint32(res[:4]),
		message: res[8:],
	}, nil
}
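Reading the parser back, a vm message frame is an 8-byte header, a big-endian uint32 command code then a big-endian uint32 total frame length (header included), followed by the payload. A sketch of the matching encoder (encodeVmMessage is a hypothetical name):

// encodeVmMessage builds a frame readVmMessage above would accept:
// code (4 bytes, big-endian) | total length (4 bytes, big-endian) | payload.
func encodeVmMessage(code uint32, payload []byte) []byte {
	buf := make([]byte, 8+len(payload))
	binary.BigEndian.PutUint32(buf[0:4], code)
	binary.BigEndian.PutUint32(buf[4:8], uint32(8+len(payload)))
	copy(buf[8:], payload)
	return buf
}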
Example #30
func readTtyMessage(conn *net.UnixConn) (*ttyMessage, error) {
	needRead := 12
	length := 0
	read := 0
	buf := make([]byte, 512)
	res := []byte{}
	for read < needRead {
		want := needRead - read
		if want > 512 {
			want = 512
		}
		glog.V(1).Infof("tty: trying to read %d bytes", want)
		nr, err := conn.Read(buf[:want])
		if err != nil {
			glog.Error("read tty data failed")
			return nil, err
		}

		res = append(res, buf[:nr]...)
		read = read + nr

		glog.V(1).Infof("tty: read %d/%d [length = %d]", read, needRead, length)

		if length == 0 && read >= 12 {
			length = int(binary.BigEndian.Uint32(res[8:12]))
			glog.V(1).Infof("data length is %d", length)
			if length > 12 {
				needRead = length
			}
		}
	}

	return &ttyMessage{
		session: binary.BigEndian.Uint64(res[:8]),
		message: res[12:],
	}, nil
}
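The tty frames follow the same pattern with a 12-byte header: an 8-byte big-endian session id, then a 4-byte big-endian total frame length (header included), then the payload, which is why this reader starts with needRead = 12 and slices the session out of res[:8].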