Example #1
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Wating for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
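The helpers readVmMessage and newVmMessage used by these examples are not shown. A minimal sketch of the reader, assuming a hyperstart-style frame of a 4-byte big-endian message code followed by a 4-byte big-endian total length (the 8-byte header included), with the DecodedMessage layout inferred from its uses above:

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

// DecodedMessage mirrors the message shape used above (assumed layout).
type DecodedMessage struct {
	code    uint32
	message []byte
}

func readVmMessage(conn *net.UnixConn) (*DecodedMessage, error) {
	// read the fixed 8-byte header first
	header := make([]byte, 8)
	if _, err := io.ReadFull(conn, header); err != nil {
		return nil, err
	}
	length := binary.BigEndian.Uint32(header[4:8])
	if length < 8 {
		return nil, fmt.Errorf("invalid message length %d", length)
	}
	// the declared length covers the header, so the payload is length-8 bytes
	payload := make([]byte, length-8)
	if _, err := io.ReadFull(conn, payload); err != nil {
		return nil, err
	}
	return &DecodedMessage{
		code:    binary.BigEndian.Uint32(header[:4]),
		message: payload,
	}, nil
}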
Example #2
func InitDriver() *XenDriver {
	xd := &XenDriver{}
	if err := xd.Initialize(); err != nil {
		glog.Info("Xen Driver Load failed: ", err.Error())
		return nil
	}
	glog.Info("Xen Driver Loaded.")
	globalDriver = xd
	return globalDriver
}
Example #3
func qmpCommander(handler chan QmpInteraction, conn *net.UnixConn, session *QmpSession, feedback chan QmpInteraction) {
	glog.V(1).Info("Begin process command session")
	for _, cmd := range session.commands {
		msg, err := json.Marshal(*cmd)
		if err != nil {
			handler <- qmpFail("cannot marshal command", session.callback)
			return
		}

		success := false
		var qe *QmpError
		for repeat := 0; !success && repeat < 3; repeat++ {

			if len(cmd.Scm) > 0 {
				glog.V(1).Infof("send cmd with scm (%d bytes) (%d) %s", len(cmd.Scm), repeat+1, string(msg))
				if f, ferr := conn.File(); ferr == nil {
					// conn.File() duplicates the fd; send, then close our copy
					syscall.Sendmsg(int(f.Fd()), msg, cmd.Scm, nil, 0)
					f.Close()
				}
			} else {
				glog.V(1).Infof("sending command (%d) %s", repeat+1, string(msg))
				conn.Write(msg)
			}

			res, ok := <-feedback
			if !ok {
				glog.Info("QMP command result chan closed")
				return
			}
			switch res.MessageType() {
			case QMP_RESULT:
				// success; Go switch cases don't fall through, no break needed
				success = true
			case QMP_ERROR:
				glog.Warning("got one qmp error")
				qe = res.(*QmpError)
				time.Sleep(1000 * time.Millisecond)
			case QMP_INTERNAL_ERROR:
				glog.Info("QMP quit... commander quit... ")
				return
			}
		}

		if !success {
			handler <- qe.Finish(session.callback)
			return
		}
	}
	handler <- session.Finish()
}
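The cmd.Scm bytes passed to syscall.Sendmsg above are SCM_RIGHTS ancillary data, which is how an open file descriptor is handed to the QEMU process over the QMP socket. A sketch of how such a payload might be built; the helper name scmForFile is ours:

import (
	"os"
	"syscall"
)

// scmForFile encodes a descriptor as SCM_RIGHTS ancillary data, so the
// peer receives its own copy of the open file descriptor.
func scmForFile(f *os.File) []byte {
	return syscall.UnixRights(int(f.Fd()))
}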
Example #4
func waitConsoleOutput(ctx *VmContext) {

	conn, err := unixSocketConnect(ctx.consoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.consoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.consoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.consoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.consoleSockName)

	cout := make(chan string, 128)
	go ttyLiner(tc, cout)

	for line := range cout {
		glog.V(1).Info("[console] ", line)
	}
	glog.Info("console output end")
}
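ttyLiner is assumed to split the raw console stream into lines and to close the channel on EOF, which is what ends the loop above. A minimal sketch:

import (
	"bufio"
	"io"
	"strings"
)

func ttyLiner(conn io.Reader, output chan string) {
	br := bufio.NewReader(conn)
	for {
		line, err := br.ReadString('\n')
		if len(line) > 0 {
			output <- strings.TrimRight(line, "\r\n")
		}
		if err != nil {
			// closing the channel makes the consumer log "console output end"
			close(output)
			return
		}
	}
}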
Example #5
func stateCleaning(ctx *VmContext, ev QemuEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
		// processed by common state handler
	} else if processed, success := deviceRemoveHandler(ctx, ev); processed {
		if !success {
			glog.Warning("fail to unplug devices for stop")
			ctx.poweroffVM(true, "fail to unplug devices")
			ctx.Become(stateDestroying, "DESTROYING")
		} else if ctx.deviceReady() {
			//            ctx.reset()
			//            ctx.unsetTimeout()
			//            ctx.reportPodStopped()
			//            glog.V(1).Info("device ready, could run pod.")
			//            ctx.Become(stateInit, "INIT")
			ctx.vm <- &DecodedMessage{
				code:    INIT_READY,
				message: []byte{},
			}
			glog.V(1).Info("device ready, could run pod.")
		}
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.poweroffVM(true, "fail to unplug devices")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case COMMAND_RELEASE:
			glog.Info("vm cleaning to idle, got release, quit")
			ctx.reportVmShutdown()
			ctx.Become(stateDestroying, "DESTROYING")
		case EVENT_QEMU_TIMEOUT:
			glog.Warning("Qemu did not exit in time, try to stop it")
			ctx.poweroffVM(true, "pod stopp/unplug timeout")
			ctx.Become(stateDestroying, "DESTROYING")
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[cleaning] Got reply to %d: '%s'", ack.reply, string(ack.msg))
			if ack.reply == INIT_READY {
				ctx.reset()
				ctx.unsetTimeout()
				ctx.reportPodStopped()
				glog.Info("init has been acknowledged, could run pod.")
				ctx.Become(stateInit, "INIT")
			}
		default:
			glog.V(1).Info("got event message while cleaning")
		}
	}
}
Example #6
// InitDeviceContext initializes the device info in the context
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
	cInfo []*ContainerInfo, vInfo []*VolumeInfo) {

	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	for i := 0; i < InterfaceCount; i++ {
		ctx.progress.adding.networks[i] = true
	}

	if cInfo == nil {
		cInfo = []*ContainerInfo{}
	}

	if vInfo == nil {
		vInfo = []*VolumeInfo{}
	}

	ctx.initVolumeMap(spec)

	if glog.V(3) {
		for i, c := range cInfo {
			glog.Infof("#%d Container Info:", i)
			b, err := json.MarshalIndent(c, "...|", "    ")
			if err == nil {
				glog.Info("\n", string(b))
			}
		}
	}

	containers := make([]VmContainer, len(spec.Containers))

	for i, container := range spec.Containers {

		ctx.initContainerInfo(i, &containers[i], &container)
		ctx.setContainerInfo(i, &containers[i], cInfo[i])

		if spec.Tty {
			containers[i].Tty = ctx.attachId
			ctx.attachId++
			ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
		}
	}

	ctx.vmSpec = &VmPod{
		Hostname:   spec.Name,
		Containers: containers,
		Interfaces: nil,
		Routes:     nil,
		ShareDir:   ShareDirTag,
	}

	for _, vol := range vInfo {
		ctx.setVolumeInfo(vol)
	}

	ctx.userSpec = spec
	ctx.wg = wg
}
Example #7
func stateDestroying(ctx *VmContext, ev QemuEvent) {
	if processed, _ := deviceRemoveHandler(ctx, ev); processed {
		if closed := ctx.tryClose(); closed {
			glog.Info("resources reclaimed, quit...")
		}
	} else {
		switch ev.Event() {
		case EVENT_QMP_EVENT:
			if ev.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
				glog.Info("Got QMP shutdown event")
				ctx.unsetTimeout()
				if closed := ctx.onQemuExit(false); closed {
					glog.Info("VM Context closed.")
				}
			}
		case EVENT_QEMU_KILL:
			glog.Info("Got Qemu force killed message")
			ctx.unsetTimeout()
			if closed := ctx.onQemuExit(true); closed {
				glog.Info("VM Context closed.")
			}
		case ERROR_INTERRUPTED:
			glog.V(1).Info("Connection interrupted while destroying")
		case COMMAND_RELEASE:
			glog.Info("vm destroying, got release")
			ctx.reportVmShutdown()
		case EVENT_QEMU_TIMEOUT:
			glog.Info("Device removing timeout")
			ctx.Close()
		default:
			glog.Warning("got event during vm cleaning up")
		}
	}
}
Example #8
func DriversProbe() hypervisor.HypervisorDriver {
	xd := xen.InitDriver()
	if xd != nil {
		glog.Info("Xen Driver Loaded.")
		return xd
	}

	qd := &qemu.QemuDriver{}
	if err := qd.Initialize(); err == nil {
		glog.Info("Qemu Driver Loaded")
		return qd
	} else {
		glog.Info("Qemu Driver Load failed: ", err.Error())
	}

	glog.Error("No driver available")
	return nil
}
Example #9
func (qc *QemuContext) Kill(ctx *hypervisor.VmContext) {
	defer func() {
		err := recover()
		if glog.V(1) && err != nil {
			glog.Info("kill qemu, but channel has already been closed")
		}
	}()
	qc.wdt <- "kill"
}
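The deferred recover here guards the send on qc.wdt: if the watchdog goroutine has already closed the channel, the send panics, and Kill swallows the panic instead of crashing. The idiom in isolation, as a standalone sketch:

// safeSend reports whether msg was delivered; sending on a closed
// channel panics, and recover turns that panic into a false return.
func safeSend(ch chan string, msg string) (ok bool) {
	defer func() {
		if recover() != nil {
			ok = false
		}
	}()
	ch <- msg
	return true
}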
Example #10
func stateTerminating(ctx *VmContext, ev QemuEvent) {
	switch ev.Event() {
	case EVENT_QMP_EVENT:
		if ev.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
			glog.Info("Got QMP shutdown event while terminating, go to cleaning up")
			ctx.unsetTimeout()
			if closed := ctx.onQemuExit(true); !closed {
				ctx.Become(stateDestroying, "DESTROYING")
			}
		}
	case EVENT_QEMU_KILL:
		glog.Info("Got Qemu force killed message, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onQemuExit(true); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case COMMAND_RELEASE:
		glog.Info("vm terminating, got release")
		ctx.reportVmShutdown()
	case COMMAND_ACK:
		ack := ev.(*CommandAck)
		glog.V(1).Infof("[Terminating] Got reply to %d: '%s'", ack.reply, string(ack.msg))
		if ack.reply == INIT_DESTROYPOD {
			glog.Info("POD destroyed ", string(ack.msg))
			ctx.poweroffVM(false, "")
		}
	case ERROR_CMD_FAIL:
		ack := ev.(*CommandError)
		if ack.context.code == INIT_DESTROYPOD {
			glog.Warning("Destroy pod failed")
			ctx.poweroffVM(true, "Destroy pod failed")
		}
	case EVENT_QEMU_TIMEOUT:
		glog.Warning("Qemu did not exit in time, try to stop it")
		ctx.poweroffVM(true, "vm terminating timeout")
	case ERROR_INTERRUPTED:
		glog.V(1).Info("Connection interrupted while terminating")
	default:
		glog.V(1).Info("got event during terminating")
	}
}
Example #11
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]

	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	code, cause, err := daemon.StartPod(podId, vmId, podArgs)
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}
	if err := daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saveing the Containers info")
		return err
	}
	data, err := daemon.GetPodByName(podId)
	if err != nil {
		return err
	}
	userPod, err := pod.ProcessPodBytes(data)
	if err != nil {
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[podId],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[podId].Vm = vmId
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #12
func (pts *pseudoTtys) ptyConnect(ctx *VmContext, container int, session uint64, tty *TtyIO) {

	pts.lock.Lock()
	if ta, ok := pts.ttys[session]; ok {
		ta.attach(tty)
	} else {
		pts.ttys[session] = newAttachmentsWithTty(container, false, tty)
	}
	pts.lock.Unlock()

	if tty.Stdin != nil {
		go func() {
			buf := make([]byte, 32)
			defer pts.Detach(ctx, session, tty)
			defer func() { recover() }()
			for {
				nr, err := tty.Stdin.Read(buf)
				if err != nil {
					glog.Info("a stdin closed, ", err.Error())
					return
				} else if nr == 1 && buf[0] == ExitChar {
					glog.Info("got stdin detach char, exit term")
					return
				}

				glog.V(3).Infof("trying to input char: %d and %d chars", buf[0], nr)

				// copy the bytes out of the reused read buffer before sending
				mbuf := make([]byte, nr)
				copy(mbuf, buf[:nr])
				pts.channel <- &ttyMessage{
					session: session,
					message: mbuf,
				}
			}
		}()
	}

}
Example #13
// launchQemu runs qemu and waits for it to quit
func launchQemu(ctx *VmContext) {
	qemu, err := exec.LookPath("qemu-system-x86_64")
	if err != nil {
		ctx.hub <- &QemuExitEvent{message: "can not find qemu executable"}
		return
	}

	args := ctx.QemuArguments()

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	go waitConsoleOutput(ctx)

	pipe := make([]int, 2)
	err = syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.hub <- &QemuExitEvent{message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.watchPid(int(pid))
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.hub <- &QemuExitEvent{message: "watch qemu process failed"}
		return
	}
}
Example #14
// launchQemu runs qemu and waits for it to quit
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
Example #15
// state machine
func commonStateHandler(ctx *VmContext, ev QemuEvent, hasPod bool) bool {
	processed := true
	switch ev.Event() {
	case EVENT_QMP_EVENT:
		if ev.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
			glog.Info("Got QMP shutdown event, go to cleaning up")
			ctx.unsetTimeout()
			if closed := ctx.onQemuExit(hasPod); !closed {
				ctx.Become(stateDestroying, "DESTROYING")
			}
		} else {
			processed = false
		}
	case ERROR_INTERRUPTED:
		glog.Info("Connection interrupted, quit...")
		ctx.exitVM(true, "connection to VM broken", hasPod)
	case COMMAND_SHUTDOWN:
		glog.Info("got shutdown command, shutting down")
		ctx.exitVM(false, "", hasPod)
	default:
		processed = false
	}
	return processed
}
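Each state function first offers the event to commonStateHandler, which returns true when it consumed it, so shutdown, interruption, and QMP exit events behave uniformly across states. The state functions are presumably driven by a single dispatch loop over the context's event channel; a sketch, where the handler field and its update by Become are assumptions:

type stateHandler func(ctx *VmContext, ev QemuEvent)

// eventLoop feeds every event from the hub to whatever state the
// context is currently in; Become(nil, ...) ends the loop.
func (ctx *VmContext) eventLoop() {
	for ctx.handler != nil {
		ev, ok := <-ctx.hub
		if !ok {
			break
		}
		ctx.handler(ctx, ev)
	}
}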
Example #16
func statePodStopping(ctx *VmContext, ev QemuEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		// processed by common state handler
	} else {
		switch ev.Event() {
		case COMMAND_RELEASE:
			glog.Info("pod stopping, got release, quit.")
			ctx.unsetTimeout()
			ctx.shutdownVM(false, "got release, quit")
			ctx.Become(stateTerminating, "TERMINATING")
			ctx.reportVmShutdown()
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[Stopping] got init ack to %d", ack.reply)
			if ack.reply == INIT_STOPPOD {
				glog.Info("POD stopped ", string(ack.msg))
				ctx.detatchDevice()
				ctx.Become(stateCleaning, "CLEANING")
			}
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_STOPPOD {
				ctx.unsetTimeout()
				ctx.shutdownVM(true, "Stop pod failed as init report")
				ctx.Become(stateTerminating, "TERMINATING")
				glog.Error("Stop pod failed as init report")
			}
		case EVENT_QEMU_TIMEOUT:
			reason := "stopping POD timeout"
			ctx.shutdownVM(true, reason)
			ctx.Become(stateTerminating, "TERMINATING")
			glog.Error(reason)
		default:
			glog.Warning("got unexpected event during pod stopping")
		}
	}
}
Example #17
func stateInit(ctx *VmContext, ev QemuEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
		//processed by common
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during init environment")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case EVENT_QEMU_EXIT:
			glog.Error("Qemu did not start up properly, go to cleaning up")
			ctx.reportVmFault("Qemu did not start up properly, go to cleaning up")
			ctx.Close()
		case EVENT_INIT_CONNECTED:
			glog.Info("begin to wait vm commands")
			ctx.reportVmRun()
		case COMMAND_RELEASE:
			glog.Info("no pod on vm, got release, quit.")
			ctx.shutdownVM(false, "")
			ctx.Become(stateDestroying, "DESTRYING")
			ctx.reportVmShutdown()
		case COMMAND_EXEC:
			ctx.execCmd(ev.(*ExecCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			ctx.setWindowSize(cmd.ClientTag, cmd.Size)
		case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
			glog.Info("got spec, prepare devices")
			if ok := ctx.prepareDevice(ev.(*RunPodCommand)); ok {
				ctx.setTimeout(60)
				ctx.Become(stateStarting, "STARTING")
			}
		default:
			glog.Warning("got event during pod initiating")
		}
	}
}
Example #18
func stateRunning(ctx *VmContext, ev QemuEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		// processed by common state handler
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during reconnect to a running pod")
		ctx.Become(stateTerminating, "TERMINATING")
	} else {
		switch ev.Event() {
		case COMMAND_STOP_POD:
			ctx.stopPod()
			ctx.Become(statePodStopping, "STOPPING")
		case COMMAND_RELEASE:
			glog.Info("pod is running, got release command, let qemu fly")
			ctx.Become(nil, "NONE")
			ctx.reportSuccess("", nil)
		case COMMAND_EXEC:
			ctx.execCmd(ev.(*ExecCommand))
		case COMMAND_ATTACH:
			ctx.attachCmd(ev.(*AttachCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			if ctx.userSpec.Tty {
				ctx.setWindowSize(cmd.ClientTag, cmd.Size)
			}
		case EVENT_POD_FINISH:
			result := ev.(*PodFinished)
			ctx.reportPodFinished(result)
			ctx.shutdownVM(false, "")
			ctx.Become(stateTerminating, "TERMINATING")
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[running] got init ack to %d", ack.reply)
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_EXECCMD {
				cmd := ExecCommand{}
				json.Unmarshal(ack.context.message, &cmd)
				ctx.ptys.Close(ctx, cmd.Sequence)
				glog.V(0).Infof("Exec command %s on session %d failed", cmd.Command[0], cmd.Sequence)
			}
		default:
			glog.Warning("got unexpected event during pod running")
		}
	}
}
Example #19
func (ctx *VmContext) prepareDevice(cmd *RunPodCommand) bool {

	if len(cmd.Spec.Containers) != len(cmd.Containers) {
		ctx.reportBadRequest("Spec and Container Info mismatch")
		return false
	}

	ctx.InitDeviceContext(cmd.Spec, cmd.Containers, cmd.Volumes)

	if glog.V(2) {
		res, _ := json.MarshalIndent(*ctx.vmSpec, "    ", "    ")
		glog.Info("initial vm spec: ", string(res))
	}

	ctx.allocateNetworks()
	ctx.addBlockDevices()

	return true
}
Example #20
func qmpReceiver(ch chan QmpInteraction, decoder *json.Decoder) {
	glog.V(0).Info("Begin receive QMP message")
	for {
		rsp := &QmpResponse{}
		if err := decoder.Decode(rsp); err == io.EOF {
			glog.Info("QMP exit as got EOF")
			ch <- &QmpInternalError{cause: err.Error()}
			return
		} else if err != nil {
			glog.Error("QMP receive and decode error: ", err.Error())
			ch <- &QmpInternalError{cause: err.Error()}
			return
		}
		msg := rsp.msg
		ch <- msg

		if msg.MessageType() == QMP_EVENT && msg.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
			glog.V(0).Info("Shutdown, quit QMP receiver")
			return
		}
	}
}
Example #21
func waitCmdToInit(ctx *VmContext, init *net.UnixConn) {
	looping := true
	cmds := []*DecodedMessage{}

	var pingTimer, pongTimer *time.Timer

	go waitInitAck(ctx, init)

	for looping {
		cmd, ok := <-ctx.vm
		if !ok {
			glog.Info("vm channel closed, quit")
			break
		}
		if cmd.code == INIT_ACK || cmd.code == INIT_ERROR {
			if len(cmds) > 0 {
				if cmds[0].code == INIT_DESTROYPOD {
					glog.Info("got response of shutdown command, last round of command to init")
					looping = false
				}
				if cmd.code == INIT_ACK {
					if cmds[0].code != INIT_PING {
						ctx.Hub <- &CommandAck{
							reply: cmds[0].code,
							msg:   cmd.message,
						}
					}
				} else {
					ctx.Hub <- &CommandError{
						context: cmds[0],
						msg:     cmd.message,
					}
				}
				cmds = cmds[1:]

				if pongTimer != nil {
					glog.V(1).Info("ack got, clear pong timer")
					pongTimer.Stop()
					pongTimer = nil
				}
				if pingTimer == nil {
					pingTimer = time.AfterFunc(30*time.Second, func() {
						defer func() { recover() }()
						glog.V(1).Info("Send ping message to init")
						ctx.vm <- &DecodedMessage{
							code:    INIT_PING,
							message: []byte{},
						}
						pingTimer = nil
					})
				} else {
					pingTimer.Reset(30 * time.Second)
				}
			} else {
				glog.Error("got ack but no command in queue")
			}
		} else if cmd.code == INIT_FINISHPOD {
			num := len(cmd.message) / 4
			results := make([]uint32, num)
			for i := 0; i < num; i++ {
				results[i] = binary.BigEndian.Uint32(cmd.message[i*4 : i*4+4])
			}

			for _, c := range cmds {
				if c.code == INIT_DESTROYPOD {
					glog.Info("got pod finish message after having send destroy message")
					looping = false
					ctx.Hub <- &CommandAck{
						reply: c.code,
					}
					break
				}
			}

			glog.V(1).Infof("Pod finished, returned %d values", num)

			ctx.Hub <- &PodFinished{
				result: results,
			}
		} else {
			if glog.V(1) {
				glog.Infof("send command %d to init, payload: '%s'.", cmd.code, string(cmd.message))
			}
			init.Write(newVmMessage(cmd))
			cmds = append(cmds, cmd)
			if pongTimer == nil {
				glog.V(1).Info("message sent, set pong timer")
				pongTimer = time.AfterFunc(30*time.Second, func() {
					ctx.Hub <- &Interrupted{Reason: "init not reply ping mesg"}
				})
			}
		}
	}

	if pingTimer != nil {
		pingTimer.Stop()
	}
	if pongTimer != nil {
		pongTimer.Stop()
	}
}
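newVmMessage, which serializes each command onto the init socket above, would be the encoder counterpart of the readVmMessage sketch after Example #1, assuming the same 8-byte header framing:

import "encoding/binary"

func newVmMessage(m *DecodedMessage) []byte {
	// total length covers the 8-byte header plus the payload
	length := len(m.message) + 8
	msg := make([]byte, length)
	binary.BigEndian.PutUint32(msg[:4], uint32(m.code))
	binary.BigEndian.PutUint32(msg[4:8], uint32(length))
	copy(msg[8:], m.message)
	return msg
}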
Example #22
func stateStarting(ctx *VmContext, ev QemuEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		//processed by common
	} else if processed := deviceInitHandler(ctx, ev); processed {
		if ctx.deviceReady() {
			glog.V(1).Info("device ready, could run pod.")
			ctx.startPod()
		}
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during init pod running environment")
		ctx.Become(stateTerminating, "TERMINATING")
	} else {
		switch ev.Event() {
		case EVENT_QEMU_EXIT:
			glog.Info("Qemu did not start up properly, go to cleaning up")
			if closed := ctx.onQemuExit(true); !closed {
				ctx.Become(stateDestroying, "DESTROYING")
			}
		case EVENT_INIT_CONNECTED:
			glog.Info("begin to wait vm commands")
			ctx.reportVmRun()
		case COMMAND_RELEASE:
			glog.Info("pod starting, got release, please wait")
			ctx.reportBusy("")
		case COMMAND_ATTACH:
			ctx.attachCmd(ev.(*AttachCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			if ctx.userSpec.Tty {
				ctx.setWindowSize(cmd.ClientTag, cmd.Size)
			}
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[starting] got init ack to %d", ack.reply)
			if ack.reply == INIT_STARTPOD {
				ctx.unsetTimeout()
				pinfo := []byte{}
				persist, err := ctx.dump()
				if err == nil {
					buf, err := persist.serialize()
					if err == nil {
						pinfo = buf
					}
				}
				ctx.reportSuccess("Start POD success", pinfo)
				ctx.Become(stateRunning, "RUNNING")
				glog.Info("pod start success ", string(ack.msg))
			}
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_STARTPOD {
				reason := "Start POD failed"
				ctx.shutdownVM(true, reason)
				ctx.Become(stateTerminating, "TERMINATING")
				glog.Error(reason)
			}
		case EVENT_QEMU_TIMEOUT:
			reason := "Start POD timeout"
			ctx.shutdownVM(true, reason)
			ctx.Become(stateTerminating, "TERMINATING")
			glog.Error(reason)
		default:
			glog.Warning("got event during pod initiating")
		}
	}
}
Example #23
func daemon(cmd string, argv []string, pipe int) error {

	// create a subprocess
	pid, err := fork(false)
	if err != nil {
		return err
	} else if pid > 0 {
		go func() {
			wp, err := syscall.Wait4(int(pid), nil, 0, nil)
			if err == nil {
				glog.V(3).Infof("collect child %d", wp)
			} else {
				glog.Errorf("error during wait %d: %s", pid, err.Error())
			}
		}()
		// return the parent
		return nil
	}

	// exit the created one, create the daemon
	_, err = fork(true)
	if err != nil {
		glog.Error("second fork failed: ", err.Error())
		os.Exit(-1)
	}

	cur := os.Getpid()
	glog.V(1).Infof("qemu daemon pid %d.", cur)
	//Change the file mode mask
	_ = syscall.Umask(0)

	// create a new SID for the child process
	s_ret, err := syscall.Setsid()
	if err != nil {
		glog.Info("Error: syscall.Setsid errno: ", err.Error())
		os.Exit(-1)
	}
	if s_ret < 0 {
		glog.Errorf("setsid return negative value: %d", s_ret)
		os.Exit(-1)
	}

	os.Chdir("/")

	f, e := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if e == nil {
		fd := f.Fd()
		syscall.Dup2(int(fd), int(os.Stdin.Fd()))
		syscall.Dup2(int(fd), int(os.Stdout.Fd()))
		syscall.Dup2(int(fd), int(os.Stderr.Fd()))
	}

	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(cur))
	syscall.Write(pipe, buf)
	syscall.Close(pipe)

	fds := listFd()
	for _, fd := range fds {
		if f, err := strconv.Atoi(fd); err == nil && f > 2 {
			glog.V(1).Infof("close fd %d", f)
			syscall.Close(f)
		}
	}

	err = syscall.Exec(cmd, argv, []string{})
	if err != nil {
		glog.Error("fail to exec qemu process")
		os.Exit(-1)
	}

	return nil
}
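fork and listFd are helpers not shown in this example. Given the strconv.Atoi parsing above, listFd plausibly returns the entries of /proc/self/fd as strings; a sketch under that assumption (Linux-specific):

import "io/ioutil"

// listFd lists this process's open file descriptors ("0", "1", ...)
// by reading the /proc/self/fd directory.
func listFd() []string {
	entries, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return []string{}
	}
	names := make([]string, 0, len(entries))
	for _, e := range entries {
		names = append(names, e.Name())
	}
	return names
}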
Example #24
func qmpInitializer(ctx *VmContext) {
	conn, err := unixSocketConnect(ctx.qmpSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.qmpSockName, " ", err.Error())
		ctx.qmp <- qmpFail(err.Error(), nil)
		return
	}

	glog.V(1).Info("connected to ", ctx.qmpSockName)

	var msg map[string]interface{}
	decoder := json.NewDecoder(conn)
	defer func() {
		if err != nil {
			conn.Close()
		}
	}()

	glog.Info("begin qmp init...")

	err = decoder.Decode(&msg)
	if err != nil {
		glog.Error("get qmp welcome failed: ", err.Error())
		ctx.qmp <- qmpFail(err.Error(), nil)
		return
	}

	glog.Info("got qmp welcome, now sending command qmp_capabilities")

	cmd, err := json.Marshal(QmpCommand{Execute: "qmp_capabilities"})
	if err != nil {
		glog.Error("qmp_capabilities marshal failed ", err.Error())
		ctx.qmp <- qmpFail(err.Error(), nil)
		return
	}
	_, err = conn.Write(cmd)
	if err != nil {
		glog.Error("qmp_capabilities send failed ", err.Error())
		ctx.qmp <- qmpFail(err.Error(), nil)
		return
	}

	glog.Info("waiting for response")
	rsp := &QmpResponse{}
	err = decoder.Decode(rsp)
	if err != nil {
		glog.Error("response receive failed ", err.Error())
		ctx.qmp <- qmpFail(err.Error(), nil)
		return
	}

	glog.Info("got for response")

	if rsp.msg.MessageType() == QMP_RESULT {
		glog.Info("QMP connection initialized")
		ctx.qmp <- &QmpInit{
			conn:    conn.(*net.UnixConn),
			decoder: decoder,
		}
		return
	}

	ctx.qmp <- qmpFail("handshake failed", nil)
}
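The function above implements the standard QMP negotiation: QEMU greets the client on connect, and the client must answer with qmp_capabilities before any other command is accepted. On the wire the exchange looks roughly like this (greeting fields abbreviated):

S: {"QMP": {"version": {...}, "capabilities": []}}
C: {"execute": "qmp_capabilities"}
S: {"return": {}}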
Example #25
func qmpHandler(ctx *VmContext) {

	go qmpInitializer(ctx)

	timer := time.AfterFunc(10*time.Second, func() {
		glog.Warning("Initializer Timeout.")
		ctx.qmp <- &QmpTimeout{}
	})

	type msgHandler func(QmpInteraction)
	var handler msgHandler
	var conn *net.UnixConn

	buf := []*QmpSession{}
	res := make(chan QmpInteraction, 128)

	loop := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_SESSION:
			glog.Info("got new session")
			buf = append(buf, msg.(*QmpSession))
			if len(buf) == 1 {
				go qmpCommander(ctx.qmp, conn, msg.(*QmpSession), res)
			}
		case QMP_FINISH:
			glog.Infof("session finished, buffer size %d", len(buf))
			r := msg.(*QmpFinish)
			if r.success {
				glog.V(1).Info("success ")
				if r.callback != nil {
					ctx.hub <- r.callback
				}
			} else {
				reason := "unknown"
				if c, ok := r.reason["error"]; ok {
					reason = c.(string)
				}
				glog.Error("QMP command failed ", reason)
				ctx.hub <- &DeviceFailed{
					session: r.callback,
				}
			}
			buf = buf[1:]
			if len(buf) > 0 {
				go qmpCommander(ctx.qmp, conn, buf[0], res)
			}
		case QMP_RESULT, QMP_ERROR:
			res <- msg
		case QMP_EVENT:
			ev := msg.(*QmpEvent)
			ctx.hub <- ev
			if ev.Type == QMP_EVENT_SHUTDOWN {
				glog.Info("got QMP shutdown event, quit...")
				handler = nil
			}
		case QMP_INTERNAL_ERROR:
			res <- msg
			handler = nil
			glog.Info("QMP handler quit as received ", msg.(*QmpInternalError).cause)
			ctx.hub <- &Interrupted{reason: msg.(*QmpInternalError).cause}
		case QMP_QUIT:
			handler = nil
		}
	}

	initializing := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_INIT:
			timer.Stop()
			init := msg.(*QmpInit)
			conn = init.conn
			handler = loop
			glog.Info("QMP initialzed, go into main QMP loop")

			// routine for receiving messages
			go qmpReceiver(ctx.qmp, init.decoder)
			if len(buf) > 0 {
				go qmpCommander(ctx.qmp, conn, buf[0], res)
			}
		case QMP_FINISH:
			finish := msg.(*QmpFinish)
			if !finish.success {
				timer.Stop()
				ctx.hub <- &InitFailedEvent{
					reason: finish.reason["error"].(string),
				}
				handler = nil
				glog.Error("QMP initialize failed")
			}
		case QMP_TIMEOUT:
			ctx.hub <- &InitFailedEvent{
				reason: "QMP Init timeout",
			}
			handler = nil
			glog.Error("QMP initialize timeout")
		case QMP_SESSION:
			glog.Info("got new session during initializing")
			buf = append(buf, msg.(*QmpSession))
		}
	}

	handler = initializing

	for handler != nil {
		msg := <-ctx.qmp
		handler(msg)
	}
}
Example #26
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podId := job.Args[0]
	vmId := job.Args[1]

	glog.Info("pod:%s, vm:%s", podId, vmId)
	// Do the status check for the given pod
	if pod, ok := daemon.podList[podId]; ok {
		if pod.Status == types.S_POD_RUNNING {
			return fmt.Errorf("The pod(%s) is running, can not start it", podId)
		} else {
			if pod.Type == "kubernetes" && pod.Status != types.S_POD_CREATED {
				return fmt.Errorf("The pod(%s) is finished with kubernetes type, can not start it again", podId)
			}
		}
	} else {
		return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}
	data, err := daemon.GetPodByName(podId)
	if err != nil {
		return err
	}
	userPod, err := pod.ProcessPodBytes(data)
	if err != nil {
		return err
	}
	if vmId == "" {
		vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
	} else {
		if _, ok := daemon.vmList[vmId]; !ok {
			return fmt.Errorf("The VM %s doesn't exist", vmId)
		}
		if userPod.Resource.Vcpu != daemon.vmList[vmId].Cpu {
			return fmt.Errorf("The new pod's cpu setting is different the current VM's cpu")
		}
		if userPod.Resource.Memory != daemon.vmList[vmId].Mem {
			return fmt.Errorf("The new pod's memory setting is different the current VM's memory")
		}
	}

	code, cause, err := daemon.StartPod(podId, vmId, "")
	if err != nil {
		daemon.KillVm(vmId)
		glog.Error(err.Error())
		return err
	}

	vm := &Vm{
		Id:     vmId,
		Pod:    daemon.podList[podId],
		Status: types.S_VM_ASSOCIATED,
		Cpu:    userPod.Resource.Vcpu,
		Mem:    userPod.Resource.Memory,
	}
	daemon.podList[podId].Vm = vmId
	daemon.AddVm(vm)

	// Prepare the qemu status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}