Example #1
File: lazy.go Project: cofyc/runv
func statePreparing(ctx *VmContext, ev VmEvent) {
	switch ev.Event() {
	case EVENT_VM_EXIT, ERROR_INTERRUPTED:
		glog.Info("VM exited before start...")
	case COMMAND_SHUTDOWN, COMMAND_RELEASE:
		glog.Info("got shutdown or release command, not started yet")
		ctx.reportVmShutdown()
		ctx.Become(nil, "NONE")
	case COMMAND_EXEC:
		ctx.execCmd(ev.(*ExecCommand))
	case COMMAND_WINDOWSIZE:
		cmd := ev.(*WindowSizeCommand)
		ctx.setWindowSize(cmd.ClientTag, cmd.Size)
	case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
		glog.Info("got spec, prepare devices")
		if ok := ctx.lazyPrepareDevice(ev.(*RunPodCommand)); ok {
			ctx.startSocks()
			ctx.DCtx.(LazyDriverContext).LazyLaunch(ctx)
			ctx.setTimeout(60)
			ctx.Become(stateStarting, "STARTING")
		} else {
			glog.Warning("Fail to prepare devices, quit")
			ctx.Become(nil, "None")
		}
	default:
		glog.Warning("got event during pod initiating")
	}
}
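statePreparing is one state of runv's VM state machine: each state is a plain handler function and ctx.Become swaps the active handler. Below is a minimal, self-contained sketch of that state-as-function pattern; the Event, Context, and stateFn types are invented stand-ins for illustration, not runv's real API.

package main

import "fmt"

type Event int

const (
	EvStart Event = iota
	EvStop
)

type stateFn func(*Context, Event)

type Context struct {
	current stateFn
	name    string
}

// Become swaps the active state handler, mirroring ctx.Become in the example.
func (c *Context) Become(s stateFn, name string) {
	c.current = s
	c.name = name
}

func statePreparing(c *Context, ev Event) {
	switch ev {
	case EvStart:
		fmt.Println("preparing: got start event, move to STARTING")
		c.Become(stateStarting, "STARTING")
	case EvStop:
		fmt.Println("preparing: got stop event, move to NONE")
		c.Become(nil, "NONE")
	}
}

func stateStarting(c *Context, ev Event) {
	fmt.Println("starting: got event", ev)
}

func main() {
	c := &Context{current: statePreparing, name: "PREPARING"}
	for _, ev := range []Event{EvStart, EvStop} {
		if c.current == nil {
			break
		}
		c.current(c, ev)
	}
	fmt.Println("final state:", c.name)
}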
Example #2
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Wating for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
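waitInitReady connects to the hyperstart control socket and converts the outcome into an event on ctx.Hub. The following is a hedged, self-contained sketch of that connect-then-report flow; the socket path, the one-byte "ready" code, and the initEvent struct are invented for illustration and are much simpler than runv's framed messages.

package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

type initEvent struct {
	ok     bool
	reason string
}

func main() {
	sock := filepath.Join(os.TempDir(), "demo-init.sock")
	os.Remove(sock)

	l, err := net.Listen("unix", sock)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// server side: accept one connection and send a one-byte "ready" code
	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write([]byte{0}) // 0 means "ready" in this toy protocol
	}()

	hub := make(chan initEvent, 1)

	// client side: mirror waitInitReady's connect-then-report flow
	conn, err := net.Dial("unix", sock)
	if err != nil {
		hub <- initEvent{ok: false, reason: "cannot connect to socket: " + err.Error()}
	} else {
		buf := make([]byte, 1)
		if _, err := conn.Read(buf); err != nil || buf[0] != 0 {
			hub <- initEvent{ok: false, reason: "unexpected init message"}
		} else {
			hub <- initEvent{ok: true}
		}
		conn.Close()
	}

	fmt.Printf("init event: %+v\n", <-hub)
}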
Example #3
func InitDriver() *XenDriver {
	if probeXend() {
		glog.Info("xend is running, can not start with xl.")
		return nil
	}

	ctx, res := HyperxlInitializeDriver()
	if res != 0 {
		glog.Info("failed to initialize xen context")
		return nil
	} else if ctx.Version < REQUIRED_VERSION {
		glog.Info("Xen version is not new enough (%d), need 4.5 or higher", ctx.Version)
		return nil
	} else {
		glog.V(1).Info("Xen capabilities: ", ctx.Capabilities)
		hvm := false
		caps := strings.Split(ctx.Capabilities, " ")
		for _, cap := range caps {
			if strings.HasPrefix(cap, "hvm-") {
				hvm = true
				break
			}
		}
		if !hvm {
			glog.Info("Xen installation does not support HVM, current capabilities: %s", ctx.Capabilities)
			return nil
		}
	}

	sigchan := make(chan os.Signal, 1)
	go func() {
		for {
			_, ok := <-sigchan
			if !ok {
				break
			}
			glog.V(1).Info("got SIGCHLD, send msg to libxl")
			HyperxlSigchldHandler(ctx.Ctx)
		}
	}()
	signal.Notify(sigchan, syscall.SIGCHLD)

	xd := &XenDriver{
		Ctx:          ctx.Ctx,
		Logger:       ctx.Logger,
		Version:      ctx.Version,
		Capabilities: ctx.Capabilities,
	}

	xd.domains = make(map[uint32]*hypervisor.VmContext)

	globalDriver = xd
	return globalDriver
}
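InitDriver wires SIGCHLD into libxl by registering a channel with signal.Notify and draining it in a goroutine. The sketch below shows the same pattern in isolation, with nothing Xen-specific assumed; it also listens for SIGINT so it can be run and stopped interactively.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGCHLD, syscall.SIGINT)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for sig := range sigchan {
			fmt.Println("got signal:", sig)
			if sig == syscall.SIGINT {
				return // quit on Ctrl+C
			}
			// for SIGCHLD the Xen driver would call HyperxlSigchldHandler(ctx.Ctx) here
		}
	}()

	fmt.Println("waiting for signals, press Ctrl+C to quit ...")
	<-done
}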
Example #4
func qmpCommander(handler chan QmpInteraction, conn *net.UnixConn, session *QmpSession, feedback chan QmpInteraction) {
	glog.V(1).Info("Begin process command session")
	for _, cmd := range session.commands {
		msg, err := json.Marshal(*cmd)
		if err != nil {
			handler <- qmpFail("cannot marshal command", session.callback)
			return
		}

		success := false
		var qe *QmpError = nil
		for repeat := 0; !success && repeat < 3; repeat++ {

			if len(cmd.Scm) > 0 {
				glog.V(1).Infof("send cmd with scm (%d bytes) (%d) %s", len(cmd.Scm), repeat+1, string(msg))
				f, _ := conn.File()
				fd := f.Fd()
				syscall.Sendmsg(int(fd), msg, cmd.Scm, nil, 0)
			} else {
				glog.V(1).Infof("sending command (%d) %s", repeat+1, string(msg))
				conn.Write(msg)
			}

			res, ok := <-feedback
			if !ok {
				glog.Info("QMP command result chan closed")
				return
			}
			switch res.MessageType() {
			case QMP_RESULT:
				// success: stop retrying
				success = true
			case QMP_ERROR:
				glog.Warning("got one qmp error")
				qe = res.(*QmpError)
				time.Sleep(1000 * time.Millisecond)
			case QMP_INTERNAL_ERROR:
				glog.Info("QMP quit... commander quit... ")
				return
			}
		}

		if !success {
			handler <- qe.Finish(session.callback)
			return
		}
	}
	handler <- session.Finish()
	return
}
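qmpCommander retries each QMP command up to three times, deciding success or failure from a feedback channel. Here is a small, self-contained sketch of that bounded-retry-with-feedback loop; the command string, cmdResult struct, and send callback are illustrative assumptions rather than the real QMP types.

package main

import (
	"errors"
	"fmt"
	"time"
)

type cmdResult struct {
	err error
}

// sendWithRetry sends cmd up to three times, waiting on feedback after each try.
func sendWithRetry(cmd string, send func(string), feedback <-chan cmdResult) error {
	var lastErr error
	for attempt := 1; attempt <= 3; attempt++ {
		fmt.Printf("sending command (%d) %s\n", attempt, cmd)
		send(cmd)

		res, ok := <-feedback
		if !ok {
			return errors.New("feedback channel closed")
		}
		if res.err == nil {
			return nil // success: stop retrying
		}
		lastErr = res.err
		time.Sleep(100 * time.Millisecond) // back off before the next attempt
	}
	return lastErr
}

func main() {
	feedback := make(chan cmdResult, 1)
	attempts := 0
	send := func(string) { // fail once, then succeed
		attempts++
		if attempts < 2 {
			feedback <- cmdResult{err: errors.New("transient error")}
			return
		}
		feedback <- cmdResult{}
	}
	fmt.Println("final result:", sendWithRetry("query-status", send, feedback))
}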
Example #5
func waitConsoleOutput(ctx *VmContext) {

	conn, err := UnixSocketConnect(ctx.ConsoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.ConsoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.ConsoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.ConsoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.ConsoleSockName)

	cout := make(chan string, 128)
	go TtyLiner(tc, cout)

	for {
		line, ok := <-cout
		if ok {
			glog.V(1).Info("[console] ", line)
		} else {
			glog.Info("console output end")
			break
		}
	}
}
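waitConsoleOutput hands the console connection to TtyLiner, which feeds lines into a channel that the caller drains until it is closed. The sketch below reproduces that producer/consumer shape; a bufio.Scanner over an in-memory reader stands in for the telnet-backed console, so nothing here is runv's real TtyLiner.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// ttyLiner reads lines from r and sends them to cout, closing cout at EOF.
func ttyLiner(r io.Reader, cout chan<- string) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		cout <- scanner.Text()
	}
	close(cout)
}

func main() {
	console := strings.NewReader("booting kernel\nmounting rootfs\ninit started\n")
	cout := make(chan string, 128)
	go ttyLiner(console, cout)

	for line := range cout { // the loop ends when ttyLiner closes cout
		fmt.Println("[console]", line)
	}
	fmt.Println("console output end")
}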
Example #6
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we only support up to 1024 running Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod limit reached: at most 1024 Pods are supported")
	}
	}
	var autoremove bool = false
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	var lazy bool = hypervisor.HDriver.SupportLazyMode()

	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #7
func stateCleaning(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
		// processed by commonStateHandler
	} else if processed, success := deviceRemoveHandler(ctx, ev); processed {
		if !success {
			glog.Warning("fail to unplug devices for stop")
			ctx.poweroffVM(true, "fail to unplug devices")
			ctx.Become(stateDestroying, "DESTROYING")
		} else if ctx.deviceReady() {
			//            ctx.reset()
			//            ctx.unsetTimeout()
			//            ctx.reportPodStopped()
			//            glog.V(1).Info("device ready, could run pod.")
			//            ctx.Become(stateInit, "INIT")
			ctx.vm <- &DecodedMessage{
				code:    INIT_READY,
				message: []byte{},
			}
			glog.V(1).Info("device ready, could run pod.")
		}
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.poweroffVM(true, "fail to unplug devices")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case COMMAND_RELEASE:
			glog.Info("vm cleaning to idle, got release, quit")
			ctx.reportVmShutdown()
			ctx.Become(stateDestroying, "DESTROYING")
		case EVENT_VM_TIMEOUT:
			glog.Warning("VM did not exit in time, try to stop it")
			ctx.poweroffVM(true, "pod stopp/unplug timeout")
			ctx.Become(stateDestroying, "DESTROYING")
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[cleaning] Got reply to %d: '%s'", ack.reply, string(ack.msg))
			if ack.reply == INIT_READY {
				ctx.reset()
				ctx.unsetTimeout()
				ctx.reportPodStopped()
				glog.Info("init has been acknowledged, could run pod.")
				ctx.Become(stateInit, "INIT")
			}
		default:
			glog.V(1).Info("got event message while cleaning")
		}
	}
}
Example #8
// InitDeviceContext initializes the device information in the VM context
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
	cInfo []*ContainerInfo, vInfo []*VolumeInfo) {

	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	for i := 0; i < ctx.InterfaceCount; i++ {
		ctx.progress.adding.networks[i] = true
	}

	if cInfo == nil {
		cInfo = []*ContainerInfo{}
	}

	if vInfo == nil {
		vInfo = []*VolumeInfo{}
	}

	ctx.initVolumeMap(spec)

	if glog.V(3) {
		for i, c := range cInfo {
			glog.Infof("#%d Container Info:", i)
			b, err := json.MarshalIndent(c, "...|", "    ")
			if err == nil {
				glog.Info("\n", string(b))
			}
		}
	}

	containers := make([]VmContainer, len(spec.Containers))

	for i, container := range spec.Containers {
		ctx.initContainerInfo(i, &containers[i], &container)
		ctx.setContainerInfo(i, &containers[i], cInfo[i])

		if spec.Tty {
			containers[i].Tty = ctx.attachId
			ctx.attachId++
			ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
		}
	}

	ctx.vmSpec = &VmPod{
		Hostname:   spec.Name,
		Containers: containers,
		Interfaces: nil,
		Routes:     nil,
		ShareDir:   ShareDirTag,
	}

	for _, vol := range vInfo {
		ctx.setVolumeInfo(vol)
	}

	ctx.userSpec = spec
	ctx.wg = wg
}
Example #9
func stateDestroying(ctx *VmContext, ev VmEvent) {
	if processed, _ := deviceRemoveHandler(ctx, ev); processed {
		if closed := ctx.tryClose(); closed {
			glog.Info("resources reclaimed, quit...")
		}
	} else {
		switch ev.Event() {
		case EVENT_VM_EXIT:
			glog.Info("Got VM shutdown event")
			ctx.unsetTimeout()
			if closed := ctx.onVmExit(false); closed {
				glog.Info("VM Context closed.")
			}
		case EVENT_VM_KILL:
			glog.Info("Got VM force killed message")
			ctx.unsetTimeout()
			if closed := ctx.onVmExit(true); closed {
				glog.Info("VM Context closed.")
			}
		case ERROR_INTERRUPTED:
			glog.V(1).Info("Connection interrupted while destroying")
		case COMMAND_RELEASE:
			glog.Info("vm destroying, got release")
			ctx.reportVmShutdown()
		case EVENT_VM_TIMEOUT:
			glog.Info("Device removing timeout")
			ctx.Close()
		default:
			glog.Warning("got event during vm cleaning up")
		}
	}
}
Example #10
func (qc *QemuContext) Kill(ctx *hypervisor.VmContext) {
	defer func() {
		err := recover()
		if glog.V(1) && err != nil {
			glog.Info("kill qemu, but channel has already been closed")
		}
	}()
	qc.wdt <- "kill"
}
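Kill may race with the watchdog goroutine closing qc.wdt, so the deferred recover turns a send-on-closed-channel panic into a log line. A minimal sketch of that guard, with a plain string channel standing in for qc.wdt:

package main

import "fmt"

// kill tries to deliver "kill" on wdt; if the watchdog already quit and closed
// the channel, the deferred recover swallows the resulting panic.
func kill(wdt chan string) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("kill requested, but channel has already been closed")
		}
	}()
	wdt <- "kill"
}

func main() {
	wdt := make(chan string, 1)
	kill(wdt)
	fmt.Println("delivered:", <-wdt)

	close(wdt)
	kill(wdt) // sending on the closed channel panics; the guard recovers
}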
Example #11
// commonStateHandler handles the events shared by every state of the VM state machine
func commonStateHandler(ctx *VmContext, ev VmEvent, hasPod bool) bool {
	processed := true
	switch ev.Event() {
	case EVENT_VM_EXIT:
		glog.Info("Got VM shutdown event, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(hasPod); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case ERROR_INTERRUPTED:
		glog.Info("Connection interrupted, quit...")
		ctx.exitVM(true, "connection to VM broken", false, false)
		ctx.onVmExit(hasPod)
	case COMMAND_SHUTDOWN:
		glog.Info("got shutdown command, shutting down")
		ctx.exitVM(false, "", hasPod, ev.(*ShutdownCommand).Wait)
	default:
		processed = false
	}
	return processed
}
Example #12
func stateTerminating(ctx *VmContext, ev VmEvent) {
	switch ev.Event() {
	case EVENT_VM_EXIT:
		glog.Info("Got VM shutdown event while terminating, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(true); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case EVENT_VM_KILL:
		glog.Info("Got VM force killed message, go to cleaning up")
		ctx.unsetTimeout()
		if closed := ctx.onVmExit(true); !closed {
			ctx.Become(stateDestroying, "DESTROYING")
		}
	case COMMAND_RELEASE:
		glog.Info("vm terminating, got release")
		ctx.reportVmShutdown()
	case COMMAND_ACK:
		ack := ev.(*CommandAck)
		glog.V(1).Infof("[Terminating] Got reply to %d: '%s'", ack.reply, string(ack.msg))
		if ack.reply == INIT_DESTROYPOD {
			glog.Info("POD destroyed ", string(ack.msg))
			ctx.poweroffVM(false, "")
		}
	case ERROR_CMD_FAIL:
		ack := ev.(*CommandError)
		if ack.context.code == INIT_DESTROYPOD {
			glog.Warning("Destroy pod failed")
			ctx.poweroffVM(true, "Destroy pod failed")
		}
	case EVENT_VM_TIMEOUT:
		glog.Warning("VM did not exit in time, try to stop it")
		ctx.poweroffVM(true, "vm terminating timeout")
	case ERROR_INTERRUPTED:
		glog.V(1).Info("Connection interrupted while terminating")
	default:
		glog.V(1).Info("got event during terminating")
	}
}
Example #13
func (pts *pseudoTtys) ptyConnect(ctx *VmContext, container int, session uint64, tty *TtyIO) {

	pts.lock.Lock()
	if ta, ok := pts.ttys[session]; ok {
		ta.attach(tty)
	} else {
		pts.ttys[session] = newAttachmentsWithTty(container, false, tty)
	}
	pts.lock.Unlock()

	if tty.Stdin != nil {
		go func() {
			buf := make([]byte, 32)
			defer pts.Detach(ctx, session, tty)
			defer func() { recover() }()
			for {
				nr, err := tty.Stdin.Read(buf)
				if err != nil {
					glog.Info("a stdin closed, ", err.Error())
					return
				} else if nr == 1 && buf[0] == ExitChar {
					glog.Info("got stdin detach char, exit term")
					return
				}

				glog.V(3).Infof("trying to input char: %d and %d chars", buf[0], nr)

				mbuf := make([]byte, nr)
				copy(mbuf, buf[:nr])
				pts.channel <- &ttyMessage{
					session: session,
					message: mbuf[:nr],
				}
			}
		}()
	}

	return
}
Example #14
func stateInit(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, false); processed {
		//processed by common
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during init environment")
		ctx.Become(stateDestroying, "DESTROYING")
	} else {
		switch ev.Event() {
		case EVENT_VM_START_FAILED:
			glog.Error("VM did not start up properly, go to cleaning up")
			ctx.reportVmFault("VM did not start up properly, go to cleaning up")
			ctx.Close()
		case EVENT_INIT_CONNECTED:
			glog.Info("begin to wait vm commands")
			ctx.reportVmRun()
		case COMMAND_RELEASE:
			glog.Info("no pod on vm, got release, quit.")
			ctx.shutdownVM(false, "")
			ctx.Become(stateDestroying, "DESTRYING")
			ctx.reportVmShutdown()
		case COMMAND_EXEC:
			ctx.execCmd(ev.(*ExecCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			ctx.setWindowSize(cmd.ClientTag, cmd.Size)
		case COMMAND_RUN_POD, COMMAND_REPLACE_POD:
			glog.Info("got spec, prepare devices")
			if ok := ctx.prepareDevice(ev.(*RunPodCommand)); ok {
				ctx.setTimeout(60)
				ctx.Become(stateStarting, "STARTING")
			}
		case COMMAND_GET_POD_IP:
			ctx.reportPodIP()
		default:
			glog.Warning("got event during pod initiating")
		}
	}
}
Example #15
// launchQemu starts qemu as a daemon, reads its pid back over a pipe, and watches the process until it exits
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
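launchQemu reads the daemonized qemu's pid as a big-endian uint32 from a pipe written by the child (see the daemon function in Example #25). The sketch below shows just that hand-off, using os.Pipe and io.ReadFull instead of raw syscalls to stay portable; the goroutine plays the role of the child process.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// child side: report a pid as 4 big-endian bytes and close the write end
	go func() {
		buf := make([]byte, 4)
		binary.BigEndian.PutUint32(buf, uint32(os.Getpid()))
		w.Write(buf)
		w.Close()
	}()

	// parent side: read exactly 4 bytes back and decode the pid
	buf := make([]byte, 4)
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}
	fmt.Println("daemon pid:", binary.BigEndian.Uint32(buf))
}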
Example #16
func Init() {
	if daemonCfg.LogConfig.Config == nil {
		daemonCfg.LogConfig.Config = make(map[string]string)
	}
	daemonCfg.InstallFlags()
	registryCfg.InstallFlags()
	hyperd.NewDockerImpl = func() (docker hyperd.DockerInterface, e error) {
		docker, e = NewDocker()
		if e != nil {
			return nil, fmt.Errorf("failed to create docker instance")
		}
		return docker, nil
	}
	glog.Info("success to create docker")
}
Example #17
func NewDocker() (*Docker, error) {
	registryService := registry.NewService(registryCfg)
	daemonCfg.TrustKeyPath = getDaemonConfDir() + "/" + defaultTrustKeyFile
	daemonCfg.Root = utils.HYPER_ROOT
	d, err := daemon.NewDaemon(daemonCfg, registryService)
	if err != nil {
		glog.Errorf("Error starting daemon: %v", err)
		return nil, err
	}

	glog.Info("Daemon has completed initialization")
	return &Docker{
		daemon: d,
	}, nil
}
Example #18
func statePodStopping(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		// processed by commonStateHandler
	} else {
		switch ev.Event() {
		case COMMAND_RELEASE:
			glog.Info("pod stopping, got release, quit.")
			ctx.unsetTimeout()
			ctx.shutdownVM(false, "got release, quit")
			ctx.Become(stateTerminating, "TERMINATING")
			ctx.reportVmShutdown()
		case COMMAND_ACK, EVENT_POD_FINISH:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[Stopping] got init ack to %d", ack.reply)
			if ack.reply == INIT_STOPPOD {
				glog.Info("POD stopped ", string(ack.msg))
				ctx.detachDevice()
				ctx.Become(stateCleaning, "CLEANING")
			}
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_STOPPOD {
				ctx.unsetTimeout()
				ctx.shutdownVM(true, "Stop pod failed as init report")
				ctx.Become(stateTerminating, "TERMINATING")
				glog.Error("Stop pod failed as init report")
			}
		case EVENT_VM_TIMEOUT:
			reason := "stopping POD timeout"
			ctx.shutdownVM(true, reason)
			ctx.Become(stateTerminating, "TERMINATING")
			glog.Error(reason)
		default:
			glog.Warning("got unexpected event during pod stopping")
		}
	}
}
Example #19
func stateRunning(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		// processed by commonStateHandler
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during reconnect to a running pod")
		ctx.Become(stateTerminating, "TERMINATING")
	} else {
		switch ev.Event() {
		case COMMAND_STOP_POD:
			ctx.stopPod()
			ctx.Become(statePodStopping, "STOPPING")
		case COMMAND_RELEASE:
			glog.Info("pod is running, got release command, let VM fly")
			ctx.Become(nil, "NONE")
			ctx.reportSuccess("", nil)
		case COMMAND_EXEC:
			ctx.execCmd(ev.(*ExecCommand))
		case COMMAND_ATTACH:
			ctx.attachCmd(ev.(*AttachCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			if ctx.userSpec.Tty {
				ctx.setWindowSize(cmd.ClientTag, cmd.Size)
			}
		case EVENT_POD_FINISH:
			result := ev.(*PodFinished)
			ctx.reportPodFinished(result)
			if ctx.Keep == types.VM_KEEP_NONE {
				ctx.exitVM(false, "", true, false)
			}
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[running] got init ack to %d", ack.reply)
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_EXECCMD {
				cmd := ExecCommand{}
				json.Unmarshal(ack.context.message, &cmd)
				ctx.ptys.Close(ctx, cmd.Sequence)
				glog.V(0).Infof("Exec command %s on session %d failed", cmd.Command[0], cmd.Sequence)
			}
		case COMMAND_GET_POD_IP:
			ctx.reportPodIP()
		default:
			glog.Warning("got unexpected event during pod running")
		}
	}
}
Example #20
func (ctx *VmContext) prepareDevice(cmd *RunPodCommand) bool {
	if len(cmd.Spec.Containers) != len(cmd.Containers) {
		ctx.reportBadRequest("Spec and Container Info mismatch")
		return false
	}

	ctx.InitDeviceContext(cmd.Spec, cmd.Wg, cmd.Containers, cmd.Volumes)

	if glog.V(2) {
		res, _ := json.MarshalIndent(*ctx.vmSpec, "    ", "    ")
		glog.Info("initial vm spec: ", string(res))
	}

	ctx.allocateDevices()

	return true
}
Example #21
func qmpReceiver(qmp chan QmpInteraction, wait chan int, decoder *json.Decoder) {
	glog.V(0).Info("Begin receive QMP message")
	for {
		rsp := &QmpResponse{}
		if err := decoder.Decode(rsp); err != nil {
			glog.Info("QMP exit as got error:", err.Error())
			qmp <- &QmpInternalError{cause: err.Error()}
			/* After Error report, send wait notification to close qmp channel */
			wait <- 1
			return
		}

		msg := rsp.msg
		qmp <- msg

		if msg.MessageType() == QMP_EVENT && msg.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
			glog.V(0).Info("Shutdown, quit QMP receiver")
			return
		}
	}
}
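qmpReceiver is a straightforward json.Decoder loop: keep decoding messages off the connection until an error or a shutdown event arrives. The sketch below shows the same loop over an in-memory stream; the qmpMessage shape and the "SHUTDOWN" event name are simplified stand-ins for the real QMP protocol.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type qmpMessage struct {
	Event string `json:"event"`
}

// receiver decodes messages until an error or a shutdown event, then closes out.
func receiver(decoder *json.Decoder, out chan<- qmpMessage) {
	defer close(out)
	for {
		var msg qmpMessage
		if err := decoder.Decode(&msg); err != nil {
			fmt.Println("receiver exits on error:", err)
			return
		}
		out <- msg
		if msg.Event == "SHUTDOWN" {
			fmt.Println("shutdown, quit receiver")
			return
		}
	}
}

func main() {
	stream := strings.NewReader(`{"event":"POWERDOWN"} {"event":"SHUTDOWN"}`)
	out := make(chan qmpMessage, 8)
	go receiver(json.NewDecoder(stream), out)

	for msg := range out {
		fmt.Println("got event:", msg.Event)
	}
}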
Example #22
File: lazy.go Project: cofyc/runv
func (ctx *VmContext) lazyPrepareDevice(cmd *RunPodCommand) bool {

	if len(cmd.Spec.Containers) != len(cmd.Containers) {
		ctx.reportBadRequest("Spec and Container Info mismatch")
		return false
	}

	ctx.InitDeviceContext(cmd.Spec, cmd.Wg, cmd.Containers, cmd.Volumes)

	if glog.V(2) {
		res, _ := json.MarshalIndent(*ctx.vmSpec, "    ", "    ")
		glog.Info("initial vm spec: ", string(res))
	}

	err := ctx.lazyAllocateNetworks()
	if err != nil {
		ctx.reportVmFault(err.Error())
		return false
	}
	ctx.lazyAddBlockDevices()

	return true
}
Exemplo n.º 23
0
//export hyperxl_log_cgo
func hyperxl_log_cgo(msg *C.char, len C.int) {
	if glog.V(1) {
		glog.Info("[libxl] ", C.GoStringN(msg, len))
	}
}
Example #24
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we only support up to 1024 running Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod limit reached: at most 1024 Pods are supported")
	}
	}
	var (
		autoremove  bool                = false
		tag         string              = ""
		ttys        []*hypervisor.TtyIO = []*hypervisor.TtyIO{}
		ttyCallback chan *types.VmResponse
	)
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if tag != "" {
		glog.V(1).Info("Pod Run with client terminal tag: ", tag)
		ttyCallback = make(chan *types.VmResponse, 1)
		ttys = append(ttys, &hypervisor.TtyIO{
			Stdin:     job.Stdin,
			Stdout:    job.Stdout,
			ClientTag: tag,
			Callback:  ttyCallback,
		})
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	var lazy bool = hypervisor.HDriver.SupportLazyMode()

	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()
	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE, ttys)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if len(ttys) > 0 {
		<-ttyCallback
		return nil
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #25
func daemon(cmd string, argv []string, pipe int) error {

	// create a subprocess
	pid, err := fork(false)
	if err != nil {
		return err
	} else if pid > 0 {
		go func() {
			wp, err := syscall.Wait4(int(pid), nil, 0, nil)
			if err == nil {
				glog.V(3).Infof("collect child %d", wp)
			} else {
				glog.Errorf("error during wait %d: %s", pid, err.Error())
			}
		}()
		// return in the parent
		return nil
	}

	// fork again: the intermediate child exits and the grandchild becomes the daemon
	_, err = fork(true)
	if err != nil {
		glog.Error("second fork failed: ", err.Error())
		os.Exit(-1)
	}

	cur := os.Getpid()
	glog.V(1).Infof("qemu daemon pid %d.", cur)
	//Change the file mode mask
	_ = syscall.Umask(0)

	// create a new SID for the child process
	s_ret, err := syscall.Setsid()
	if err != nil {
		glog.Info("Error: syscall.Setsid errno: ", err.Error())
		os.Exit(-1)
	}
	if s_ret < 0 {
		glog.Errorf("setsid return negative value: %d", s_ret)
		os.Exit(-1)
	}

	os.Chdir("/")

	f, e := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if e == nil {
		fd := f.Fd()
		syscall.Dup2(int(fd), int(os.Stdin.Fd()))
		syscall.Dup2(int(fd), int(os.Stdout.Fd()))
		syscall.Dup2(int(fd), int(os.Stderr.Fd()))
	}

	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(cur))
	syscall.Write(pipe, buf)
	syscall.Close(pipe)

	fds := listFd()
	for _, fd := range fds {
		if f, err := strconv.Atoi(fd); err == nil && f > 2 {
			glog.V(1).Infof("close fd %d", f)
			syscall.Close(f)
		}
	}

	err = syscall.Exec(cmd, argv, []string{})
	if err != nil {
		glog.Error("fail to exec qemu process")
		os.Exit(-1)
	}

	return nil
}
Example #26
func qmpHandler(ctx *hypervisor.VmContext) {

	go qmpInitializer(ctx)

	qc := ctx.DCtx.(*QemuContext)
	timer := time.AfterFunc(10*time.Second, func() {
		glog.Warning("Initializer Timeout.")
		qc.qmp <- &QmpTimeout{}
	})

	type msgHandler func(QmpInteraction)
	var handler msgHandler = nil
	var conn *net.UnixConn = nil

	buf := []*QmpSession{}
	res := make(chan QmpInteraction, 128)

	loop := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_SESSION:
			glog.Info("got new session")
			buf = append(buf, msg.(*QmpSession))
			if len(buf) == 1 {
				go qmpCommander(qc.qmp, conn, msg.(*QmpSession), res)
			}
		case QMP_FINISH:
			glog.Infof("session finished, buffer size %d", len(buf))
			r := msg.(*QmpFinish)
			if r.success {
				glog.V(1).Info("success ")
				if r.callback != nil {
					ctx.Hub <- r.callback
				}
			} else {
				reason := "unknown"
				if c, ok := r.reason["error"]; ok {
					reason = c.(string)
				}
				glog.Error("QMP command failed ", reason)
				ctx.Hub <- &hypervisor.DeviceFailed{
					Session: r.callback,
				}
			}
			buf = buf[1:]
			if len(buf) > 0 {
				go qmpCommander(qc.qmp, conn, buf[0], res)
			}
		case QMP_RESULT, QMP_ERROR:
			res <- msg
		case QMP_EVENT:
			ev := msg.(*QmpEvent)
			glog.V(1).Info("got QMP event ", ev.Type)
			if ev.Type == QMP_EVENT_SHUTDOWN {
				glog.Info("got QMP shutdown event, quit...")
				handler = nil
				ctx.Hub <- &hypervisor.VmExit{}
			}
		case QMP_INTERNAL_ERROR:
			res <- msg
			handler = nil
			glog.Info("QMP handler quit as received ", msg.(*QmpInternalError).cause)
			ctx.Hub <- &hypervisor.Interrupted{Reason: msg.(*QmpInternalError).cause}
		case QMP_QUIT:
			handler = nil
		}
	}

	initializing := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_INIT:
			timer.Stop()
			init := msg.(*QmpInit)
			conn = init.conn
			handler = loop
			glog.Info("QMP initialzed, go into main QMP loop")

			// goroutine for receiving QMP messages
			go qmpReceiver(qc.qmp, qc.waitQmp, init.decoder)
			if len(buf) > 0 {
				go qmpCommander(qc.qmp, conn, buf[0], res)
			}
		case QMP_FINISH:
			finish := msg.(*QmpFinish)
			if !finish.success {
				timer.Stop()
				ctx.Hub <- &hypervisor.InitFailedEvent{
					Reason: finish.reason["error"].(string),
				}
				handler = nil
				glog.Error("QMP initialize failed")
			}
		case QMP_TIMEOUT:
			ctx.Hub <- &hypervisor.InitFailedEvent{
				Reason: "QMP Init timeout",
			}
			handler = nil
			glog.Error("QMP initialize timeout")
		case QMP_SESSION:
			glog.Info("got new session during initializing")
			buf = append(buf, msg.(*QmpSession))
		}
	}

	handler = initializing

	for handler != nil {
		msg, ok := <-qc.qmp
		if !ok {
			glog.Info("QMP channel closed, Quit qmp handler")
			break
		}
		handler(msg)
	}
}
Example #27
func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}
	config.Root = realRoot
	// Create the root directory if it doesn't exist
	if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	// Set the default driver
	graphdriver.DefaultDriver = config.GraphDriver

	// Load storage driver
	driver, err := graphdriver.New(config.Root, config.GraphOptions)
	if err != nil {
		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
	}
	glog.V(1).Infof("Using graph driver %s", driver)

	d := &Daemon{}
	d.driver = driver

	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				glog.Error(err)
			}
		}
	}()

	daemonRepo := path.Join(config.Root, "containers")

	if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
		glog.Error(err.Error())
		return nil, err
	}

	glog.Info("Creating images graph")
	g, err := graph.NewGraph(path.Join(config.Root, "graph"), d.driver)
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	trustDir := path.Join(config.Root, "trust")
	if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) {
		glog.Error(err.Error())
		return nil, err
	}
	trustService, err := trust.NewTrustStore(trustDir)
	if err != nil {
		glog.Error(err.Error())
		return nil, fmt.Errorf("could not create trust store: %s", err)
	}

	eventsService := events.New()
	glog.Info("Creating repository list")
	tagCfg := &graph.TagStoreConfig{
		Graph:    g,
		Key:      trustKey,
		Registry: registryService,
		Events:   eventsService,
		Trust:    trustService,
	}
	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
	if err != nil {
		glog.Error(err.Error())
		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
	}

	graphdbPath := path.Join(config.Root, "linkgraph.db")
	graph, err := graphdb.NewSqliteConn(graphdbPath)
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	d.containerGraph = graph

	sysInfo := sysinfo.New(false)
	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = &contStore{s: make(map[string]*Container)}
	d.graph = g
	d.repositories = repositories
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.sysInfo = sysInfo
	d.config = config
	d.sysInitPath = ""
	d.defaultLogConfig = config.LogConfig
	d.RegistryService = registryService
	d.EventsService = eventsService
	d.root = config.Root

	if err := d.restore(); err != nil {
		return nil, err
	}
	return d, nil
}
Example #28
func (daemon *Daemon) restore() error {
	type cr struct {
		container  *Container
		registered bool
	}

	var (
		currentDriver = daemon.driver.String()
		containers    = make(map[string]*cr)
	)

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			glog.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			glog.V(1).Infof("Loaded container %v", container.ID)

			containers[container.ID] = &cr{container: container}
		} else {
			glog.V(1).Infof("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	if entities := daemon.containerGraph.List("/", -1); entities != nil {
		for _, p := range entities.Paths() {
			e := entities[p]

			if c, ok := containers[e.ID()]; ok {
				c.registered = true
			}
		}
	}

	group := sync.WaitGroup{}
	for _, c := range containers {
		group.Add(1)

		go func(container *Container, registered bool) {
			defer group.Done()

			if !registered {
				// Try to set the default name for a container if it exists prior to links
				container.Name, err = daemon.generateNewName(container.ID)
				if err != nil {
					glog.V(1).Infof("Setting default id - %s", err)
				}
			}

			if err := daemon.register(container, false); err != nil {
				glog.V(1).Infof("Failed to register container %s: %s", container.ID, err)
			}

			// check the restart policy on the containers and restart any container with
			// the restart policy of "always"
			if daemon.config.AutoRestart && container.shouldRestart() {
				glog.V(1).Infof("Starting container %s", container.ID)

				if err := container.Start(); err != nil {
					glog.V(1).Infof("Failed to start container %s: %s", container.ID, err)
				}
			}
		}(c.container, c.registered)
	}
	group.Wait()

	glog.Info("Loading containers: done.")

	return nil
}
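restore registers every loaded container concurrently, using a sync.WaitGroup and passing the loop variables into each goroutine as arguments so that no goroutine accidentally shares them. A minimal sketch of that fan-out, with a toy container type invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type container struct {
	ID         string
	registered bool
}

func main() {
	containers := []container{
		{ID: "c1", registered: true},
		{ID: "c2"},
		{ID: "c3"},
	}

	var group sync.WaitGroup
	for _, c := range containers {
		group.Add(1)
		// pass the fields as arguments so each goroutine gets its own copy
		go func(id string, registered bool) {
			defer group.Done()
			if !registered {
				fmt.Println("assigning default name to", id)
			}
			fmt.Println("registered container", id)
		}(c.ID, c.registered)
	}
	group.Wait()

	fmt.Println("Loading containers: done.")
}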
Example #29
func stateStarting(ctx *VmContext, ev VmEvent) {
	if processed := commonStateHandler(ctx, ev, true); processed {
		//processed by common
	} else if processed := deviceInitHandler(ctx, ev); processed {
		if ctx.deviceReady() {
			glog.V(1).Info("device ready, could run pod.")
			ctx.startPod()
		}
	} else if processed := initFailureHandler(ctx, ev); processed {
		ctx.shutdownVM(true, "Fail during init pod running environment")
		ctx.Become(stateTerminating, "TERMINATING")
	} else {
		switch ev.Event() {
		case EVENT_VM_START_FAILED:
			glog.Info("VM did not start up properly, go to cleaning up")
			if closed := ctx.onVmExit(true); !closed {
				ctx.Become(stateDestroying, "DESTROYING")
			}
		case EVENT_INIT_CONNECTED:
			glog.Info("begin to wait vm commands")
			ctx.reportVmRun()
		case COMMAND_RELEASE:
			glog.Info("pod starting, got release, please wait")
			ctx.reportBusy("")
		case COMMAND_ATTACH:
			ctx.attachCmd(ev.(*AttachCommand))
		case COMMAND_WINDOWSIZE:
			cmd := ev.(*WindowSizeCommand)
			if ctx.userSpec.Tty {
				ctx.setWindowSize(cmd.ClientTag, cmd.Size)
			}
		case COMMAND_ACK:
			ack := ev.(*CommandAck)
			glog.V(1).Infof("[starting] got init ack to %d", ack.reply)
			if ack.reply == INIT_STARTPOD {
				ctx.unsetTimeout()
				var pinfo []byte = []byte{}
				persist, err := ctx.dump()
				if err == nil {
					buf, err := persist.serialize()
					if err == nil {
						pinfo = buf
					}
				}
				ctx.reportSuccess("Start POD success", pinfo)
				ctx.Become(stateRunning, "RUNNING")
				glog.Info("pod start success ", string(ack.msg))
			}
		case ERROR_CMD_FAIL:
			ack := ev.(*CommandError)
			if ack.context.code == INIT_STARTPOD {
				reason := "Start POD failed"
				ctx.shutdownVM(true, reason)
				ctx.Become(stateTerminating, "TERMINATING")
				glog.Error(reason)
			}
		case EVENT_VM_TIMEOUT:
			reason := "Start POD timeout"
			ctx.shutdownVM(true, reason)
			ctx.Become(stateTerminating, "TERMINATING")
			glog.Error(reason)
		default:
			glog.Warning("got event during pod initiating")
		}
	}
}
Example #30
func waitCmdToInit(ctx *VmContext, init *net.UnixConn) {
	looping := true
	cmds := []*DecodedMessage{}

	var data []byte
	var timeout bool = false
	var index int = 0
	var got int = 0
	var pingTimer *time.Timer = nil
	var pongTimer *time.Timer = nil

	go waitInitAck(ctx, init)

	for looping {
		cmd, ok := <-ctx.vm
		if !ok {
			glog.Info("vm channel closed, quit")
			break
		}
		if cmd.code == INIT_ACK || cmd.code == INIT_ERROR {
			if len(cmds) > 0 {
				if cmds[0].code == INIT_DESTROYPOD {
					glog.Info("got response of shutdown command, last round of command to init")
					looping = false
				}
				if cmd.code == INIT_ACK {
					if cmds[0].code != INIT_PING {
						ctx.Hub <- &CommandAck{
							reply: cmds[0].code,
							msg:   cmd.message,
						}
					}
				} else {
					ctx.Hub <- &CommandError{
						context: cmds[0],
						msg:     cmd.message,
					}
				}
				cmds = cmds[1:]

				if pongTimer != nil {
					glog.V(1).Info("ack got, clear pong timer")
					pongTimer.Stop()
					pongTimer = nil
				}
				if pingTimer == nil {
					pingTimer = time.AfterFunc(30*time.Second, func() {
						defer func() { recover() }()
						glog.V(1).Info("Send ping message to init")
						ctx.vm <- &DecodedMessage{
							code:    INIT_PING,
							message: []byte{},
						}
						pingTimer = nil
					})
				} else {
					pingTimer.Reset(30 * time.Second)
				}
			} else {
				glog.Error("got ack but no command in queue")
			}
		} else if cmd.code == INIT_FINISHPOD {
			num := len(cmd.message) / 4
			results := make([]uint32, num)
			for i := 0; i < num; i++ {
				results[i] = binary.BigEndian.Uint32(cmd.message[i*4 : i*4+4])
			}

			for _, c := range cmds {
				if c.code == INIT_DESTROYPOD {
					glog.Info("got pod finish message after having send destroy message")
					looping = false
					ctx.Hub <- &CommandAck{
						reply: c.code,
					}
					break
				}
			}

			glog.V(1).Infof("Pod finished, returned %d values", num)

			ctx.Hub <- &PodFinished{
				result: results,
			}
		} else {
			if cmd.code == INIT_NEXT {
				glog.V(1).Infof("get command NEXT")

				got += int(binary.BigEndian.Uint32(cmd.message[0:4]))
				glog.V(1).Infof("send %d, receive %d", index, got)
				timeout = false
				if index == got {
					/* received the sent out message */
					tmp := data[index:]
					data = tmp
					index = 0
					got = 0
				}
			} else {
				glog.V(1).Infof("send command %d to init, payload: '%s'.", cmd.code, string(cmd.message))
				cmds = append(cmds, cmd)
				data = append(data, newVmMessage(cmd)...)
				timeout = true
			}

			if index == 0 && len(data) != 0 {
				var end int = len(data)
				if end > 512 {
					end = 512
				}

				wrote, _ := init.Write(data[:end])
				glog.V(1).Infof("write %d to init, payload: '%s'.", wrote, data[:end])
				index += wrote
			}

			if timeout && pongTimer == nil {
				glog.V(1).Info("message sent, set pong timer")
				pongTimer = time.AfterFunc(30*time.Second, func() {
					ctx.Hub <- &Interrupted{Reason: "init not reply ping mesg"}
				})
			}
		}
	}

	if pingTimer != nil {
		pingTimer.Stop()
	}
	if pongTimer != nil {
		pongTimer.Stop()
	}
}
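waitCmdToInit keeps the connection alive with two time.AfterFunc timers: a ping fires after a quiet period, and a pong timer raises a timeout if no acknowledgement comes back in time. The sketch below shows the same two-timer idea in isolation, with much shorter intervals so it finishes quickly; the channels and durations are illustrative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	acks := make(chan struct{}, 1)
	expired := make(chan struct{}, 1)

	// pong timer: armed after a message is sent, cancelled when the ack arrives
	pong := time.AfterFunc(200*time.Millisecond, func() {
		expired <- struct{}{}
	})

	// pretend the peer acknowledges after 50ms
	go func() {
		time.Sleep(50 * time.Millisecond)
		acks <- struct{}{}
	}()

	select {
	case <-acks:
		pong.Stop() // ack in time: clear the pending timeout
		fmt.Println("ack got, clear pong timer")
	case <-expired:
		fmt.Println("peer did not reply in time")
	}

	// ping timer: after a quiet period with no traffic, send a keep-alive
	ping := time.AfterFunc(100*time.Millisecond, func() {
		fmt.Println("send ping message to peer")
	})
	defer ping.Stop()

	time.Sleep(150 * time.Millisecond) // let the ping fire before exiting
}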