Example #1
File: dm.go  Project: kkpapa/hyper
// CreateVolume creates a device-mapper thin volume named volName (device id
// dev_id, size in bytes) inside the thin pool poolName, then formats it with
// ext4. When restore is true the thin device is assumed to already exist in
// the pool and formatting is skipped, so only the mapping is re-activated.
// Returns nil if the mapped device already exists; otherwise returns an error
// carrying the combined output of the failing shell command.
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	// Idempotence: if /dev/mapper/<volName> is already present, nothing to do.
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if !restore {
		// Allocate a fresh thin device inside the pool.
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			// "%s" (not string(res) as the format) so '%' in command output
			// is not misinterpreted as a formatting directive.
			return fmt.Errorf("%s", res)
		}
	}
	// Activate the thin device; dmsetup tables are in 512-byte sectors.
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}

	if !restore {
		// Freshly created device: put an ext4 filesystem on it.
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
Example #2
// InitDeviceContext populates the VM context's device bookkeeping from the
// user pod spec: it marks every network interface as pending, builds one
// VmContainer entry per container in the spec, wires up ttys when the pod
// requests one, and records the supplied volume info. Holds ctx.lock for the
// whole operation.
func (ctx *VmContext) InitDeviceContext(spec *pod.UserPod, wg *sync.WaitGroup,
	cInfo []*ContainerInfo, vInfo []*VolumeInfo) {

	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	// Every interface starts out as "being added"; progress tracking clears
	// these as the devices come up.
	for i := 0; i < InterfaceCount; i++ {
		ctx.progress.adding.networks[i] = true
	}

	// Normalize nil slices so the loops below are safe.
	if cInfo == nil {
		cInfo = []*ContainerInfo{}
	}

	if vInfo == nil {
		vInfo = []*VolumeInfo{}
	}

	ctx.initVolumeMap(spec)

	// Verbose-only dump of the container info for debugging.
	if glog.V(3) {
		for i, c := range cInfo {
			glog.Infof("#%d Container Info:", i)
			b, err := json.MarshalIndent(c, "...|", "    ")
			if err == nil {
				glog.Info("\n", string(b))
			}
		}
	}

	containers := make([]VmContainer, len(spec.Containers))

	for i, container := range spec.Containers {

		// NOTE(review): &container points at the loop variable, which is
		// reused each iteration (pre-Go 1.22 semantics) — safe only if
		// initContainerInfo does not retain the pointer; confirm.
		ctx.initContainerInfo(i, &containers[i], &container)
		// NOTE(review): indexing cInfo[i] assumes len(cInfo) >=
		// len(spec.Containers); a nil cInfo with a non-empty spec would
		// panic here — verify callers always pass matching slices.
		ctx.setContainerInfo(i, &containers[i], cInfo[i])

		if spec.Tty {
			// Allocate a tty attach id for each container when the pod
			// requests a terminal.
			containers[i].Tty = ctx.attachId
			ctx.attachId++
			ctx.ptys.ttys[containers[i].Tty] = newAttachments(i, true)
		}
	}

	// Interfaces/Routes are filled in later, once networking is configured.
	ctx.vmSpec = &VmPod{
		Hostname:   spec.Name,
		Containers: containers,
		Interfaces: nil,
		Routes:     nil,
		ShareDir:   ShareDirTag,
	}

	for _, vol := range vInfo {
		ctx.setVolumeInfo(vol)
	}

	ctx.userSpec = spec
	ctx.wg = wg
}
Example #3
File: qemu.go  Project: huangqg/hyper
// QemuAssociate reattaches to an already-running QEMU VM from serialized
// state (pack): it deserializes the persisted info, validates the VM id,
// rebuilds the VmContext, starts the service goroutines (QMP, tty, init
// connection), and then blocks in the context event loop. Any validation
// failure is reported to the client channel as E_BAD_REQUEST and the
// function returns without starting anything.
func QemuAssociate(vmId string, hub chan QemuEvent, client chan *types.QemuResponse,
	wg *sync.WaitGroup, pack []byte) {

	// All early failures produce the same E_BAD_REQUEST response shape;
	// build it in one place instead of repeating the literal three times.
	badRequest := func(cause string) {
		client <- &types.QemuResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: cause,
		}
	}

	if glog.V(1) {
		glog.Infof("VM %s trying to reload with serialized data: %s", vmId, string(pack))
	}

	pinfo, err := vmDeserialize(pack)
	if err != nil {
		badRequest(err.Error())
		return
	}

	if pinfo.Id != vmId {
		badRequest("VM ID mismatch")
		return
	}

	context, err := pinfo.vmContext(hub, client, wg)
	if err != nil {
		badRequest(err.Error())
		return
	}

	// Service goroutines: QMP protocol handling, QEMU re-association,
	// pty multiplexing, and the connection to the in-VM init process.
	go qmpHandler(context)
	go associateQemu(context)
	go waitPts(context)
	go connectToInit(context)

	context.Become(stateRunning, "RUNNING")

	// Blocks, processing hub events until the VM shuts down.
	context.loop()
}
Example #4
File: xen.go  Project: BrianMowrey/hyper
// Launch starts the VM as a Xen domain. It assembles the QEMU-style extra
// device arguments (virtio-serial channels for the hyper and tty sockets,
// plus a virtio-9p share directory) and hands them to XlStartDomain. On
// failure a VmStartFailEvent is pushed to the context hub; on success the
// domain id and event channel are recorded and the domain is registered in
// the driver's domain map.
func (xc *XenContext) Launch(ctx *hypervisor.VmContext) {
	//    go func(){
	extra := []string{
		// Serial channel 0: hyper control socket.
		"-device", fmt.Sprintf("virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=%d", PCI_AVAILABLE_ADDRESS),
		"-chardev", fmt.Sprintf("socket,id=charch0,path=%s,server,nowait", ctx.HyperSockName),
		"-device", "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charch0,id=channel0,name=sh.hyper.channel.0",
		// Serial channel 1: tty socket.
		"-chardev", fmt.Sprintf("socket,id=charch1,path=%s,server,nowait", ctx.TtySockName),
		"-device", "virtserialport,bus=virtio-serial0.0,nr=2,chardev=charch1,id=channel1,name=sh.hyper.channel.1",
		// 9p share for the pod's shared directory.
		"-fsdev", fmt.Sprintf("local,id=virtio9p,path=%s,security_model=none", ctx.ShareDir),
		"-device", fmt.Sprintf("virtio-9p-pci,fsdev=virtio9p,mount_tag=%s", hypervisor.ShareDirTag),
	}
	// NOTE(review): the ".test" suffixes on the hyper/tty socket names look
	// like leftover debug paths — confirm they are intentional; the chardev
	// arguments above use the unsuffixed names.
	domid, ev, err := XlStartDomain(xc.driver.Ctx, ctx.Id, ctx.Boot, ctx.HyperSockName+".test", ctx.TtySockName+".test", ctx.ConsoleSockName, extra)
	if err != nil {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
		return
	}
	xc.domId = domid
	xc.ev = ev
	glog.Infof("Start VM as domain %d", domid)
	// Register so xenstore events can be routed back to this context.
	xc.driver.domains[(uint32)(domid)] = ctx
	//    }()
}
Example #5
// waitCmdToInit pumps commands from ctx.vm to the in-VM init process over
// the unix socket and matches init's replies (fed back into ctx.vm by
// waitInitAck) against a FIFO of in-flight commands. It also maintains a
// keepalive: a ping is scheduled 30s after each ack, and a pong timer fires
// an Interrupted event if init does not reply within 30s of a send. The loop
// exits when ctx.vm closes or after the response to INIT_DESTROYPOD.
func waitCmdToInit(ctx *VmContext, init *net.UnixConn) {
	looping := true
	// FIFO of commands sent to init that have not been acked yet;
	// replies are assumed to arrive in send order.
	cmds := []*DecodedMessage{}

	var pingTimer *time.Timer = nil
	var pongTimer *time.Timer = nil

	go waitInitAck(ctx, init)

	for looping {
		cmd, ok := <-ctx.vm
		if !ok {
			glog.Info("vm channel closed, quit")
			break
		}
		if cmd.code == INIT_ACK || cmd.code == INIT_ERROR {
			// A reply from init: it acknowledges cmds[0].
			if len(cmds) > 0 {
				if cmds[0].code == INIT_DESTROYPOD {
					glog.Info("got response of shutdown command, last round of command to init")
					looping = false
				}
				if cmd.code == INIT_ACK {
					// Pings are internal keepalive; don't surface their acks.
					if cmds[0].code != INIT_PING {
						ctx.Hub <- &CommandAck{
							reply: cmds[0].code,
							msg:   cmd.message,
						}
					}
				} else {
					ctx.Hub <- &CommandError{
						context: cmds[0],
						msg:     cmd.message,
					}
				}
				cmds = cmds[1:]

				// Reply received: the connection is alive, cancel the
				// pong deadline and (re)arm the next keepalive ping.
				if pongTimer != nil {
					glog.V(1).Info("ack got, clear pong timer")
					pongTimer.Stop()
					pongTimer = nil
				}
				if pingTimer == nil {
					pingTimer = time.AfterFunc(30*time.Second, func() {
						// recover() swallows the panic from sending on
						// ctx.vm if it was closed before the timer fired.
						defer func() { recover() }()
						glog.V(1).Info("Send ping message to init")
						ctx.vm <- &DecodedMessage{
							code:    INIT_PING,
							message: []byte{},
						}
						// NOTE(review): this write to pingTimer happens on
						// the timer goroutine while the main loop also
						// reads/writes it — data race; confirm or guard.
						pingTimer = nil
					})
				} else {
					pingTimer.Reset(30 * time.Second)
				}
			} else {
				glog.Error("got ack but no command in queue")
			}
		} else if cmd.code == INIT_FINISHPOD {
			// Pod finished: message is a sequence of big-endian uint32
			// container exit results.
			num := len(cmd.message) / 4
			results := make([]uint32, num)
			for i := 0; i < num; i++ {
				results[i] = binary.BigEndian.Uint32(cmd.message[i*4 : i*4+4])
			}

			// If a destroy is still in flight, treat this as its ack and
			// stop looping — init is going away.
			for _, c := range cmds {
				if c.code == INIT_DESTROYPOD {
					glog.Info("got pod finish message after having send destroy message")
					looping = false
					ctx.Hub <- &CommandAck{
						reply: c.code,
					}
					break
				}
			}

			glog.V(1).Infof("Pod finished, returned %d values", num)

			ctx.Hub <- &PodFinished{
				result: results,
			}
		} else {
			// Outbound command: forward to init and queue it for ack
			// matching.
			if glog.V(1) {
				glog.Infof("send command %d to init, payload: '%s'.", cmd.code, string(cmd.message))
			}
			// NOTE(review): Write error is ignored — a broken socket here
			// would leave the command queued forever; consider handling.
			init.Write(newVmMessage(cmd))
			cmds = append(cmds, cmd)
			if pongTimer == nil {
				glog.V(1).Info("message sent, set pong timer")
				pongTimer = time.AfterFunc(30*time.Second, func() {
					ctx.Hub <- &Interrupted{Reason: "init not reply ping mesg"}
				})
			}
		}
	}

	// Make sure neither timer fires after the loop is gone.
	if pingTimer != nil {
		pingTimer.Stop()
	}
	if pongTimer != nil {
		pongTimer.Stop()
	}
}
Example #6
// qmpHandler runs the QMP protocol state machine for a VM. It starts in the
// "initializing" state (waiting for qmpInitializer to hand over a connected
// socket, with a 10s timeout) and, once connected, switches to the main
// "loop" state that serializes QMP command sessions: sessions queue in buf
// and exactly one qmpCommander runs at a time. The handler exits (function
// returns) when a state sets handler to nil — on shutdown event, internal
// error, init failure/timeout, or QMP_QUIT.
func qmpHandler(ctx *VmContext) {

	go qmpInitializer(ctx)

	// Guard against qmpInitializer never reporting back.
	timer := time.AfterFunc(10*time.Second, func() {
		glog.Warning("Initializer Timeout.")
		ctx.qmp <- &QmpTimeout{}
	})

	// The current state is just a function; states swap themselves by
	// reassigning handler. nil means "stop".
	type msgHandler func(QmpInteraction)
	var handler msgHandler = nil
	var conn *net.UnixConn = nil

	// buf queues pending command sessions; res carries per-command
	// results/errors from qmpReceiver back to the active qmpCommander.
	buf := []*QmpSession{}
	res := make(chan QmpInteraction, 128)

	// Main state: connection established, dispatch sessions and events.
	loop := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_SESSION:
			glog.Info("got new session")
			buf = append(buf, msg.(*QmpSession))
			// Only start a commander if none is running (queue was empty).
			if len(buf) == 1 {
				go qmpCommander(ctx.qmp, conn, msg.(*QmpSession), res)
			}
		case QMP_FINISH:
			glog.Infof("session finished, buffer size %d", len(buf))
			r := msg.(*QmpFinish)
			if r.success {
				glog.V(1).Info("success ")
				if r.callback != nil {
					ctx.hub <- r.callback
				}
			} else {
				reason := "unknown"
				if c, ok := r.reason["error"]; ok {
					reason = c.(string)
				}
				glog.Error("QMP command failed ", reason)
				ctx.hub <- &DeviceFailed{
					session: r.callback,
				}
			}
			// Pop the finished session and start the next one, if any.
			buf = buf[1:]
			if len(buf) > 0 {
				go qmpCommander(ctx.qmp, conn, buf[0], res)
			}
		case QMP_RESULT, QMP_ERROR:
			// Command-level replies go straight to the active commander.
			res <- msg
		case QMP_EVENT:
			ev := msg.(*QmpEvent)
			ctx.hub <- ev
			if ev.Type == QMP_EVENT_SHUTDOWN {
				glog.Info("got QMP shutdown event, quit...")
				handler = nil
			}
		case QMP_INTERNAL_ERROR:
			// Forward to the commander (so it can abort) then shut down.
			res <- msg
			handler = nil
			glog.Info("QMP handler quit as received ", msg.(*QmpInternalError).cause)
			ctx.hub <- &Interrupted{reason: msg.(*QmpInternalError).cause}
		case QMP_QUIT:
			handler = nil
		}
	}

	// Startup state: waiting for the QMP connection handshake.
	initializing := func(msg QmpInteraction) {
		switch msg.MessageType() {
		case QMP_INIT:
			timer.Stop()
			init := msg.(*QmpInit)
			conn = init.conn
			handler = loop
			glog.Info("QMP initialzed, go into main QMP loop")

			//routine for get message
			go qmpReceiver(ctx.qmp, init.decoder)
			// Sessions may have queued up while we were connecting.
			if len(buf) > 0 {
				go qmpCommander(ctx.qmp, conn, buf[0], res)
			}
		case QMP_FINISH:
			finish := msg.(*QmpFinish)
			if !finish.success {
				timer.Stop()
				ctx.hub <- &InitFailedEvent{
					reason: finish.reason["error"].(string),
				}
				handler = nil
				glog.Error("QMP initialize failed")
			}
		case QMP_TIMEOUT:
			ctx.hub <- &InitFailedEvent{
				reason: "QMP Init timeout",
			}
			handler = nil
			glog.Error("QMP initialize timeout")
		case QMP_SESSION:
			// Too early to run it; queue until QMP_INIT arrives.
			glog.Info("got new session during initializing")
			buf = append(buf, msg.(*QmpSession))
		}
	}

	handler = initializing

	// Drive the state machine until some state sets handler to nil.
	for handler != nil {
		msg := <-ctx.qmp
		handler(msg)
	}
}