Example #1
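// deviceRemoveHandler dispatches device-removal events (container, interface,
// volume and block device) and reports whether the event was handled and
// whether the removal succeeded.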
func deviceRemoveHandler(ctx *VmContext, ev QemuEvent) (bool, bool) {
	processed := true
	success := true
	switch ev.Event() {
	case EVENT_CONTAINER_DELETE:
		success = ctx.onContainerRemoved(ev.(*ContainerUnmounted))
		glog.V(1).Info("Unplug container return with ", success)
	case EVENT_INTERFACE_DELETE:
		success = ctx.onInterfaceRemoved(ev.(*InterfaceReleased))
		glog.V(1).Info("Unplug interface return with ", success)
	case EVENT_BLOCK_EJECTED:
		success = ctx.onVolumeRemoved(ev.(*VolumeUnmounted))
		glog.V(1).Info("Unplug block device return with ", success)
	case EVENT_VOLUME_DELETE:
		success = ctx.onBlockReleased(ev.(*BlockdevRemovedEvent))
		glog.V(1).Info("release volume return with ", success)
	case EVENT_INTERFACE_EJECTED:
		n := ev.(*NetDevRemovedEvent)
		nic := ctx.devices.networkMap[n.Index]
		glog.V(1).Infof("release %d interface: %s", n.Index, nic.IpAddr)
		go ReleaseInterface(n.Index, nic.IpAddr, nic.Fd, ctx.hub)
	default:
		processed = false
	}
	return processed, success
}
Example #2
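// watchDog waits on the context's wdt channel: "kill" terminates the QEMU
// process and reports the result on the hub, while "quit" or a closed channel
// stops the watchdog.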
func watchDog(ctx *VmContext) {
	for {
		msg, ok := <-ctx.wdt
		if ok {
			switch msg {
			case "quit":
				glog.V(1).Info("quit watch dog.")
				return
			case "kill":
				success := false
				if ctx.process != nil {
					glog.V(0).Infof("kill Qemu... %d", ctx.process.Pid)
					if err := ctx.process.Kill(); err == nil {
						success = true
					}
				} else {
					glog.Warning("no process to be killed")
				}
				ctx.hub <- &QemuKilledEvent{success: success}
				return
			}
		} else {
			glog.V(1).Info("chan closed, quit watch dog.")
			break
		}
	}
}
Example #3
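// waitConsoleOutput connects to the console unix socket in telnet mode and
// logs every console line until the output channel is closed.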
func waitConsoleOutput(ctx *VmContext) {

	conn, err := unixSocketConnect(ctx.consoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.consoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.consoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.consoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.consoleSockName)

	cout := make(chan string, 128)
	go ttyLiner(tc, cout)

	for {
		line, ok := <-cout
		if ok {
			glog.V(1).Info("[console] ", line)
		} else {
			glog.Info("console output end")
			break
		}
	}
}
Example #4
func watchDog(qc *QemuContext, hub chan hypervisor.VmEvent) {
	wdt := qc.wdt
	for {
		msg, ok := <-wdt
		if ok {
			switch msg {
			case "quit":
				glog.V(1).Info("quit watch dog.")
				return
			case "kill":
				success := false
				if qc.process != nil {
					glog.V(0).Infof("kill Qemu... %d", qc.process.Pid)
					if err := qc.process.Kill(); err == nil {
						success = true
					}
				} else {
					glog.Warning("no process to be killed")
				}
				hub <- &hypervisor.VmKilledEvent{Success: success}
				return
			}
		} else {
			glog.V(1).Info("chan closed, quit watch dog.")
			break
		}
	}
}
Example #5
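// UnmarshalJSON decodes a raw QMP message into an event, a result or an
// error, depending on which of the "event", "return" or "error" keys is
// present.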
func (qmp *QmpResponse) UnmarshalJSON(raw []byte) error {
	var tmp map[string]interface{}
	var err error = nil
	json.Unmarshal(raw, &tmp)
	glog.V(2).Info("got a message ", string(raw))
	if _, ok := tmp["event"]; ok {
		msg := &QmpEvent{}
		err = json.Unmarshal(raw, msg)
		glog.V(2).Info("got event: ", msg.Type)
		qmp.msg = msg
	} else if r, ok := tmp["return"]; ok {
		msg := &QmpResult{}
		switch r.(type) {
		case string:
			msg.Return = map[string]interface{}{
				"return": r.(string),
			}
		default:
			err = json.Unmarshal(raw, msg)
		}
		qmp.msg = msg
	} else if _, ok := tmp["error"]; ok {
		msg := &QmpError{}
		err = json.Unmarshal(raw, msg)
		qmp.msg = msg
	}
	return err
}
Example #6
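// makeHttpHandler wraps an API handler with request logging, client/server
// API version checks and CORS headers.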
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s\n", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s\n", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
Example #7
File: vm.go Project: huangqg/hyper
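// KillVm sends a shutdown command to the VM's QEMU channel, waits for the
// shutdown response, then removes the VM record and its channels.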
func (daemon *Daemon) KillVm(vmId string) (int, string, error) {
	qemuPodEvent, qemuStatus, subQemuStatus, err := daemon.GetQemuChan(vmId)
	if err != nil {
		return -1, "", err
	}
	var qemuResponse *types.QemuResponse
	shutdownPodEvent := &qemu.ShutdownCommand{Wait: false}
	qemuPodEvent.(chan qemu.QemuEvent) <- shutdownPodEvent
	// wait for the qemu response
	for {
		stop := 0
		select {
		case qemuResponse = <-qemuStatus.(chan *types.QemuResponse):
			glog.V(1).Infof("Got response: %d: %s", qemuResponse.Code, qemuResponse.Cause)
			if qemuResponse.Code == types.E_VM_SHUTDOWN {
				stop = 1
			}
		case qemuResponse = <-subQemuStatus.(chan *types.QemuResponse):
			glog.V(1).Infof("Got response: %d: %s", qemuResponse.Code, qemuResponse.Cause)
			if qemuResponse.Code == types.E_VM_SHUTDOWN {
				stop = 1
			}
		}
		if stop == 1 {
			break
		}
	}
	close(qemuStatus.(chan *types.QemuResponse))
	close(subQemuStatus.(chan *types.QemuResponse))
	daemon.RemoveVm(vmId)
	daemon.DeleteQemuChan(vmId)

	return qemuResponse.Code, qemuResponse.Cause, nil
}
Example #8
func (cli *DockerCli) SendCmdPull(args ...string) ([]byte, int, error) {
	// We need to create a container via an image object. If the image
	// is not stored locally, we need to pull it from the Docker Hub.

	// Get the repository and tag names from the argument, taking care with
	// repository names that include a port number. For example:
	//      localdomain:5000/samba/hipache:latest
	image := args[0]
	repos, tag := parseTheGivenImageName(image)
	if tag == "" {
		tag = "latest"
	}

	// Pull the image from the Docker Hub
	v := url.Values{}
	v.Set("fromImage", repos)
	v.Set("tag", tag)
	glog.V(3).Infof("The Repository is %s, and the tag is %s\n", repos, tag)
	glog.V(3).Info("pull the image from the repository!\n")
	err := cli.Stream("POST", "/images/create?"+v.Encode(), nil, os.Stdout, nil)
	if err != nil {
		return nil, -1, err
	}
	return nil, 200, nil
}
Example #9
File: job.go Project: huangqg/hyper
// Run executes the job and blocks until the job completes.
// If the job fails it returns an error
func (job *Job) Run() (err error) {
	defer func() {
		// Wait for all background tasks to complete
		if job.closeIO {
			if err := job.Stdout.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stderr.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stdin.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
		}
	}()

	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
		return fmt.Errorf("engine is shutdown")
	}
	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
	// every time the daemon is cleanly restarted.
	// The permanent fix is to implement Job.Stop and Job.OnStop so that
	// ServeApi can cooperate and terminate cleanly.
	if job.Name != "serveapi" {
		job.Eng.l.Lock()
		job.Eng.tasks.Add(1)
		job.Eng.l.Unlock()
		defer job.Eng.tasks.Done()
	}
	// FIXME: make this thread-safe
	// FIXME: implement wait
	if !job.end.IsZero() {
		return fmt.Errorf("%s: job has already completed", job.Name)
	}
	// Log beginning and end of the job
	if job.Eng.Logging {
		glog.V(0).Infof("+job %s\n", job.CallString())
		defer func() {
			okerr := "OK"
			if err != nil {
				okerr = fmt.Sprintf("ERR: %s", err)
			}
			glog.V(0).Infof("-job %s %s\n", job.CallString(), okerr)
		}()
	}

	if job.handler == nil {
		return fmt.Errorf("%s: command not found\n", job.Name)
	}

	var errorMessage = bytes.NewBuffer(nil)
	job.Stderr.Add(errorMessage)

	err = job.handler(job)
	job.end = time.Now()

	return
}
Example #10
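// shutdown kills every running VM, closes the daemon database and flushes
// the logs.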
func (daemon *Daemon) shutdown() error {
	glog.V(0).Info("The daemon will be shutdown")
	glog.V(0).Info("Shutdown all VMs")
	for vm := range daemon.vmList {
		daemon.KillVm(vm)
	}
	(daemon.db).Close()
	glog.Flush()
	return nil
}
Example #11
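// InitNetwork makes sure the bridge interface exists with the requested
// address (creating it if necessary), installs the iptables rules and
// reserves the bridge IP in the allocator.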
func InitNetwork(bIface, bIP string) error {
	if bIface == "" {
		BridgeIface = defaultBridgeIface
	} else {
		BridgeIface = bIface
	}

	if bIP == "" {
		BridgeIP = defaultBridgeIP
	} else {
		BridgeIP = bIP
	}

	addr, err := GetIfaceAddr(BridgeIface)
	if err != nil {
		glog.V(1).Infof("create bridge %s, ip %s", BridgeIface, BridgeIP)
		// No bridge exists, create one

		// If the iface is not found, try to create it
		if err := configureBridge(BridgeIP, BridgeIface); err != nil {
			glog.Error("create bridge failed")
			return err
		}

		addr, err = GetIfaceAddr(BridgeIface)
		if err != nil {
			glog.Error("get iface addr failed\n")
			return err
		}

		bridgeIPv4Net = addr.(*net.IPNet)
	} else {
		glog.V(1).Info("bridge exist\n")
		// Validate that the bridge ip matches the ip specified by BridgeIP
		bridgeIPv4Net = addr.(*net.IPNet)

		if BridgeIP != "" {
			bip, _, err := net.ParseCIDR(BridgeIP)
			if err != nil {
				return err
			}
			if !bridgeIPv4Net.Contains(bip) {
				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", addr, bip)
			}
		}
	}

	err = setupIPTables(addr)
	if err != nil {
		return err
	}

	ipAllocator.RequestIP(bridgeIPv4Net, bridgeIPv4Net.IP)
	return nil
}
Example #12
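// Lookup returns the index of the named container in the VM spec, or -1 if
// it is not found.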
func (ctx *VmContext) Lookup(container string) int {
	if container == "" {
		return -1
	}
	for idx, c := range ctx.vmSpec.Containers {
		if c.Id == container {
			glog.V(1).Infof("found container %s at %d", container, idx)
			return idx
		}
	}
	glog.V(1).Infof("can not found container %s", container)
	return -1
}
Example #13
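// onVolumeRemoved clears the pending-unmount record for the volume and, when
// it is backed by a block device, schedules removal of the device-mapper file.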
func (ctx *VmContext) onVolumeRemoved(v *VolumeUnmounted) bool {
	if _, ok := ctx.progress.deleting.volumes[v.Name]; ok {
		glog.V(1).Infof("volume %s umounted", v.Name)
		delete(ctx.progress.deleting.volumes, v.Name)
	}
	vol := ctx.devices.volumeMap[v.Name]
	if vol.info.fstype != "" {
		glog.V(1).Info("need remove dm file ", vol.info.filename)
		ctx.progress.deleting.blockdevs[vol.info.name] = true
		go UmountDMDevice(vol.info.filename, vol.info.name, ctx.Hub)
	}
	return v.Success
}
Example #14
// launchQemu runs qemu and waits for it to quit.
func launchQemu(ctx *VmContext) {
	qemu, err := exec.LookPath("qemu-system-x86_64")
	if err != nil {
		ctx.hub <- &QemuExitEvent{message: "can not find qemu executable"}
		return
	}

	args := ctx.QemuArguments()

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	go waitConsoleOutput(ctx)

	pipe := make([]int, 2)
	err = syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.hub <- &QemuExitEvent{message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.hub <- &QemuExitEvent{message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.watchPid(int(pid))
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.hub <- &QemuExitEvent{message: "watch qemu process failed"}
		return
	}
}
Example #15
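// qmpCommander sends each command of a QMP session over the control socket,
// retrying up to three times on QMP errors before reporting failure.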
func qmpCommander(handler chan QmpInteraction, conn *net.UnixConn, session *QmpSession, feedback chan QmpInteraction) {
	glog.V(1).Info("Begin process command session")
	for _, cmd := range session.commands {
		msg, err := json.Marshal(*cmd)
		if err != nil {
			handler <- qmpFail("cannot marshal command", session.callback)
			return
		}

		success := false
		var qe *QmpError = nil
		for repeat := 0; !success && repeat < 3; repeat++ {

			if len(cmd.Scm) > 0 {
				glog.V(1).Infof("send cmd with scm (%d bytes) (%d) %s", len(cmd.Scm), repeat+1, string(msg))
				f, _ := conn.File()
				fd := f.Fd()
				syscall.Sendmsg(int(fd), msg, cmd.Scm, nil, 0)
			} else {
				glog.V(1).Infof("sending command (%d) %s", repeat+1, string(msg))
				conn.Write(msg)
			}

			res, ok := <-feedback
			if !ok {
				glog.Info("QMP command result chan closed")
				return
			}
			switch res.MessageType() {
			case QMP_RESULT:
				success = true
				break
			//success
			case QMP_ERROR:
				glog.Warning("got one qmp error")
				qe = res.(*QmpError)
				time.Sleep(1000 * time.Millisecond)
			case QMP_INTERNAL_ERROR:
				glog.Info("QMP quit... commander quit... ")
				return
			}
		}

		if !success {
			handler <- qe.Finish(session.callback)
			return
		}
	}
	handler <- session.Finish()
	return
}
Example #16
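// loop is the main event loop: it reads events from the hub channel and
// dispatches them to the current state handler until the handler is cleared
// or the channel is closed.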
func (ctx *VmContext) loop() {
	for ctx.handler != nil {
		ev, ok := <-ctx.hub
		if !ok {
			glog.Error("hub chan has already been closed")
			break
		} else if ev == nil {
			glog.V(1).Info("got nil event.")
			continue
		}
		glog.V(1).Infof("main event loop got message %d(%s)", ev.Event(), EventString(ev.Event()))
		ctx.handler(ctx, ev)
	}
}
Example #17
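// SendCmdDelete force-removes the named container, including its volumes,
// through the Docker remote API.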
func (cli *DockerCli) SendCmdDelete(args ...string) ([]byte, int, error) {
	container := args[0]
	glog.V(1).Infof("Prepare to delete the container : %s", container)
	v := url.Values{}
	v.Set("v", "1")
	v.Set("force", "1")
	_, statusCode, err := readBody(cli.Call("DELETE", "/containers/"+container+"?"+v.Encode(), nil, nil))
	if err != nil {
		return nil, statusCode, fmt.Errorf("Error to remove the container(%s), %s", container, err.Error())
	}
	glog.V(1).Infof("status code is %d", statusCode)

	return nil, statusCode, nil
}
Example #18
// launchQemu runs qemu and waits for it to quit.
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
Example #19
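// CmdPodCreate creates a pod from the spec passed in the job arguments and
// writes the new pod ID back to the client.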
func (daemon *Daemon) CmdPodCreate(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	podArgs := job.Args[0]

	wg := new(sync.WaitGroup)
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	err := daemon.CreatePod(podArgs, podId, wg)
	if err != nil {
		return err
	}
	if err := daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saveing the Containers info")
		return err
	}

	// Prepare the qemu status for the client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #20
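// postContainerCreate runs the "create" job for the requested image and
// returns the new container ID as JSON.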
func postContainerCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := r.ParseForm(); err != nil {
		return nil
	}

	glog.V(1).Infof("Image name is %s\n", r.Form.Get("imageName"))
	job := eng.Job("create", r.Form.Get("imageName"))
	stdoutBuf := bytes.NewBuffer(nil)
	stderrBuf := bytes.NewBuffer(nil)

	job.Stdout.Add(stdoutBuf)
	job.Stderr.Add(stderrBuf)
	if err := job.Run(); err != nil {
		return err
	}

	var (
		env             engine.Env
		dat             map[string]interface{}
		returnedJSONstr string
	)
	returnedJSONstr = engine.Tail(stdoutBuf, 1)
	if err := json.Unmarshal([]byte(returnedJSONstr), &dat); err != nil {
		return err
	}

	env.Set("ContainerID", dat["ContainerID"].(string))
	return writeJSONEnv(w, http.StatusCreated, env)
}
Example #21
// Install installs daemon capabilities to eng.
func (daemon *Daemon) Install(eng *engine.Engine) error {
	// Register each of the daemon's command handlers with the engine.
	for name, method := range map[string]engine.Handler{
		"info":              daemon.CmdInfo,
		"version":           daemon.CmdVersion,
		"create":            daemon.CmdCreate,
		"pull":              daemon.CmdPull,
		"podCreate":         daemon.CmdPodCreate,
		"podStart":          daemon.CmdPodStart,
		"podInfo":           daemon.CmdPodInfo,
		"podRm":             daemon.CmdPodRm,
		"podRun":            daemon.CmdPodRun,
		"podStop":           daemon.CmdPodStop,
		"vmCreate":          daemon.CmdVmCreate,
		"vmKill":            daemon.CmdVmKill,
		"list":              daemon.CmdList,
		"exec":              daemon.CmdExec,
		"attach":            daemon.CmdAttach,
		"tty":               daemon.CmdTty,
		"serveapi":          apiserver.ServeApi,
		"acceptconnections": apiserver.AcceptConnections,
	} {
		glog.V(3).Infof("Engine Register: name= %s\n", name)
		if err := eng.Register(name, method); err != nil {
			return err
		}
	}
	return nil
}
Example #22
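// postPodRemove runs the "podRm" job for the requested pod and relays the
// resulting ID, code and cause to the client.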
func postPodRemove(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := r.ParseForm(); err != nil {
		return nil
	}

	glog.V(1).Infof("Pod(%s) is process to be removed", r.Form.Get("podId"))
	job := eng.Job("podRm", r.Form.Get("podId"))
	stdoutBuf := bytes.NewBuffer(nil)

	job.Stdout.Add(stdoutBuf)

	if err := job.Run(); err != nil {
		return err
	}

	var (
		env             engine.Env
		dat             map[string]interface{}
		returnedJSONstr string
	)
	returnedJSONstr = engine.Tail(stdoutBuf, 1)
	if err := json.Unmarshal([]byte(returnedJSONstr), &dat); err != nil {
		return err
	}

	env.Set("ID", dat["ID"].(string))
	env.SetInt("Code", (int)(dat["Code"].(float64)))
	env.Set("Cause", dat["Cause"].(string))

	return writeJSONEnv(w, http.StatusOK, env)
}
Example #23
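// TtyLiner turns the console byte stream into lines, treating both \r and \n
// as terminators (without double-emitting on \r\n), and closes the output
// channel on read error.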
func TtyLiner(conn io.Reader, output chan string) {
	buf := make([]byte, 1)
	line := []byte{}
	cr := false
	emit := false
	for {

		nr, err := conn.Read(buf)
		if err != nil || nr < 1 {
			glog.V(1).Info("Input byte chan closed, close the output string chan")
			close(output)
			return
		}
		switch buf[0] {
		case '\n':
			emit = !cr
			cr = false
		case '\r':
			emit = true
			cr = true
		default:
			cr = false
			line = append(line, buf[0])
		}
		if emit {
			output <- string(line)
			line = []byte{}
			emit = false
		}
	}
}
Example #24
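// removeInterface marks every network card as pending removal and starts a
// removal session for each device.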
func (ctx *VmContext) removeInterface() {
	for idx, nic := range ctx.devices.networkMap {
		glog.V(1).Infof("remove network card %d: %s", idx, nic.IpAddr)
		ctx.progress.deleting.networks[idx] = true
		newNetworkDelSession(ctx, nic.DeviceName, &NetDevRemovedEvent{Index: idx})
	}
}
Example #25
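// releaseNetwork asynchronously releases every network interface owned by
// the VM and marks it as pending removal.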
func (ctx *VmContext) releaseNetwork() {
	for idx, nic := range ctx.devices.networkMap {
		glog.V(1).Infof("remove network card %d: %s", idx, nic.IpAddr)
		ctx.progress.deleting.networks[idx] = true
		go ReleaseInterface(idx, nic.IpAddr, nic.Fd, ctx.hub)
	}
}
Example #26
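// setVolumeInfo records the volume's file path and format; volumes with a
// real filesystem type are tracked as pending block devices, while plain
// directories are mapped into each container's fsmap.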
func (ctx *VmContext) setVolumeInfo(info *VolumeInfo) {

	vol, ok := ctx.devices.volumeMap[info.Name]
	if !ok {
		return
	}

	vol.info.filename = info.Filepath
	vol.info.format = info.Format

	if info.Fstype != "dir" {
		vol.info.fstype = info.Fstype
		ctx.progress.adding.blockdevs[info.Name] = true
	} else {
		vol.info.fstype = ""
		for i, mount := range vol.pos {
			glog.V(1).Infof("insert volume %s to %s on %d", info.Name, mount, i)
			ctx.vmSpec.Containers[i].Fsmap = append(ctx.vmSpec.Containers[i].Fsmap, VmFsmapDescriptor{
				Source:   info.Filepath,
				Path:     mount,
				ReadOnly: vol.readOnly[i],
			})
		}
	}
}
Example #27
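// stateDestroying handles events while the VM is being torn down:
// device-removal results, QMP shutdown, forced kill, release requests and
// timeouts.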
func stateDestroying(ctx *VmContext, ev QemuEvent) {
	if processed, _ := deviceRemoveHandler(ctx, ev); processed {
		if closed := ctx.tryClose(); closed {
			glog.Info("resources reclaimed, quit...")
		}
	} else {
		switch ev.Event() {
		case EVENT_QMP_EVENT:
			if ev.(*QmpEvent).Type == QMP_EVENT_SHUTDOWN {
				glog.Info("Got QMP shutdown event")
				ctx.unsetTimeout()
				if closed := ctx.onQemuExit(false); closed {
					glog.Info("VM Context closed.")
				}
			}
		case EVENT_QEMU_KILL:
			glog.Info("Got Qemu force killed message")
			ctx.unsetTimeout()
			if closed := ctx.onQemuExit(true); closed {
				glog.Info("VM Context closed.")
			}
		case ERROR_INTERRUPTED:
			glog.V(1).Info("Connection interrupted while destroying")
		case COMMAND_RELEASE:
			glog.Info("vm destroying, got release")
			ctx.reportVmShutdown()
		case EVENT_QEMU_TIMEOUT:
			glog.Info("Device removing timeout")
			ctx.Close()
		default:
			glog.Warning("got event during vm cleaning up")
		}
	}
}
Example #28
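// onBlockReleased clears the pending record for a removed block device and
// reports whether the removal succeeded.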
func (ctx *VmContext) onBlockReleased(v *BlockdevRemovedEvent) bool {
	if _, ok := ctx.progress.deleting.blockdevs[v.Name]; ok {
		glog.V(1).Infof("blockdev %s deleted", v.Name)
		delete(ctx.progress.deleting.blockdevs, v.Name)
	}
	return v.Success
}
Example #29
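// ReleasePortMaps releases a container's host port mappings and deletes the
// corresponding DNAT and filter rules from the HYPER iptables chains.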
func ReleasePortMaps(containerip string, maps []pod.UserContainerPort) error {
	if len(maps) == 0 {
		return nil
	}

	for _, m := range maps {
		var proto string

		glog.V(1).Infof("release port map %d", m.HostPort)
		portMapper.ReleaseMap(m.Protocol, m.HostPort)

		if strings.EqualFold(m.Protocol, "udp") {
			proto = "udp"
		} else {
			proto = "tcp"
		}

		natArgs := []string{"-p", proto, "-m", proto, "--dport",
			strconv.Itoa(m.HostPort), "-j", "DNAT", "--to-destination",
			net.JoinHostPort(containerip, strconv.Itoa(m.ContainerPort))}

		iptables.OperatePortMap(iptables.Delete, "HYPER", natArgs)

		filterArgs := []string{"-d", containerip, "-p", proto, "-m", proto,
			"--dport", strconv.Itoa(m.ContainerPort), "-j", "ACCEPT"}
		iptables.Raw(append([]string{"-D", "HYPER"}, filterArgs...)...)
	}
	/* forbid to map ports twice */
	return nil
}
Example #30
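// removeDMDevice asynchronously unmounts every device-mapper device backing
// the VM's images and volumes, marking each as pending removal.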
func (ctx *VmContext) removeDMDevice() {
	for name, container := range ctx.devices.imageMap {
		if container.info.fstype != "dir" {
			glog.V(1).Info("need remove dm file", container.info.filename)
			ctx.progress.deleting.blockdevs[name] = true
			go UmountDMDevice(container.info.filename, name, ctx.Hub)
		}
	}
	for name, vol := range ctx.devices.volumeMap {
		if vol.info.fstype != "" {
			glog.V(1).Info("need remove dm file ", vol.info.filename)
			ctx.progress.deleting.blockdevs[name] = true
			go UmountDMDevice(vol.info.filename, name, ctx.Hub)
		}
	}
}