Example #1
File: vm.go Project: ZJU-SEL/runv
func GetVm(vmId string, b *BootConfig, waitStarted, lazy bool, keep int) (vm *Vm, err error) {
	var id string
	for {
		id = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
		if _, err = os.Stat(BaseDir + "/" + id); os.IsNotExist(err) {
			break
		}
	}
	vm = NewVm(id, b.CPU, b.Memory, lazy, keep)
	if err = vm.Launch(b); err != nil {
		return nil, err
	}

	if waitStarted {
		// wait until the init process inside the VM has connected
		Status, err := vm.GetResponseChan()
		if err != nil {
			vm.Kill()
			return nil, err
		}
		defer vm.ReleaseResponseChan(Status)
		for {
			vmResponse, ok := <-Status
			if !ok || vmResponse.Code == types.E_VM_RUNNING {
				break
			}
		}
	}
	return vm, nil
}
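
Every example in this section mints IDs and client tags with pod.RandStr(n, set), where set is either "alpha" or "alphanum". The helper itself never appears in these results; the following is only a plausible sketch of such a function under those assumptions, not the projects' actual implementation:

// Hypothetical sketch of a RandStr-style helper; the real pod.RandStr is
// not shown in these search results.
package pod

import "math/rand"

const (
	alphaChars    = "abcdefghijklmnopqrstuvwxyz"
	alphanumChars = "abcdefghijklmnopqrstuvwxyz0123456789"
)

// RandStr returns a random string of length n drawn from the character
// pool named by kind ("alpha" or "alphanum").
func RandStr(n int, kind string) string {
	pool := alphaChars
	if kind == "alphanum" {
		pool = alphanumChars
	}
	buf := make([]byte, n)
	for i := range buf {
		buf[i] = pool[rand.Intn(len(pool))]
	}
	return string(buf)
}

Strings produced this way are not guaranteed unique, which is why Examples #1, #6, #7 and #8 wrap the call in a retry loop against an existence check.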
Example #2
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support up to 1024 pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("pod full: at most 1024 pods are supported")
	}
	autoremove := false
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	lazy := hypervisor.HDriver.SupportLazyMode()

	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #3
func (daemon *Daemon) CmdPodCreate(job *engine.Job) error {
	// we can only support up to 1024 pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("pod full: at most 1024 pods are supported")
	}
	podArgs := job.Args[0]
	autoRemove := false
	if job.Args[1] == "yes" || job.Args[1] == "true" {
		autoRemove = true
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()
	err := daemon.CreatePod(podId, podArgs, autoRemove)
	if err != nil {
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #4
File: runv.go Project: ZJU-SEL/runv
func runvAllocAndRespondTag(conn net.Conn) (tag string, err error) {
	tag = pod.RandStr(8, "alphanum")
	m := &hypervisor.DecodedMessage{
		Code:    RUNV_ACK,
		Message: []byte(tag),
	}
	data := hypervisor.NewVmMessage(m)
	if _, err = conn.Write(data); err != nil {
		return "", err
	}

	return tag, nil
}
Example #5
File: pod.go Project: ZJU-SEL/hyper
func (daemon *Daemon) CreatePod(podId, podArgs string, autoremove bool) (*Pod, error) {
	// we can only support up to 1024 pods
	if daemon.GetRunningPodNum() >= 1024 {
		return nil, fmt.Errorf("pod full: at most 1024 pods are supported")
	}

	if podId == "" {
		podId = fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	}

	return daemon.createPodInternal(podId, podArgs, autoremove, false)
}
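
A minimal call-site sketch for the function above: passing an empty podId lets CreatePod generate the "pod-" prefixed random ID itself. Here podArgs is assumed to hold the pod spec JSON, and the Id field on the returned *Pod is an assumption for illustration:

// Sketch: let the daemon pick a random pod ID.
p, err := daemon.CreatePod("", podArgs, false /* autoremove */)
if err != nil {
	return err
}
glog.Infof("created pod %s", p.Id) // p.Id assumed to carry the generated ID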
Example #6
File: vm.go Project: sulochan/hyper
func (daemon *Daemon) NewVm(id string, cpu, memory int, lazy bool, keep int) *hypervisor.Vm {
	vmId := id

	if vmId == "" {
		for {
			vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
			if _, ok := daemon.VmList[vmId]; !ok {
				break
			}
		}
	}
	return hypervisor.NewVm(vmId, cpu, memory, lazy, keep)
}
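
Note that, unlike Example #8 below, this variant reads daemon.VmList without holding a lock. The collision-retry idiom shared by Examples #1, #6, #7 and #8 can be distilled into a small helper; this is a sketch of the pattern, not code from the projects:

// uniqueName keeps drawing random names until the caller-supplied
// existence check reports a free one (sketch only).
func uniqueName(prefix string, exists func(string) bool) string {
	for {
		name := fmt.Sprintf("%s-%s", prefix, pod.RandStr(10, "alpha"))
		if !exists(name) {
			return name
		}
	}
}

// e.g. vmId := uniqueName("vm", func(id string) bool { _, ok := daemon.VmList[id]; return ok })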
Example #7
func New(templateRoot string, cpu, mem int, kernel, initrd string) base.Factory {
	var vmName string

	for {
		vmName = fmt.Sprintf("template-vm-%s", pod.RandStr(10, "alpha"))
		if _, err := os.Stat(templateRoot + "/" + vmName); os.IsNotExist(err) {
			break
		}
	}
	s, err := template.CreateTemplateVM(templateRoot+"/"+vmName, vmName, cpu, mem, kernel, initrd)
	if err != nil {
		glog.Infof("failed to create template factory: %v", err)
		glog.Infof("use direct factory instead")
		return direct.New(cpu, mem, kernel, initrd)
	}
	return &templateFactory{s: s}
}
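
A hedged usage sketch for the factory constructor above; the paths and sizes are placeholders rather than values from the source, and GetVm(cpu, mem) on the returned base.Factory is taken from its use in Example #14:

// Sketch: build a template factory, which silently degrades to a direct
// factory if the template VM cannot be created.
f := New("/var/lib/hyper/template", 1, 128,
	"/var/lib/hyper/kernel", "/var/lib/hyper/hyper-initrd.img") // placeholder paths
vm, err := f.GetVm(2, 256)
if err != nil {
	glog.Errorf("failed to get VM from factory: %v", err)
}
_ = vm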
Example #8
File: vm.go Project: juito/hyper
func (vl *VmList) NewVm(id string, cpu, memory int, lazy bool) *hypervisor.Vm {
	vmId := id

	vl.Lock()
	defer vl.Unlock()

	if vmId == "" {
		for {
			vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
			if _, ok := vl.vms[vmId]; !ok {
				break
			}
		}
		vl.vms[vmId] = nil
	}
	return hypervisor.NewVm(vmId, cpu, memory, lazy)
}
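
Calling the method with an empty id asks for an auto-generated name; a trivial sketch:

// Sketch: request a VM with a fresh auto-generated ID.
vm := vl.NewVm("", 1, 128, false)
_ = vm

The design point here is that the fresh ID is reserved by storing a nil placeholder in vl.vms while the lock is held, so a concurrent caller cannot draw the same name before the VM is actually registered.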
Example #9
File: vm.go Project: sulochan/hyper
func (daemon *Daemon) CmdVmCreate(job *engine.Job) (err error) {
	var (
		vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))
		vm   *hypervisor.Vm
		cpu  = 1
		mem  = 128
	)

	if job.Args[0] != "" {
		cpu, err = strconv.Atoi(job.Args[0])
		if err != nil {
			return err
		}
	}

	if job.Args[1] != "" {
		mem, err = strconv.Atoi(job.Args[1])
		if err != nil {
			return err
		}
	}

	vm, err = daemon.StartVm(vmId, cpu, mem, false, 0)
	if err != nil {
		return err
	}

	daemon.AddVm(vm)

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #10
File: run.go Project: juito/hyper
func (daemon *Daemon) CreatePod(podId string, podSpec *apitypes.UserPod) (*Pod, error) {
	if podId == "" {
		podId = fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	}

	p, err := daemon.createPodInternal(podId, podSpec, false)
	if err != nil {
		return nil, err
	}

	// Creating the pod may modify the pod spec
	spec, err := json.Marshal(p.Spec)
	if err != nil {
		return nil, err
	}

	if err = daemon.AddPod(p, string(spec)); err != nil {
		return nil, err
	}

	return p, nil
}
Example #11
// SetupLoopbackAddress adds or deletes a loopback (lo) IP address inside a container.
// operation must be either "add" or "del".
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback:  make(chan *types.VmResponse, 1),
		ClientTag: pod.RandStr(8, "alphanum"),
	}

	if err := vm.Exec(tty, container, string(execcmd)); err != nil {
		return err
	}

	if tty.ExitCode != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, tty.ExitCode)
	}

	return nil
}
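
A usage sketch for the helper above; the container ID and address are placeholders:

// Sketch: add a loopback alias, and remove it again when done.
if err := SetupLoopbackAddress(vm, containerId, "172.16.1.10", "add"); err != nil {
	return err
}
defer SetupLoopbackAddress(vm, containerId, "172.16.1.10", "del")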
Example #12
func ApplyServices(vm *hypervisor.Vm, container string, services []pod.UserService) error {
	// Update lo ip addresses
	var command []string
	oldServices, err := GetServices(vm, container)
	if err != nil {
		return err
	}
	err = UpdateLoopbackAddress(vm, container, oldServices, services)
	if err != nil {
		return err
	}

	// Update haproxy config
	config := path.Join(ServiceVolume, ServiceConfig)
	if err := vm.WriteFile(container, config, GenerateServiceConfig(services)); err != nil {
		return err
	}

	command = append(command, "sh", "-c",
		"haproxy -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf `cat /var/run/haproxy.pid`")
	execcmd, err := json.Marshal(command)
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback:  make(chan *types.VmResponse, 1),
		ClientTag: pod.RandStr(8, "alphanum"),
	}

	if err := vm.Exec(tty, container, string(execcmd)); err != nil {
		return err
	}

	if tty.ExitCode != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, tty.ExitCode)
	}

	return nil
}
Example #13
func (daemon *Daemon) CmdPodCreate(job *engine.Job) error {
	// we can only support up to 1024 pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("pod full: at most 1024 pods are supported")
	}
	podArgs := job.Args[0]

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	err := daemon.CreatePod(podId, podArgs, nil, false)
	if err != nil {
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example #14
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	userPod := pod.ConvertOCF2PureUserPod(spec)
	podStatus := hypervisor.NewPod(podId, userPod, nil)

	cpu := defaultCpus
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	mem := defaultMemory
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// if the user specifies a kernel, they must also specify an initrd, and vice versa
		return nil, fmt.Errorf("a kernel and an initrd must be specified together")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}

		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	Response := vm.StartPod(podStatus, userPod, nil, nil)
	if Response.Data == nil {
		vm.Kill()
		glog.V(1).Infof("StartPod fail: QEMU response data is nil\n")
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("result: code %d %s\n", Response.Code, Response.Cause)

	hp := &HyperPod{
		userPod:    userPod,
		podStatus:  podStatus,
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// create Listener process running in its own netns
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
Example #15
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support up to 1024 pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("pod full: at most 1024 pods are supported")
	}
	var (
		autoremove  bool
		tag         string
		ttys        []*hypervisor.TtyIO
		ttyCallback chan *types.VmResponse
	)
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if tag != "" {
		glog.V(1).Info("Pod Run with client terminal tag: ", tag)
		ttyCallback = make(chan *types.VmResponse, 1)
		ttys = append(ttys, &hypervisor.TtyIO{
			Stdin:     job.Stdin,
			Stdout:    job.Stdout,
			ClientTag: tag,
			Callback:  ttyCallback,
		})
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	lazy := hypervisor.HDriver.SupportLazyMode()

	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()
	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE, ttys)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if len(ttys) > 0 {
		<-ttyCallback
		return nil
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
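
The client tag passed as job.Args[2] follows the same convention seen in Examples #4, #11 and #20; here is a sketch of how an interactive caller might prepare the job arguments, treating job.Args as a plain []string the way the handlers above do:

// Sketch: allocate a client terminal tag and run a pod interactively.
// podArgs is a placeholder for the pod spec argument.
tag := pod.RandStr(8, "alphanum")
job.Args = []string{podArgs, "yes" /* autoremove */, tag}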
Example #16
func main() {
	hypervisor.InterfaceCount = 0

	var containerInfoList []*hypervisor.ContainerInfo
	var roots []string
	var containerId string
	var err error

	ocffile := flag.String("config", "", "ocf configure file")
	kernel := flag.String("kernel", "", "hyper kernel")
	initrd := flag.String("initrd", "", "hyper initrd")
	vbox := flag.String("vbox", "", "vbox boot iso")
	driver := flag.String("driver", "", "hypervisor driver")

	flag.Parse()

	if *ocffile == "" {
		*ocffile = "config.json"
	}

	if _, err = os.Stat(*ocffile); os.IsNotExist(err) {
		fmt.Printf("Please specify ocffile or put config.json under current working directory\n")
		return
	}

	if *vbox == "" {
		*vbox = "./vbox.iso"
	}

	if _, err = os.Stat(*vbox); err == nil {
		*vbox, err = filepath.Abs(*vbox)
		if err != nil {
			fmt.Printf("Cannot get abs path for vbox: %s\n", err.Error())
			return
		}
	}

	if *kernel == "" {
		*kernel = "./kernel"
	}

	if _, err = os.Stat(*kernel); err == nil {
		*kernel, err = filepath.Abs(*kernel)
		if err != nil {
			fmt.Printf("Cannot get abs path for kernel: %s\n", err.Error())
			return
		}
	}

	if *initrd == "" {
		*initrd = "./initrd.img"
	}

	if _, err = os.Stat(*initrd); err == nil {
		*initrd, err = filepath.Abs(*initrd)
		if err != nil {
			fmt.Printf("Cannot get abs path for initrd: %s\n", err.Error())
			return
		}
	}

	if *driver == "" {
		*driver = "kvm"
		fmt.Printf("Use default hypervisor KVM\n")
	}

	if hypervisor.HDriver, err = driverloader.Probe(*driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	vmId := fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))

	ocfData, err := ioutil.ReadFile(*ocffile)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	userPod, err := pod.OCFConvert2Pod(ocfData)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	mypod := hypervisor.NewPod(podId, userPod)

	var (
		cpu = 1
		mem = 128
	)

	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}

	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	b := &hypervisor.BootConfig{
		Kernel: *kernel,
		Initrd: *initrd,
		Bios:   "",
		Cbfs:   "",
		Vbox:   *vbox,
		CPU:    cpu,
		Memory: mem,
	}

	vm := hypervisor.NewVm(vmId, cpu, mem, false, types.VM_KEEP_NONE)
	err = vm.Launch(b)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return
	}

	sharedDir := path.Join(hypervisor.BaseDir, vm.Id, hypervisor.ShareDirTag)

	for _, c := range userPod.Containers {
		var root string
		var err error

		containerId = GenerateRandomID()
		rootDir := path.Join(sharedDir, containerId)
		if err = os.MkdirAll(rootDir, 0755); err != nil {
			fmt.Printf("mkdir %s failed: %s\n", rootDir, err.Error())
			return
		}

		rootDir = path.Join(rootDir, "rootfs")

		if !filepath.IsAbs(c.Image) {
			root, err = filepath.Abs(c.Image)
			if err != nil {
				fmt.Printf("%s\n", err.Error())
				return
			}
		} else {
			root = c.Image
		}

		err = mount(root, rootDir)
		if err != nil {
			fmt.Printf("mount %s to %s failed: %s\n", root, rootDir, err.Error())
			return
		}
		roots = append(roots, rootDir)

		containerInfo := &hypervisor.ContainerInfo{
			Id:     containerId,
			Rootfs: "rootfs",
			Image:  containerId,
			Fstype: "dir",
		}

		containerInfoList = append(containerInfoList, containerInfo)
		mypod.AddContainer(containerId, podId, "", []string{}, types.S_POD_CREATED)
	}

	qemuResponse := vm.StartPod(mypod, userPod, containerInfoList, nil)
	if qemuResponse.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)

	inFd, _ := term.GetFdInfo(os.Stdin)
	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)

	oldState, err := term.SetRawTerminal(inFd)
	if err != nil {
		return
	}

	height, width := getTtySize(outFd, isTerminalOut)
	winSize := &hypervisor.WindowSize{
		Row:    uint16(height),
		Column: uint16(width),
	}

	tag := pod.RandStr(8, "alphanum")

	monitorTtySize(vm, tag, outFd, isTerminalOut)

	vm.Attach(os.Stdin, os.Stdout, tag, containerId, winSize)

	qemuResponse = vm.StopPod(mypod, "yes")

	term.RestoreTerminal(inFd, oldState)

	for _, root := range roots {
		umount(root)
	}

	if qemuResponse.Data == nil {
		fmt.Printf("StopPod fail: QEMU response data is nil\n")
		return
	}
	fmt.Printf("result: code %d %s\n", qemuResponse.Code, qemuResponse.Cause)
}
Example #17
func (daemon *Daemon) ParsePod(mypod *hypervisor.Pod, userPod *pod.UserPod,
	vmId string) ([]*hypervisor.ContainerInfo, []*hypervisor.VolumeInfo, error) {
	var (
		fstype            string
		poolName          string
		volPoolName       string
		devPrefix         string
		storageDriver     string
		rootPath          string
		devFullName       string
		rootfs            string
		uid               string
		gid               string
		err               error
		sharedDir         = path.Join(hypervisor.BaseDir, vmId, hypervisor.ShareDirTag)
		containerInfoList = []*hypervisor.ContainerInfo{}
		volumeInfoList    = []*hypervisor.VolumeInfo{}
		cli               = daemon.DockerCli
	)

	storageDriver = daemon.Storage.StorageType
	if storageDriver == "devicemapper" {
		poolName = daemon.Storage.PoolName
		fstype = daemon.Storage.Fstype
		volPoolName = "hyper-volume-pool"
		devPrefix = poolName[:strings.Index(poolName, "-pool")]
		rootPath = "/var/lib/docker/devicemapper"
		rootfs = "/rootfs"
	} else if storageDriver == "aufs" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	} else if storageDriver == "overlay" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	} else if storageDriver == "vbox" {
		fstype = daemon.Storage.Fstype
		rootPath = daemon.Storage.RootPath
		rootfs = "/rootfs"
		_ = devPrefix
		_ = volPoolName
		_ = poolName
	}

	// Process the 'Files' section
	files := make(map[string]pod.UserFile)
	for _, v := range userPod.Files {
		files[v.Name] = v
	}

	for i, c := range mypod.Containers {
		var jsonResponse *dockertypes.ContainerJSONRaw
		if jsonResponse, err = cli.GetContainerInfo(c.Id); err != nil {
			glog.Error("got error when get container Info ", err.Error())
			return nil, nil, err
		}
		if c.Name == "" {
			c.Name = jsonResponse.Name
		}
		if c.Image == "" {
			c.Image = jsonResponse.Config.Image
		}

		if storageDriver == "devicemapper" {
			if err := dm.CreateNewDevice(c.Id, devPrefix, rootPath); err != nil {
				return nil, nil, err
			}
			devFullName, err = dm.MountContainerToSharedDir(c.Id, sharedDir, devPrefix)
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return nil, nil, err
			}
			fstype, err = dm.ProbeFsType(devFullName)
			if err != nil {
				fstype = "ext4"
			}
		} else if storageDriver == "aufs" {
			devFullName, err = aufs.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return nil, nil, err
			}
			devFullName = "/" + c.Id + "/rootfs"
		} else if storageDriver == "overlay" {
			devFullName, err = overlay.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return nil, nil, err
			}
			devFullName = "/" + c.Id + "/rootfs"
		} else if storageDriver == "vbox" {
			devFullName, err = vbox.MountContainerToSharedDir(c.Id, rootPath, "")
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return nil, nil, err
			}
			fstype = "ext4"
		}

		for _, f := range userPod.Containers[i].Files {
			targetPath := f.Path
			file, ok := files[f.Filename]
			if !ok {
				continue
			}
			var fromFile = "/tmp/" + file.Name
			defer os.RemoveAll(fromFile)
			if file.Uri != "" {
				err = utils.DownloadFile(file.Uri, fromFile)
				if err != nil {
					return nil, nil, err
				}
			} else if file.Contents != "" {
				err = ioutil.WriteFile(fromFile, []byte(file.Contents), 0666)
				if err != nil {
					return nil, nil, err
				}
			} else {
				continue
			}
			// read the file back so that its content can be decoded if needed
			fi, err := os.Open(fromFile)
			if err != nil {
				return nil, nil, err
			}
			defer fi.Close()
			fileContent, err := ioutil.ReadAll(fi)
			if err != nil {
				return nil, nil, err
			}
			if file.Encoding == "base64" {
				newContent, err := utils.Base64Decode(string(fileContent))
				if err != nil {
					return nil, nil, err
				}
				err = ioutil.WriteFile(fromFile, []byte(newContent), 0666)
				if err != nil {
					return nil, nil, err
				}
			}
			// get the uid and gid for the attached file
			fileUser := f.User
			fileGroup := f.Group
			u, _ := user.Current()
			if fileUser == "" {
				uid = u.Uid
			} else {
				// check the lookup error: dereferencing a nil *user.User
				// would panic when the named user does not exist
				if u, err = user.Lookup(fileUser); err != nil {
					return nil, nil, err
				}
				uid = u.Uid
				gid = u.Gid
			}
			if fileGroup == "" {
				gid = u.Gid
			}

			if storageDriver == "devicemapper" {
				err := dm.AttachFiles(c.Id, devPrefix, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return nil, nil, err
				}
			} else if storageDriver == "aufs" {
				err := aufs.AttachFiles(c.Id, fromFile, targetPath, sharedDir, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return nil, nil, err
				}
			} else if storageDriver == "overlay" {
				err := overlay.AttachFiles(c.Id, fromFile, targetPath, sharedDir, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return nil, nil, err
				}
			}
		}

		env := make(map[string]string)
		for _, v := range jsonResponse.Config.Env {
			env[v[:strings.Index(v, "=")]] = v[strings.Index(v, "=")+1:]
		}
		for _, e := range userPod.Containers[i].Envs {
			env[e.Env] = e.Value
		}
		glog.V(1).Infof("Parsing envs for container %d: %d Evs", i, len(env))
		glog.V(1).Infof("The fs type is %s", fstype)
		glog.V(1).Infof("WorkingDir is %s", string(jsonResponse.Config.WorkingDir))
		glog.V(1).Infof("Image is %s", string(devFullName))
		containerInfo := &hypervisor.ContainerInfo{
			Id:         c.Id,
			Rootfs:     rootfs,
			Image:      devFullName,
			Fstype:     fstype,
			Workdir:    jsonResponse.Config.WorkingDir,
			Entrypoint: jsonResponse.Config.Entrypoint.Slice(),
			Cmd:        jsonResponse.Config.Cmd.Slice(),
			Envs:       env,
		}
		glog.V(1).Infof("Container Info is \n%v", containerInfo)
		containerInfoList = append(containerInfoList, containerInfo)
		glog.V(1).Infof("container %d created %s, workdir %s, env: %v", i, c.Id, jsonResponse.Config.WorkingDir, env)
	}

	// Process the 'Volumes' section
	for _, v := range userPod.Volumes {
		if v.Source == "" {
			if storageDriver == "devicemapper" {
				volName := fmt.Sprintf("%s-%s-%s", volPoolName, mypod.Id, v.Name)
				dev_id, _ := daemon.GetVolumeId(mypod.Id, volName)
				glog.Error("DeviceID is %d", dev_id)
				if dev_id < 1 {
					dev_id, _ = daemon.GetMaxDeviceId()
					err := daemon.CreateVolume(mypod.Id, volName, fmt.Sprintf("%d", dev_id+1), false)
					if err != nil {
						return nil, nil, err
					}
				} else {
					err := daemon.CreateVolume(mypod.Id, volName, fmt.Sprintf("%d", dev_id), true)
					if err != nil {
						return nil, nil, err
					}
				}

				fstype, err = dm.ProbeFsType("/dev/mapper/" + volName)
				if err != nil {
					fstype = "ext4"
				}
				myVol := &hypervisor.VolumeInfo{
					Name:     v.Name,
					Filepath: path.Join("/dev/mapper/", volName),
					Fstype:   fstype,
					Format:   "raw",
				}
				volumeInfoList = append(volumeInfoList, myVol)
				glog.V(1).Infof("volume %s created with dm as %s", v.Name, volName)
				continue

			} else {
				// Make sure the v.Name is given
				v.Source = path.Join("/var/tmp/hyper/", v.Name)
				if _, err := os.Stat(v.Source); err != nil && os.IsNotExist(err) {
					if err := os.MkdirAll(v.Source, os.FileMode(0777)); err != nil {
						return nil, nil, err
					}
				}
				v.Driver = "vfs"
			}
		}

		if v.Driver != "vfs" {
			glog.V(1).Infof("bypass %s volume %s", v.Driver, v.Name)
			continue
		}

		// The source is set at this point; bind-mount it into sharedDir
		var flags uintptr = utils.MS_BIND

		mountSharedDir := pod.RandStr(10, "alpha")
		targetDir := path.Join(sharedDir, mountSharedDir)
		glog.V(1).Infof("trying to bind dir %s to %s", v.Source, targetDir)

		if runtime.GOOS == "linux" {
			if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) {
				glog.Errorf("error to create dir %s for volume %s", targetDir, v.Name)
				return nil, nil, err
			}
		}

		if err := utils.Mount(v.Source, targetDir, "dir", flags, "--bind"); err != nil {
			glog.Errorf("bind dir %s failed: %s", v.Source, err.Error())
			return nil, nil, err
		}
		myVol := &hypervisor.VolumeInfo{
			Name:     v.Name,
			Filepath: mountSharedDir,
			Fstype:   "dir",
			Format:   "",
		}
		glog.V(1).Infof("dir %s is bound to %s", v.Source, targetDir)
		volumeInfoList = append(volumeInfoList, myVol)
	}

	return containerInfoList, volumeInfoList, nil
}
Example #18
File: runv.go Project: ZJU-SEL/runv
func startRunvPod(context *nsContext, config *startConfig) (err error) {
	context.lock.Lock()
	defer context.lock.Unlock()
	if context.firstConfig == nil {
		context.firstConfig = config
	} else {
		// check stopped
		if len(context.actives) == 0 {
			return fmt.Errorf("The namespace service was stopped")
		}
		// check match
		if config.Root != "" && config.Root != context.firstConfig.Root {
			return fmt.Errorf("The root does not match")
		}
		if config.Driver != "" && config.Driver != context.firstConfig.Driver {
			return fmt.Errorf("The driver does not match")
		}
		if config.Kernel != "" && config.Kernel != context.firstConfig.Kernel {
			return fmt.Errorf("The kernel does not match")
		}
		if config.Initrd != "" && config.Initrd != context.firstConfig.Initrd {
			return fmt.Errorf("The initrd does not match")
		}
		if config.Vbox != "" && config.Vbox != context.firstConfig.Vbox {
			return fmt.Errorf("The vbox does not match")
		}
		// check shared namespace
		for _, ns := range config.LinuxRuntimeSpec.Linux.Namespaces {
			if ns.Path == "" {
				continue
			}
			_, ok := context.actives[ns.Path]
			if !ok {
				return fmt.Errorf("Cann't share namespace with: %s", ns.Path)
			}
		}
		// OK, the pod has been started, add this config and return
		context.actives[config.Name] = config
		return nil
	}

	hypervisor.InterfaceCount = 0

	driver := config.Driver
	if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	context.podId = fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	context.vmId = fmt.Sprintf("vm-%s", pod.RandStr(10, "alpha"))

	context.userPod = pod.ConvertOCF2PureUserPod(&config.LinuxSpec, &config.LinuxRuntimeSpec)
	context.podStatus = hypervisor.NewPod(context.podId, context.userPod)
	context.vm, err = startVm(config, context.userPod, context.vmId)
	if err != nil {
		fmt.Printf("%s\n", err.Error())
		return err
	}

	Response := context.vm.StartPod(context.podStatus, context.userPod, nil, nil)
	if Response.Data == nil {
		fmt.Printf("StartPod fail: QEMU response data is nil\n")
		return fmt.Errorf("StartPod fail")
	}
	fmt.Printf("result: code %d %s\n", Response.Code, Response.Cause)

	context.actives[config.Name] = config
	return nil
}
Example #19
func (daemon *Daemon) PrepareVolume(mypod *hypervisor.Pod, userPod *pod.UserPod,
	vmId string) ([]*hypervisor.VolumeInfo, error) {
	var (
		fstype         string
		volPoolName    = "hyper-volume-pool" // the devicemapper volume pool name (as in ParsePod)
		err            error
		sharedDir      = path.Join(hypervisor.BaseDir, vmId, hypervisor.ShareDirTag)
		volumeInfoList = []*hypervisor.VolumeInfo{}
	)

	// Process the 'Volumes' section
	for _, v := range userPod.Volumes {
		if v.Source == "" {
			if daemon.Storage.StorageType == "devicemapper" {
				volName := fmt.Sprintf("%s-%s-%s", volPoolName, mypod.Id, v.Name)
				dev_id, _ := daemon.GetVolumeId(mypod.Id, volName)
				glog.Error("DeviceID is %d", dev_id)
				if dev_id < 1 {
					dev_id, _ = daemon.GetMaxDeviceId()
					err := daemon.CreateVolume(mypod.Id, volName, fmt.Sprintf("%d", dev_id+1), false)
					if err != nil {
						return nil, err
					}
				} else {
					err := daemon.CreateVolume(mypod.Id, volName, fmt.Sprintf("%d", dev_id), true)
					if err != nil {
						return nil, err
					}
				}

				fstype, err = dm.ProbeFsType("/dev/mapper/" + volName)
				if err != nil {
					fstype = "ext4"
				}
				myVol := &hypervisor.VolumeInfo{
					Name:     v.Name,
					Filepath: path.Join("/dev/mapper/", volName),
					Fstype:   fstype,
					Format:   "raw",
				}
				volumeInfoList = append(volumeInfoList, myVol)
				glog.V(1).Infof("volume %s created with dm as %s", v.Name, volName)
				continue
			} else {
				// Make sure the v.Name is given
				v.Source = path.Join("/var/tmp/hyper/", v.Name)
				if _, err := os.Stat(v.Source); err != nil && os.IsNotExist(err) {
					if err := os.MkdirAll(v.Source, os.FileMode(0777)); err != nil {
						return nil, err
					}
				}
				v.Driver = "vfs"
			}
		}

		if v.Driver != "vfs" {
			glog.V(1).Infof("bypass %s volume %s", v.Driver, v.Name)
			continue
		}

		// The source is set at this point; bind-mount it into sharedDir
		var flags uintptr = utils.MS_BIND

		mountSharedDir := pod.RandStr(10, "alpha")
		targetDir := path.Join(sharedDir, mountSharedDir)
		glog.V(1).Infof("trying to bind dir %s to %s", v.Source, targetDir)

		if runtime.GOOS == "linux" {
			if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) {
				glog.Errorf("error to create dir %s for volume %s", targetDir, v.Name)
				return nil, err
			}
		}

		if err := utils.Mount(v.Source, targetDir, "dir", flags, "--bind"); err != nil {
			glog.Errorf("bind dir %s failed: %s", v.Source, err.Error())
			return nil, err
		}
		myVol := &hypervisor.VolumeInfo{
			Name:     v.Name,
			Filepath: mountSharedDir,
			Fstype:   "dir",
			Format:   "",
		}
		glog.V(1).Infof("dir %s is bound to %s", v.Source, targetDir)
		volumeInfoList = append(volumeInfoList, myVol)
	}

	return volumeInfoList, nil
}
Example #20
File: pod.go Project: ZJU-SEL/hyper
// ContainerAttach attaches streams to the container cId. If stream is true, it streams the output.
func (d Docker) ContainerAttach(cId string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
	tag := pod.RandStr(8, "alphanum")
	return d.Daemon.Attach(stdin, ioutils.NopWriteCloser(stdout), "container", cId, tag)
}
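
A usage sketch for the wrapper above; d is assumed to be a fully wired-up Docker value and cId a placeholder container ID:

// Sketch: attach the caller's terminal to a container and stream output.
if err := d.ContainerAttach(cId, os.Stdin, os.Stdout, os.Stderr, true); err != nil {
	glog.Error(err.Error())
}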