Example 1
func waitConsoleOutput(ctx *VmContext) {

	conn, err := UnixSocketConnect(ctx.ConsoleSockName)
	if err != nil {
		glog.Error("failed to connected to ", ctx.ConsoleSockName, " ", err.Error())
		return
	}

	glog.V(1).Info("connected to ", ctx.ConsoleSockName)

	tc, err := telnet.NewConn(conn)
	if err != nil {
		glog.Error("fail to init telnet connection to ", ctx.ConsoleSockName, ": ", err.Error())
		return
	}
	glog.V(1).Infof("connected %s as telnet mode.", ctx.ConsoleSockName)

	cout := make(chan string, 128)
	go TtyLiner(tc, cout)

	for line := range cout {
		glog.V(1).Info("[console] ", line)
	}
	glog.Info("console output end")
}
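TtyLiner itself is defined elsewhere in hyperd; the loop above relies on a single contract: the producer closes the channel when the console stream ends, which is what lets the range loop finish. A minimal sketch of a compatible producer (the name lineFeeder and its body are assumptions, not the real TtyLiner; assumes bufio and io are imported):

// lineFeeder is a hypothetical stand-in for TtyLiner: it splits the
// reader into lines, sends them on out, and closes out at EOF (or on a
// read error) so the consumer's range loop terminates.
func lineFeeder(r io.Reader, out chan string) {
	defer close(out)
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		out <- sc.Text()
	}
}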
Example 2
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Wating for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
Example 3
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		glog.Error(err.Error())
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		glog.Error(err.Error())
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
Example 4
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
//  identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {

	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	if options == nil {
		options = &archive.TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	dest = filepath.Clean(dest)
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err := system.MkdirAll(dest, 0777); err != nil {
			glog.Error(err.Error())
			return err
		}
	}

	decompressedArchive, err := archive.DecompressStream(tarArchive)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	defer decompressedArchive.Close()

	return invokeUnpack(decompressedArchive, dest, options)
}
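A hedged usage sketch for the function above: since Untar decompresses the stream itself and substitutes an empty TarOptions for nil, a compressed context file can be handed to it directly. The paths and the helper name are illustrative, not from the source:

// untarExample is a hypothetical caller: unpack a gzipped tar archive
// into a destination directory with default options.
func untarExample() error {
	f, err := os.Open("/tmp/context.tar.gz") // illustrative path
	if err != nil {
		return err
	}
	defer f.Close()
	// nil options are fine: Untar fills in an empty TarOptions.
	return Untar(f, "/tmp/build-root", nil)
}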
Example 5
func LazyVmLoop(vmId string, hub chan VmEvent, client chan *types.VmResponse, boot *BootConfig, keep int) {

	glog.V(1).Infof("Start VM %s in lazy mode, not started yet actually", vmId)

	context, err := InitContext(vmId, hub, client, nil, boot, keep)
	if err != nil {
		client <- &types.VmResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: err.Error(),
		}
		return
	}

	if _, ok := context.DCtx.(LazyDriverContext); !ok {
		glog.Error("not a lazy driver, cannot call lazy loop")
		context.reportBadRequest("not a lazy driver, cannot call lazy loop")
		return
	}

	err = context.DCtx.(LazyDriverContext).InitVM(context)
	if err != nil {
		estr := fmt.Sprintf("failed to create VM(%s): %s", vmId, err.Error())
		glog.Error(estr)
		client <- &types.VmResponse{
			VmId:  vmId,
			Code:  types.E_BAD_REQUEST,
			Cause: estr,
		}
		return
	}
	context.Become(statePreparing, "PREPARING")

	context.loop()
}
Example 6
// The caller must make sure that the restart policy and the status are right for a restart
func (daemon *Daemon) RestartPod(mypod *hypervisor.PodStatus) error {
	// Remove the pod
	// The pod is stopped, the vm is gone
	for _, c := range mypod.Containers {
		glog.V(1).Infof("Ready to rm container: %s", c.Id)
		if _, _, err := daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
			glog.V(1).Infof("Error to rm container: %s", err.Error())
		}
	}
	daemon.RemovePod(mypod.Id)
	daemon.DeletePodContainerFromDB(mypod.Id)
	daemon.DeleteVolumeId(mypod.Id)

	podData, err := daemon.GetPodByName(mypod.Id)
	if err != nil {
		return err
	}
	lazy := hypervisor.HDriver.SupportLazyMode()

	// Start the pod
	_, _, err = daemon.StartPod(mypod.Id, string(podData), "", nil, lazy, false, types.VM_KEEP_NONE, []*hypervisor.TtyIO{})
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if err := daemon.WritePodAndContainers(mypod.Id); err != nil {
		glog.Error("Found an error while saving the Containers info")
		return err
	}

	return nil
}
Example 7
File: dm.go Project: sulochan/hyper
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if !restore {
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}

	if !restore {
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
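A hedged call sketch with illustrative values: judging from the table line above, size is given in bytes (it is divided by 512 to get sectors) and dev_id names the thin device inside the pool. None of these concrete values come from the source:

// Illustrative: a 1 GiB thin volume backed by thin device id "5" in
// pool "hyper-volume-pool". With restore false the device is first
// created via create_thin and then formatted with mkfs.ext4.
if err := CreateVolume("hyper-volume-pool", "vol-example", "5", 1<<30, false); err != nil {
	glog.Error(err.Error())
}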
Example 8
File: pod.go Project: neujie/hyper
func (p *Pod) Start(daemon *Daemon, vmId string, lazy, autoremove bool, keep int, streams []*hypervisor.TtyIO) (*types.VmResponse, error) {

	var err error

	if err = p.GetVM(daemon, vmId, lazy, keep); err != nil {
		return nil, err
	}

	defer func() {
		if err != nil && vmId == "" {
			p.KillVM(daemon)
		}
	}()

	if err = p.Prepare(daemon); err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			stopLogger(p.status)
		}
	}()

	if err = p.startLogging(daemon); err != nil {
		return nil, err
	}

	if err = p.AttachTtys(streams); err != nil {
		return nil, err
	}

	vmResponse := p.vm.StartPod(p.status, p.spec, p.containers, p.volumes)
	if vmResponse.Data == nil {
		err = fmt.Errorf("VM response data is nil")
		return vmResponse, err
	}

	err = daemon.UpdateVmData(p.vm.Id, vmResponse.Data.([]byte))
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}
	// add or update the Vm info for POD
	err = daemon.UpdateVmByPod(p.id, p.vm.Id)
	if err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	return vmResponse, nil
}
Example 9
func processInjectFiles(container *pod.UserContainer, files map[string]pod.UserFile,
	id, storageDriver, devPrefix, rootPath, sharedDir string) error {
	for _, f := range container.Files {
		targetPath := f.Path
		if strings.HasSuffix(targetPath, "/") {
			targetPath = targetPath + f.Filename
		}
		file, ok := files[f.Filename]
		if !ok {
			continue
		}

		var src io.Reader

		if file.Uri != "" {
			urisrc, err := utils.UriReader(file.Uri)
			if err != nil {
				return err
			}
			defer urisrc.Close()
			src = urisrc
		} else {
			src = strings.NewReader(file.Contents)
		}

		switch file.Encoding {
		case "base64":
			src = base64.NewDecoder(base64.StdEncoding, src)
		default:
		}

		if storageDriver == "devicemapper" {
			err := dm.InjectFile(src, id, devPrefix, targetPath, rootPath,
				utils.PermInt(f.Perm), utils.UidInt(f.User), utils.UidInt(f.Group))
			if err != nil {
				glog.Error("got error when inject files ", err.Error())
				return err
			}
		} else if storageDriver == "aufs" || storageDriver == "overlay" {
			err := storage.FsInjectFile(src, id, targetPath, sharedDir,
				utils.PermInt(f.Perm), utils.UidInt(f.User), utils.UidInt(f.Group))
			if err != nil {
				glog.Error("got error when inject files ", err.Error())
				return err
			}
		}
	}

	return nil
}
Example 10
// launchQemu runs qemu and waits until it quits.
func launchQemu(qc *QemuContext, ctx *hypervisor.VmContext) {
	qemu := qc.driver.executable
	if qemu == "" {
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find qemu executable"}
		return
	}

	args := qc.arguments(ctx)

	if glog.V(1) {
		glog.Info("cmdline arguments: ", strings.Join(args, " "))
	}

	pipe := make([]int, 2)
	err := syscall.Pipe(pipe)
	if err != nil {
		glog.Error("fail to create pipe")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "fail to create pipe"}
		return
	}

	err = daemon(qemu, append([]string{"qemu-system-x86_64"}, args...), pipe[1])
	if err != nil {
		//fail to daemonize
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}

	buf := make([]byte, 4)
	nr, err := syscall.Read(pipe[0], buf)
	if err != nil || nr != 4 {
		glog.Error("try to start qemu failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "try to start qemu failed"}
		return
	}
	syscall.Close(pipe[1])
	syscall.Close(pipe[0])

	pid := binary.BigEndian.Uint32(buf[:nr])
	glog.V(1).Infof("starting daemon with pid: %d", pid)

	err = ctx.DCtx.(*QemuContext).watchPid(int(pid), ctx.Hub)
	if err != nil {
		glog.Error("watch qemu process failed")
		ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "watch qemu process failed"}
		return
	}
}
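The parent reads exactly four bytes from the pipe: the daemonized child's pid in big-endian order. A minimal sketch of the writing side under that assumption (the real handshake lives inside the daemon helper; reportPid is a hypothetical name):

// Hypothetical counterpart to the read in launchQemu: report the child
// pid as 4 big-endian bytes on the write end of the pipe.
func reportPid(writeFd int, pid int) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(pid))
	_, err := syscall.Write(writeFd, buf)
	return err
}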
Example 11
func (d *Driver) VmMountLayer(id string) error {
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return err
		}
	}

	var (
		diffSrc = fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
		volDst  = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	podstring, err := MakeMountPod("mac-vm-disk-mount-layer", "puller:latest", id, diffSrc, volDst)
	if err != nil {
		return err
	}
	podId := fmt.Sprintf("pull-%s", utils.RandStr(10, "alpha"))
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := d.daemon.StartPod(podId, podstring, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return err
		}
		d.daemon.CleanPod(podId)
	} else {
		glog.Errorf("pull vm should not be associated")
	}
	return nil
}
Example 12
func (daemon *Daemon) DeleteVolumeId(podId string) error {
	key := fmt.Sprintf("vol-%s", podId)
	iter := daemon.db.NewIterator(util.BytesPrefix([]byte(key)), nil)
	defer iter.Release()
	for iter.Next() {
		value := iter.Key()
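		// Keys look like "vol-<podId>"; a pod id is 14 chars ("pod-"
		// plus 10 random ones), so bytes 4..18 of the key hold it.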
		if string(value)[4:18] == podId {
			fields := strings.Split(string(iter.Value()), ":")
			dev_id, _ := strconv.Atoi(fields[1])
			if err := dm.DeleteVolume(daemon.Storage.DmPoolData, dev_id); err != nil {
				glog.Error(err.Error())
				return err
			}
		}
		err := daemon.db.Delete(value, nil)
		if err != nil {
			return err
		}
	}
	return iter.Error()
}
Example 13
func interfaceGot(index int, pciAddr int, name string, inf *network.Settings) (*InterfaceCreated, error) {
	ip, nw, err := net.ParseCIDR(fmt.Sprintf("%s/%d", inf.IPAddress, inf.IPPrefixLen))
	if err != nil {
		glog.Error("can not parse cidr")
		return &InterfaceCreated{Index: index, PCIAddr: pciAddr, DeviceName: name}, err
	}
	mask := net.IP(nw.Mask)

	rt := []*RouteRule{}
	/* The route rule is generated automatically on the first interface,
	 * or on the interface where the gateway is configured. */
	if (index == 0 && inf.Automatic) || (!inf.Automatic && inf.Gateway != "") {
		rt = append(rt, &RouteRule{
			Destination: "0.0.0.0/0",
			Gateway:     inf.Gateway, ViaThis: true,
		})
	}

	return &InterfaceCreated{
		Index:      index,
		PCIAddr:    pciAddr,
		Bridge:     inf.Bridge,
		HostDevice: inf.Device,
		DeviceName: name,
		Fd:         inf.File,
		MacAddr:    inf.Mac,
		IpAddr:     ip.String(),
		NetMask:    mask.String(),
		RouteTable: rt,
	}, nil
}
Example 14
func (d *Driver) createDisk(id, parent string) error {
	// create a raw image
	if _, err := os.Stat(fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)); err == nil {
		return nil
	}
	var (
		parentDisk string = d.BaseImage()
		idDisk     string = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	if parent != "" {
		parentDisk = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), parent)
	}
	params := fmt.Sprintf("vboxmanage createhd --filename %s --diffparent %s --format VDI", idDisk, parentDisk)
	cmd := exec.Command("/bin/sh", "-c", params)
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Warning(string(output))
		if strings.Contains(string(output), "not found in the media registry") {
			if err := virtualbox.RegisterDisk(d.pullVm, d.pullVm, parentDisk, 4); err != nil {
				return err
			}
		}
	}
	os.Chmod(idDisk, 0755)
	params = fmt.Sprintf("vboxmanage closemedium %s", idDisk)
	cmd = exec.Command("/bin/sh", "-c", params)
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Error(err.Error())
		return fmt.Errorf("error to run vboxmanage closemedium, %s", output)
	}
	return nil
}
Example 15
func (store *TagStore) LookupImage(name string) (*image.Image, error) {
	// FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
	// (so we can pass all errors here)
	glog.V(1).Infof("LookupImage Name is %s", name)
	repoName, ref := parsers.ParseRepositoryTag(name)
	if ref == "" {
		ref = DEFAULTTAG
	}
	var (
		err error
		img *image.Image
	)

	img, err = store.GetImage(repoName, ref)
	if err != nil {
		return nil, err
	}

	if img != nil {
		return img, err
	}

	// name must be an image ID.
	store.Lock()
	defer store.Unlock()
	if img, err = store.graph.Get(name); err != nil {
		glog.Error(err.Error())
		return nil, err
	}

	return img, nil
}
Example 16
func (ctx *VmContext) ConfigureInterface(index int, pciAddr int, name string, config pod.UserInterface) {
	var err error
	var inf *network.Settings
	var maps []pod.UserContainerPort

	if index == 0 {
		for _, c := range ctx.userSpec.Containers {
			for _, m := range c.Ports {
				maps = append(maps, m)
			}
		}
	}

	if HDriver.BuildinNetwork() {
		/* VBox doesn't support join to bridge */
		inf, err = ctx.DCtx.ConfigureNetwork(ctx.Id, "", maps, config)
	} else {
		inf, err = network.Configure(ctx.Id, "", false, maps, config)
	}

	if err != nil {
		glog.Error("interface creating failed: ", err.Error())
		session := &InterfaceCreated{Index: index, PCIAddr: pciAddr, DeviceName: name}
		ctx.Hub <- &DeviceFailed{Session: session}
		return
	}

	session, err := interfaceGot(index, pciAddr, name, inf)
	if err != nil {
		ctx.Hub <- &DeviceFailed{Session: session}
		return
	}

	ctx.Hub <- session
}
Example 17
func (daemon *Daemon) CmdPodRun(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}
	autoremove := false
	podArgs := job.Args[0]
	if job.Args[1] == "yes" {
		autoremove = true
	}

	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))

	glog.Info(podArgs)

	lazy := hypervisor.HDriver.SupportLazyMode()

	code, cause, err := daemon.StartPod(podId, podArgs, "", nil, lazy, autoremove, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", podId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example 18
func (ctx *VmContext) allocateInterface(index int, pciAddr int, name string) (*InterfaceCreated, error) {
	var err error
	var inf *network.Settings
	var maps []pod.UserContainerPort

	if index == 0 {
		for _, c := range ctx.userSpec.Containers {
			for _, m := range c.Ports {
				maps = append(maps, m)
			}
		}
	}

	if HDriver.BuildinNetwork() {
		inf, err = ctx.DCtx.AllocateNetwork(ctx.Id, "", maps)
	} else {
		inf, err = network.Allocate(ctx.Id, "", false, maps)
	}

	if err != nil {
		glog.Error("interface creating failed: ", err.Error())

		return &InterfaceCreated{Index: index, PCIAddr: pciAddr, DeviceName: name}, err
	}

	return interfaceGot(index, pciAddr, name, inf)
}
Example 19
func (pinfo *PersistInfo) vmContext(hub chan VmEvent, client chan *types.VmResponse,
	wg *sync.WaitGroup) (*VmContext, error) {

	dc, err := HDriver.LoadContext(pinfo.DriverInfo)
	if err != nil {
		glog.Error("cannot load driver context: ", err.Error())
		return nil, err
	}

	ctx, err := InitContext(pinfo.Id, hub, client, dc, &BootConfig{}, types.VM_KEEP_NONE)
	if err != nil {
		return nil, err
	}

	ctx.vmSpec = pinfo.VmSpec
	ctx.userSpec = pinfo.UserSpec
	ctx.wg = wg

	ctx.loadHwStatus(pinfo)

	for idx, container := range ctx.vmSpec.Containers {
		ctx.ptys.ttys[container.Tty] = newAttachments(idx, true)
	}

	for _, vol := range pinfo.VolumeList {
		binfo := vol.blockInfo()
		if len(vol.Containers) != len(vol.MontPoints) {
			return nil, errors.New("persistent data corrupt, volume info mismatch")
		}
		if len(vol.MontPoints) == 1 && vol.MontPoints[0] == "/" {
			img := &imageInfo{
				info: binfo,
				pos:  vol.Containers[0],
			}
			ctx.devices.imageMap[vol.Name] = img
		} else {
			v := &volumeInfo{
				info:     binfo,
				pos:      make(map[int]string),
				readOnly: make(map[int]bool),
			}
			for i := 0; i < len(vol.Containers); i++ {
				idx := vol.Containers[i]
				v.pos[idx] = vol.MontPoints[i]
				v.readOnly[idx] = ctx.vmSpec.Containers[idx].roLookup(vol.MontPoints[i])
			}
			ctx.devices.volumeMap[vol.Name] = v
		}
	}

	for _, nic := range pinfo.NetworkList {
		ctx.devices.networkMap[nic.Index] = &InterfaceCreated{
			Index:      nic.Index,
			PCIAddr:    nic.PciAddr,
			DeviceName: nic.DeviceName,
			IpAddr:     nic.IpAddr,
		}
	}

	return ctx, nil
}
Example 20
func (daemon *Daemon) CmdPodStart(job *engine.Job) error {
	// we can only support 1024 Pods
	if daemon.GetRunningPodNum() >= 1024 {
		return fmt.Errorf("Pod full, the maximum Pod is 1024!")
	}

	podId := job.Args[0]
	vmId := job.Args[1]

	glog.Infof("pod:%s, vm:%s", podId, vmId)
	// Do the status check for the given pod
	if _, ok := daemon.PodList[podId]; !ok {
		return fmt.Errorf("The pod(%s) can not be found, please create it first", podId)
	}
	lazy := hypervisor.HDriver.SupportLazyMode() && vmId == ""

	code, cause, err := daemon.StartPod(podId, "", vmId, nil, lazy, false, types.VM_KEEP_NONE)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	// Prepare the VM status to client
	v := &engine.Env{}
	v.Set("ID", vmId)
	v.SetInt("Code", code)
	v.Set("Cause", cause)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example 21
File: dm.go Project: sulochan/hyper
func CreateNewDevice(containerId, devPrefix, rootPath string) error {
	var metadataPath = fmt.Sprintf("%s/metadata/", rootPath)
	// Get device id from the metadata file
	idMetadataFile := path.Join(metadataPath, containerId)
	if _, err := os.Stat(idMetadataFile); err != nil && os.IsNotExist(err) {
		return err
	}
	jsonData, err := ioutil.ReadFile(idMetadataFile)
	if err != nil {
		return err
	}
	var dat jsonMetadata
	if err := json.Unmarshal(jsonData, &dat); err != nil {
		return err
	}
	deviceId := dat.Device_id
	deviceSize := dat.Size
	// Activate the device for that device ID
	devName := fmt.Sprintf("%s-%s", devPrefix, containerId)
	poolName := fmt.Sprintf("/dev/mapper/%s-pool", devPrefix)
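	// dm-thin target table: <start> <length in 512-byte sectors> thin <pool device> <thin device id>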
	createDeviceCmd := fmt.Sprintf("dmsetup create %s --table \"0 %d thin %s %d\"", devName, deviceSize/512, poolName, deviceId)
	createDeviceCommand := exec.Command("/bin/sh", "-c", createDeviceCmd)
	output, err := createDeviceCommand.Output()
	if err != nil {
		glog.Error(string(output))
		return err
	}
	return nil
}
Example 22
func (daemon *Daemon) CmdCommit(job *engine.Job) error {
	containerId := job.Args[0]
	repo := job.Args[1]
	author := job.Args[2]
	change := job.Args[3]
	message := job.Args[4]
	pause := job.Args[5]

	cli := daemon.DockerCli
	imgId, _, err := cli.SendContainerCommit(containerId, repo, author, change, message, pause)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	v := &engine.Env{}
	v.SetJson("ID", string(imgId))
	v.SetInt("Code", 0)
	v.Set("Cause", "")
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
Example 23
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		glog.Error(err.Error())
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}
Example 24
func (ctx *VmContext) shutdownVM(err bool, msg string) {
	if err {
		ctx.reportVmFault(msg)
		glog.Error("Shutting down because of an exception: ", msg)
	}
	ctx.setTimeout(10)
	ctx.vm <- &DecodedMessage{code: INIT_DESTROYPOD, message: []byte{}}
}
Example 25
func (ctx *VmContext) poweroffVM(err bool, msg string) {
	if err {
		ctx.reportVmFault(msg)
		glog.Error("Shutting down because of an exception: ", msg)
	}
	ctx.DCtx.Shutdown(ctx)
	ctx.timedKill(10)
}
Example 26
func initFailureHandler(ctx *VmContext, ev VmEvent) bool {
	processed := true
	switch ev.Event() {
	case ERROR_INIT_FAIL: // VM connection Failure
		reason := ev.(*InitFailedEvent).Reason
		glog.Error(reason)
	case ERROR_QMP_FAIL: // Device allocate and insert Failure
		reason := "QMP protocol exception"
		if ev.(*DeviceFailed).Session != nil {
			reason = "QMP protocol exception: failed while waiting " + EventString(ev.(*DeviceFailed).Session.Event())
		}
		glog.Error(reason)
	default:
		processed = false
	}
	return processed
}
Example 27
func (d *Driver) Setup() (err error) {
	var (
		vm        *hypervisor.Vm
		ids       []string
		parentIds []string
	)
	if d.daemon == nil {
		d.daemon, err = GetDaemon()
		if err != nil {
			return err
		}
	}
	vm, err = d.daemon.StartVm(d.pullVm, 1, 64, false, types.VM_KEEP_AFTER_SHUTDOWN)
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	defer func() {
		if err != nil {
			d.daemon.KillVm(vm.Id)
		}
	}()

	if err = d.daemon.WaitVmStart(vm); err != nil {
		glog.Error(err)
		return err
	}

	if err = virtualbox.RegisterDisk(d.pullVm, d.pullVm, d.BaseImage(), 4); err != nil {
		glog.Error(err.Error())
		return err
	}
	ids, err = loadIds(path.Join(d.RootPath(), "layers"))
	if err != nil {
		return err
	}

	for _, id := range ids {
		if d.disks[id] {
			continue
		}
		parentIds, err = getParentIds(d.RootPath(), id)
		if err != nil {
			glog.Warningf(err.Error())
			continue
		}
		for _, cid := range parentIds {
			if d.disks[cid] {
				continue
			}
			d.Exists(cid)
			d.disks[cid] = true
		}
		d.disks[id] = true
	}

	return nil
}
Example 28
func (dms *DevMapperStorage) RemoveVolume(podId string, record []byte) error {
	fields := strings.Split(string(record), ":")
	dev_id, _ := strconv.Atoi(fields[1])
	if err := dm.DeleteVolume(dms.DmPoolData, dev_id); err != nil {
		glog.Error(err.Error())
		return err
	}
	return nil
}
Example 29
func InitContext(id string, hub chan VmEvent, client chan *types.VmResponse, dc DriverContext, boot *BootConfig, keep int) (*VmContext, error) {
	var err error

	vmChannel := make(chan *DecodedMessage, 128)

	//dir and sockets:
	homeDir := BaseDir + "/" + id + "/"
	hyperSockName := homeDir + HyperSockName
	ttySockName := homeDir + TtySockName
	consoleSockName := homeDir + ConsoleSockName
	shareDir := homeDir + ShareDirTag

	if dc == nil {
		dc = HDriver.InitContext(homeDir)
	}

	err = os.MkdirAll(shareDir, 0755)
	if err != nil {
		glog.Error("cannot make dir", shareDir, err.Error())
		return nil, err
	}
	defer func() {
		if err != nil {
			os.Remove(homeDir)
		}
	}()

	return &VmContext{
		Id:              id,
		Boot:            boot,
		pciAddr:         PciAddrFrom,
		scsiId:          0,
		attachId:        1,
		Hub:             hub,
		client:          client,
		DCtx:            dc,
		vm:              vmChannel,
		ptys:            newPts(),
		ttySessions:     make(map[string]uint64),
		HomeDir:         homeDir,
		HyperSockName:   hyperSockName,
		TtySockName:     ttySockName,
		ConsoleSockName: consoleSockName,
		ShareDir:        shareDir,
		InterfaceCount:  InterfaceCount,
		timer:           nil,
		handler:         stateInit,
		userSpec:        nil,
		vmSpec:          nil,
		devices:         newDeviceMap(),
		progress:        newProcessingList(),
		lock:            &sync.Mutex{},
		wait:            false,
		Keep:            keep,
	}, nil
}
Example 30
File: dm.go Project: sulochan/hyper
func DeleteVolume(dm *DeviceMapper, dev_id int) error {
	// Remove the thin device from the pool
	parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"delete %d\"", dm.PoolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	return nil
}