Example #1
File: aufs.go Project: kkpapa/hyper
// useDirperm checks whether the dirperm1 mount option can be used with the
// current version of aufs.
func useDirperm() bool {
	enableDirpermLock.Do(func() {
		base, err := ioutil.TempDir("", "docker-aufs-base")
		if err != nil {
			glog.Errorf("error checking dirperm1: %s", err.Error())
			return
		}
		defer os.RemoveAll(base)

		union, err := ioutil.TempDir("", "docker-aufs-union")
		if err != nil {
			glog.Errorf("error checking dirperm1: %s", err.Error())
			return
		}
		defer os.RemoveAll(union)

		opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
		if err := syscall.Mount("none", union, "aufs", 0, opts); err != nil {
			return
		}
		enableDirperm = true
		if err := aufsUnmount(union); err != nil {
			glog.Errorf("error checking dirperm1: failed to unmount %s", err.Error())
		}
	})
	return enableDirperm
}
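The guard above is the standard sync.Once memoization idiom: run the probe once, cache the result for every later caller. A minimal standalone sketch of the same shape, with the aufs mount swapped for a hypothetical marker-file check:

package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	featureLock    sync.Once
	featureEnabled bool
)

// supportsFeature memoizes a one-time capability probe, mirroring the
// enableDirpermLock.Do pattern above: the probe body runs exactly once,
// and every later caller gets the cached result.
func supportsFeature() bool {
	featureLock.Do(func() {
		// Hypothetical stand-in probe: a marker file instead of an aufs mount.
		if _, err := os.Stat("/tmp/feature-marker"); err == nil {
			featureEnabled = true
		}
	})
	return featureEnabled
}

func main() {
	fmt.Println("feature supported:", supportsFeature())
}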
Example #2
File: job.go Project: huangqg/hyper
// Run executes the job and blocks until the job completes.
// If the job fails it returns an error
func (job *Job) Run() (err error) {
	defer func() {
		// Wait for all background tasks to complete
		if job.closeIO {
			if err := job.Stdout.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stderr.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
			if err := job.Stdin.Close(); err != nil {
				glog.Errorf("%s\n", err)
			}
		}
	}()

	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
		return fmt.Errorf("engine is shutdown")
	}
	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
	// every time the daemon is cleanly restarted.
	// The permanent fix is to implement Job.Stop and Job.OnStop so that
	// ServeApi can cooperate and terminate cleanly.
	if job.Name != "serveapi" {
		job.Eng.l.Lock()
		job.Eng.tasks.Add(1)
		job.Eng.l.Unlock()
		defer job.Eng.tasks.Done()
	}
	// FIXME: make this thread-safe
	// FIXME: implement wait
	if !job.end.IsZero() {
		return fmt.Errorf("%s: job has already completed", job.Name)
	}
	// Log beginning and end of the job
	if job.Eng.Logging {
		glog.V(0).Infof("+job %s\n", job.CallString())
		defer func() {
			okerr := "OK"
			if err != nil {
				okerr = fmt.Sprintf("ERR: %s", err)
			}
			glog.V(0).Infof("-job %s %s\n", job.CallString(), okerr)
		}()
	}

	if job.handler == nil {
		return fmt.Errorf("%s: command not found\n", job.Name)
	}

	var errorMessage = bytes.NewBuffer(nil)
	job.Stderr.Add(errorMessage)

	err = job.handler(job)
	job.end = time.Now()

	return
}
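The +job/-job bracketing works because err is a named return: the deferred closure reads whatever value err holds when Run returns. A minimal sketch of just that idiom, with the job machinery elided:

package main

import (
	"fmt"
	"log"
)

// run shows the named-return idiom Job.Run relies on for its
// "+job ... / -job ... OK|ERR" bracketing: the deferred closure observes
// the final value of err after the handler has assigned it.
func run(name string, handler func() error) (err error) {
	log.Printf("+job %s", name)
	defer func() {
		status := "OK"
		if err != nil {
			status = fmt.Sprintf("ERR: %s", err)
		}
		log.Printf("-job %s %s", name, status)
	}()

	err = handler()
	return
}

func main() {
	_ = run("demo", func() error { return fmt.Errorf("boom") })
}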
Example #3
// create and set up the network bridge
func configureBridge(bridgeIP, bridgeIface string) error {
	var ifaceAddr string
	if len(bridgeIP) != 0 {
		_, _, err := net.ParseCIDR(bridgeIP)
		if err != nil {
			glog.Errorf("%s parsecidr failed\n", bridgeIP)
			return err
		}
		ifaceAddr = bridgeIP
	}

	if ifaceAddr == "" {
		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually", bridgeIface, bridgeIface)
	}

	if err := CreateBridgeIface(bridgeIface); err != nil {
		// The bridge may already exist, therefore we can ignore an "exists" error
		if !os.IsExist(err) {
			glog.Errorf("CreateBridgeIface failed %s %s\n", bridgeIface, ifaceAddr)
			return err
		}
	}

	iface, err := net.InterfaceByName(bridgeIface)
	if err != nil {
		return err
	}

	ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
	if err != nil {
		return err
	}

	if ipAddr.Equal(ipNet.IP) {
		ipAddr, err = ipAllocator.RequestIP(ipNet, nil)
	} else {
		ipAddr, err = ipAllocator.RequestIP(ipNet, ipAddr)
	}

	if err != nil {
		return err
	}

	glog.V(3).Infof("Allocate IP Address %s for bridge %s\n", ipAddr, bridgeIface)

	if err := NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
		return fmt.Errorf("Unable to add private network: %s", err)
	}

	if err := NetworkLinkUp(iface); err != nil {
		return fmt.Errorf("Unable to start network bridge: %s", err)
	}
	return nil
}
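configureBridge leans on net.ParseCIDR returning both the specific address and the enclosing network, and on ipAddr.Equal(ipNet.IP) to detect when only a network (no host address) was given. A small self-contained illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.ParseCIDR splits "addr/mask" into the specific address and the
	// enclosing network; configureBridge compares the two to decide whether
	// a concrete host address was requested.
	ipAddr, ipNet, err := net.ParseCIDR("192.168.123.1/24")
	if err != nil {
		panic(err)
	}
	fmt.Println("address:", ipAddr)                                // 192.168.123.1
	fmt.Println("network:", ipNet)                                 // 192.168.123.0/24
	fmt.Println("is the network address:", ipAddr.Equal(ipNet.IP)) // false
}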
Example #4
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s\n", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s\n", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
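The factory pattern here, a closure capturing route metadata and wrapping a fallible handler, works with the standard library alone. A minimal sketch without the mux, version, or CORS pieces:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// makeHandler is a stdlib-only sketch of the factory above: it closes over
// the route metadata and wraps a fallible handler so the logging and the
// error-to-HTTP translation live in one place.
func makeHandler(method, route string, h func(http.ResponseWriter, *http.Request) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Calling %s %s", method, route)
		if err := h(w, r); err != nil {
			log.Printf("Handler for %s %s returned error: %s", method, route, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func main() {
	http.Handle("/ping", makeHandler("GET", "/ping", func(w http.ResponseWriter, r *http.Request) error {
		_, err := fmt.Fprintln(w, "pong")
		return err
	}))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}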
Example #5
func httpError(w http.ResponseWriter, err error) {
	statusCode := http.StatusInternalServerError
	// FIXME: this is brittle and should not be necessary.
	// If we need to differentiate between different possible error types, we should
	// create appropriate error types with clearly defined meaning.
	errStr := strings.ToLower(err.Error())
	if strings.Contains(errStr, "no such") {
		statusCode = http.StatusNotFound
	} else if strings.Contains(errStr, "bad parameter") {
		statusCode = http.StatusBadRequest
	} else if strings.Contains(errStr, "conflict") {
		statusCode = http.StatusConflict
	} else if strings.Contains(errStr, "impossible") {
		statusCode = http.StatusNotAcceptable
	} else if strings.Contains(errStr, "wrong login/password") {
		statusCode = http.StatusUnauthorized
	} else if strings.Contains(errStr, "hasn't been activated") {
		statusCode = http.StatusForbidden
	}

	if err != nil {
		glog.Errorf("HTTP Error: statusCode=%d %v", statusCode, err)
		http.Error(w, err.Error(), statusCode)
	}
}
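Since the mapping is pure string matching, it is easy to lift out and exercise on its own. A sketch with a trimmed-down version of the heuristic, verified through httptest:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// statusFor restates the substring heuristic above so it can be tested in
// isolation; it is the same brittle matching the FIXME warns about.
func statusFor(err error) int {
	s := strings.ToLower(err.Error())
	switch {
	case strings.Contains(s, "no such"):
		return http.StatusNotFound
	case strings.Contains(s, "bad parameter"):
		return http.StatusBadRequest
	case strings.Contains(s, "conflict"):
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	rec := httptest.NewRecorder()
	err := errors.New("no such container: abc123")
	http.Error(rec, err.Error(), statusFor(err))
	fmt.Println(rec.Code) // 404
}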
Example #6
// Release an interface for a selected IP
func Release(releasedIP string, maps []pod.UserContainerPort, file *os.File) error {
	file.Close()
	if err := ipAllocator.ReleaseIP(bridgeIPv4Net, net.ParseIP(releasedIP)); err != nil {
		return err
	}

	if err := ReleasePortMaps(releasedIP, maps); err != nil {
		glog.Errorf("fail to release port map %s", err)
		return err
	}
	return nil
}
Example #7
func (xc *XenContext) RemoveNic(ctx *hypervisor.VmContext, device, mac string, callback hypervisor.VmEvent) {
	go func() {
		res := HyperxlNicRemove(xc.driver.Ctx, (uint32)(xc.domId), mac)
		if res == 0 {
			glog.V(1).Infof("nic %s remove succeeded", device)
			ctx.Hub <- callback
			return
		}
		glog.Errorf("nic %s remove failed", device)
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: callback,
		}
	}()
}
Example #8
func GenRandomMac() (string, error) {
	const alphanum = "0123456789abcdef"
	var bytes = make([]byte, 8)
	_, err := rand.Read(bytes)

	if err != nil {
		glog.Errorf("get random number faild")
		return "", err
	}

	for i, b := range bytes {
		bytes[i] = alphanum[b%byte(len(alphanum))]
	}

	tmp := []string{"52:54", string(bytes[0:2]), string(bytes[2:4]), string(bytes[4:6]), string(bytes[6:8])}
	return strings.Join(tmp, ":"), nil
}
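The result is always of the form 52:54:xx:xx:xx:xx, a prefix commonly associated with QEMU guest NICs, and it round-trips through net.ParseMAC. A compact self-contained restatement:

package main

import (
	"crypto/rand"
	"fmt"
	"net"
)

// randomMac is a compact restatement of GenRandomMac above: four random
// octets behind the fixed 52:54 prefix.
func randomMac() (string, error) {
	buf := make([]byte, 4)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return fmt.Sprintf("52:54:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3]), nil
}

func main() {
	mac, err := randomMac()
	if err != nil {
		panic(err)
	}
	if _, err := net.ParseMAC(mac); err != nil {
		panic(err) // sanity check: the result parses as a valid MAC
	}
	fmt.Println(mac)
}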
Example #9
func (xc *XenContext) AddNic(ctx *hypervisor.VmContext, host *hypervisor.HostNicInfo, guest *hypervisor.GuestNicInfo) {
	go func() {
		callback := &hypervisor.NetDevInsertedEvent{
			Index:      guest.Index,
			DeviceName: guest.Device,
			Address:    guest.Busaddr,
		}

		glog.V(1).Infof("allocate nic %s for dom %d", host.Mac, xc.domId)
		hw, err := net.ParseMAC(host.Mac)
		if err == nil {
			//dev := fmt.Sprintf("vif%d.%d", xc.domId, guest.Index)
			dev := host.Device
			glog.V(1).Infof("add network for %d - ip: %s, br: %s, gw: %s, dev: %s, hw: %s", xc.domId, guest.Ipaddr,
				host.Bridge, host.Bridge, dev, hw.String())

			res := HyperxlNicAdd(xc.driver.Ctx, (uint32)(xc.domId), guest.Ipaddr, host.Bridge, host.Bridge, dev, []byte(hw))
			if res == 0 {

				glog.V(1).Infof("nic %s insert succeeded", guest.Device)

				err = network.UpAndAddToBridge(fmt.Sprintf("vif%d.%d", xc.domId, guest.Index))
				if err != nil {
					glog.Error("fail to add vif to bridge: ", err.Error())
					ctx.Hub <- &hypervisor.DeviceFailed{
						Session: callback,
					}
					HyperxlNicRemove(xc.driver.Ctx, (uint32)(xc.domId), host.Mac)
					return
				}

				ctx.Hub <- callback
				return
			}
			glog.V(1).Infof("nic %s insert succeeded [faked] ", guest.Device)
			ctx.Hub <- callback
			return
		}

		glog.Errorf("nic %s insert failed", guest.Device)
		ctx.Hub <- &hypervisor.DeviceFailed{
			Session: callback,
		}
	}()
}
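The only stdlib call in the hot path is net.ParseMAC, which yields a net.HardwareAddr (a []byte) that can be handed on to libxl. In isolation:

package main

import (
	"fmt"
	"net"
)

func main() {
	// AddNic validates host.Mac with net.ParseMAC before handing it to
	// libxl; the parsed form is a net.HardwareAddr, i.e. a byte slice.
	hw, err := net.ParseMAC("52:54:00:12:34:56")
	if err != nil {
		panic(err)
	}
	fmt.Println(hw.String(), len(hw)) // 52:54:00:12:34:56 6
}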
Example #10
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func(protoAddrParts []string) { // pass a per-iteration copy; the goroutines must not share the loop variable
			glog.V(0).Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
			srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job)
			if err != nil {
				chErrors <- err
				return
			}
			job.Eng.OnShutdown(func() {
				if err := srv.Close(); err != nil {
					glog.Errorf("%s\n", err)
				}
			})
			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
				err = nil
			}
			chErrors <- err
		}(protoAddrParts)
	}

	for i := 0; i < len(protoAddrs); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}
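The shape here is a fan-out/collect: one goroutine per address, an error channel buffered to the worker count, then a blocking drain. A minimal sketch, again passing the address as a parameter so each goroutine gets its own copy:

package main

import (
	"fmt"
	"strings"
)

func main() {
	addrs := []string{"unix:///var/run/demo.sock", "tcp://127.0.0.1:4243"}
	chErrors := make(chan error, len(addrs))

	for _, addr := range addrs {
		go func(addr string) {
			parts := strings.SplitN(addr, "://", 2)
			if len(parts) != 2 {
				chErrors <- fmt.Errorf("bad address %q", addr)
				return
			}
			// A real server would listen on parts[0] (proto) / parts[1] (addr) here.
			chErrors <- nil
		}(addr)
	}

	// Drain one result per worker; the first failure surfaces here.
	for range addrs {
		if err := <-chErrors; err != nil {
			fmt.Println("error:", err)
		}
	}
}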
Example #11
func (p *PortMapper) ReleaseMap(protocol string, hostPort int) error {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	var pset PortSet

	if strings.EqualFold(protocol, "udp") {
		pset = p.udpMap
	} else {
		pset = p.tcpMap
	}

	_, ok := pset[hostPort]
	if !ok {
		glog.Errorf("Host port %d has not been used", hostPort)
	}

	delete(pset, hostPort)
	return nil
}
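The structure is just a mutex-guarded map where releasing an unused port is logged rather than treated as fatal; deleting a missing key is a no-op in Go. A stand-in sketch:

package main

import (
	"fmt"
	"sync"
)

// portSet is a minimal stand-in for the PortMapper above: a map guarded
// by a mutex, where releasing an unused port only logs a complaint.
type portSet struct {
	mu    sync.Mutex
	ports map[int]struct{}
}

func (p *portSet) release(port int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if _, ok := p.ports[port]; !ok {
		fmt.Printf("Host port %d has not been used\n", port)
	}
	delete(p.ports, port) // deleting a missing key is a no-op
}

func main() {
	p := &portSet{ports: map[int]struct{}{8080: {}}}
	p.release(8080)
	p.release(9090) // prints the warning
}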
Example #12
func diskRoutine(add bool, xc *XenContext, ctx *hypervisor.VmContext,
	name, sourceType, filename, format string, id int, callback hypervisor.VmEvent) {
	backend := LIBXL_DISK_BACKEND_TAP
	if strings.HasPrefix(filename, "/dev/") {
		backend = LIBXL_DISK_BACKEND_PHY
	}
	dfmt := LIBXL_DISK_FORMAT_RAW
	if format == "qcow" || format == "qcow2" {
		dfmt = LIBXL_DISK_FORMAT_QCOW2
	}

	devName := xvdId2Name(id)
	var res int
	var op string = "insert"
	if add {
		res = HyperxlDiskAdd(xc.driver.Ctx, uint32(xc.domId), filename, devName, LibxlDiskBackend(backend), LibxlDiskFormat(dfmt))
		callback = &hypervisor.BlockdevInsertedEvent{
			Name:       name,
			SourceType: sourceType,
			DeviceName: devName,
			ScsiId:     id,
		}
	} else {
		op = "remove"
		res = HyperxlDiskRemove(xc.driver.Ctx, uint32(xc.domId), filename, devName, LibxlDiskBackend(backend), LibxlDiskFormat(dfmt))
	}
	if res == 0 {
		glog.V(1).Infof("Disk %s (%s) %s succeeded", devName, filename, op)
		ctx.Hub <- callback
		return
	}

	glog.Errorf("Disk %s (%s) insert %s failed", devName, filename, op)
	ctx.Hub <- &hypervisor.DeviceFailed{
		Session: callback,
	}
}
Example #13
func UpAndAddToBridge(name string) error {
	inf, err := net.InterfaceByName(name)
	if err != nil {
		glog.Error("cannot find network interface ", name)
		return err
	}
	brg, err := net.InterfaceByName(BridgeIface)
	if err != nil {
		glog.Error("cannot find bridge interface ", BridgeIface)
		return err
	}
	err = AddToBridge(inf, brg)
	if err != nil {
		glog.Errorf("cannot add %s to %s ", name, BridgeIface)
		return err
	}
	err = NetworkLinkUp(inf)
	if err != nil {
		glog.Error("cannot up interface ", name)
		return err
	}

	return nil
}
Example #14
// This function will only be invoked during daemon start
func (daemon *Daemon) AssociateAllVms() error {
	for _, mypod := range daemon.podList {
		if mypod.Vm == "" {
			continue
		}
		data, err := daemon.GetPodByName(mypod.Id)
		if err != nil {
			continue
		}
		userPod, err := pod.ProcessPodBytes(data)
		if err != nil {
			continue
		}
		glog.V(1).Infof("Associate the POD(%s) with VM(%s)", mypod.Id, mypod.Vm)
		var (
			qemuPodEvent  = make(chan hypervisor.VmEvent, 128)
			qemuStatus    = make(chan *types.QemuResponse, 128)
			subQemuStatus = make(chan *types.QemuResponse, 128)
		)
		data, err = daemon.GetVmData(mypod.Vm)
		if err != nil {
			continue
		}
		glog.V(1).Infof("The data for vm(%s) is %v", mypod.Vm, data)
		go hypervisor.VmAssociate(hypervisorDriver, mypod.Vm, qemuPodEvent,
			qemuStatus, mypod.Wg, data)
		ass := <-qemuStatus
		if ass.Code != types.E_OK {
			glog.Errorf("cannot associate with vm: %s, error status %d (%s)", mypod.Vm, ass.Code, ass.Cause)
			return errors.New("load vm status failed")
		}
		if err := daemon.SetQemuChan(mypod.Vm, qemuPodEvent, qemuStatus, subQemuStatus); err != nil {
			glog.V(1).Infof("SetQemuChan error: %s", err.Error())
			return err
		}
		vm := &Vm{
			Id:     mypod.Vm,
			Pod:    mypod,
			Status: types.S_VM_ASSOCIATED,
			Cpu:    userPod.Resource.Vcpu,
			Mem:    userPod.Resource.Memory,
		}
		daemon.AddVm(vm)
		daemon.SetContainerStatus(mypod.Id, types.S_POD_RUNNING)
		mypod.Status = types.S_POD_RUNNING
		go func(interface{}) {
			for {
				podId := mypod.Id
				qemuResponse := <-qemuStatus
				subQemuStatus <- qemuResponse
				if qemuResponse.Code == types.E_POD_FINISHED {
					data := qemuResponse.Data.([]uint32)
					daemon.SetPodContainerStatus(podId, data)
				} else if qemuResponse.Code == types.E_VM_SHUTDOWN {
					if daemon.podList[mypod.Id].Status == types.S_POD_RUNNING {
						daemon.podList[mypod.Id].Status = types.S_POD_SUCCEEDED
						daemon.SetContainerStatus(podId, types.S_POD_SUCCEEDED)
					}
					daemon.podList[mypod.Id].Vm = ""
					daemon.RemoveVm(mypod.Vm)
					daemon.DeleteQemuChan(mypod.Vm)
					if mypod.Type == "kubernetes" {
						switch mypod.Status {
						case types.S_POD_SUCCEEDED:
							if mypod.RestartPolicy == "always" {
								daemon.RestartPod(mypod)
							} else {
								daemon.DeletePodFromDB(podId)
								for _, c := range mypod.Containers {
									glog.V(1).Infof("Ready to rm container: %s", c.Id)
									if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
										glog.V(1).Infof("Error to rm container: %s", err.Error())
									}
								}
								//								daemon.RemovePod(podId)
								daemon.DeletePodContainerFromDB(podId)
								daemon.DeleteVolumeId(podId)
							}
							break
						case types.S_POD_FAILED:
							if mypod.RestartPolicy != "never" {
								daemon.RestartPod(mypod)
							} else {
								daemon.DeletePodFromDB(podId)
								for _, c := range mypod.Containers {
									glog.V(1).Infof("Ready to rm container: %s", c.Id)
									if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
										glog.V(1).Infof("Error to rm container: %s", err.Error())
									}
								}
								//								daemon.RemovePod(podId)
								daemon.DeletePodContainerFromDB(podId)
								daemon.DeleteVolumeId(podId)
							}
							break
						default:
							break
						}
					}
					break
				}
			}
		}(subQemuStatus)
	}
	return nil
}
Example #15
func (daemon *Daemon) StartPod(podId, vmId, podArgs string) (int, string, error) {
	var (
		fstype            string
		poolName          string
		volPoolName       string
		devPrefix         string
		storageDriver     string
		rootPath          string
		devFullName       string
		rootfs            string
		containerInfoList = []*hypervisor.ContainerInfo{}
		volumuInfoList    = []*hypervisor.VolumeInfo{}
		cli               = daemon.dockerCli
		qemuPodEvent      = make(chan hypervisor.VmEvent, 128)
		qemuStatus        = make(chan *types.QemuResponse, 128)
		subQemuStatus     = make(chan *types.QemuResponse, 128)
		sharedDir         = path.Join(hypervisor.BaseDir, vmId, hypervisor.ShareDirTag)
		podData           []byte
		mypod             *Pod
		wg                *sync.WaitGroup
		err               error
		uid               string
		gid               string
	)
	if podArgs == "" {
		mypod = daemon.podList[podId]
		if mypod == nil {
			return -1, "", fmt.Errorf("Can not find the POD instance of %s", podId)
		}
		podData, err = daemon.GetPodByName(podId)
		if err != nil {
			return -1, "", err
		}
		wg = mypod.Wg
	} else {
		podData = []byte(podArgs)
	}
	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return -1, "", err
	}

	vm := daemon.vmList[vmId]
	if vm == nil {
		glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.kernel, daemon.initrd)
		var (
			cpu = 1
			mem = 128
		)
		if userPod.Resource.Vcpu > 0 {
			cpu = userPod.Resource.Vcpu
		}
		if userPod.Resource.Memory > 0 {
			mem = userPod.Resource.Memory
		}
		b := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: daemon.kernel,
			Initrd: daemon.initrd,
			Bios:   daemon.bios,
			Cbfs:   daemon.cbfs,
		}
		go hypervisor.VmLoop(hypervisorDriver, vmId, qemuPodEvent, qemuStatus, b)
		if err := daemon.SetQemuChan(vmId, qemuPodEvent, qemuStatus, subQemuStatus); err != nil {
			glog.V(1).Infof("SetQemuChan error: %s", err.Error())
			return -1, "", err
		}

	} else {
		ret1, ret2, ret3, err := daemon.GetQemuChan(vmId)
		if err != nil {
			return -1, "", err
		}
		qemuPodEvent = ret1.(chan hypervisor.VmEvent)
		qemuStatus = ret2.(chan *types.QemuResponse)
		subQemuStatus = ret3.(chan *types.QemuResponse)
	}
	if podArgs != "" {
		wg = new(sync.WaitGroup)
		if err := daemon.CreatePod(podArgs, podId, wg); err != nil {
			glog.Error(err.Error())
			return -1, "", err
		}
		mypod = daemon.podList[podId]
	}

	storageDriver = daemon.Storage.StorageType
	if storageDriver == "devicemapper" {
		poolName = daemon.Storage.PoolName
		fstype = daemon.Storage.Fstype
		volPoolName = "hyper-volume-pool"
		devPrefix = poolName[:strings.Index(poolName, "-pool")]
		rootPath = "/var/lib/docker/devicemapper"
		rootfs = "/rootfs"
	} else if storageDriver == "aufs" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	} else if storageDriver == "overlay" {
		rootPath = daemon.Storage.RootPath
		fstype = daemon.Storage.Fstype
		rootfs = ""
	}

	// Process the 'Files' section
	files := make(map[string](pod.UserFile))
	for _, v := range userPod.Files {
		files[v.Name] = v
	}

	for i, c := range mypod.Containers {
		var jsonResponse *docker.ConfigJSON
		if jsonResponse, err = cli.GetContainerInfo(c.Id); err != nil {
			glog.Error("got error when get container Info ", err.Error())
			return -1, "", err
		}

		if storageDriver == "devicemapper" {
			if err := dm.CreateNewDevice(c.Id, devPrefix, rootPath); err != nil {
				return -1, "", err
			}
			devFullName, err = dm.MountContainerToSharedDir(c.Id, sharedDir, devPrefix)
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return -1, "", err
			}
			fstype, err = dm.ProbeFsType(devFullName)
			if err != nil {
				fstype = "ext4"
			}
		} else if storageDriver == "aufs" {
			devFullName, err = aufs.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return -1, "", err
			}
			devFullName = "/" + c.Id + "/rootfs"
		} else if storageDriver == "overlay" {
			devFullName, err = overlay.MountContainerToSharedDir(c.Id, rootPath, sharedDir, "")
			if err != nil {
				glog.Error("got error when mount container to share dir ", err.Error())
				return -1, "", err
			}
			devFullName = "/" + c.Id + "/rootfs"
		}

		for _, f := range userPod.Containers[i].Files {
			targetPath := f.Path
			file, ok := files[f.Filename]
			if !ok {
				continue
			}
			var fromFile = "/tmp/" + file.Name
			defer os.RemoveAll(fromFile)
			if file.Uri != "" {
				err = utils.DownloadFile(file.Uri, fromFile)
				if err != nil {
					return -1, "", err
				}
			} else if file.Contents != "" {
				err = ioutil.WriteFile(fromFile, []byte(file.Contents), 0666)
				if err != nil {
					return -1, "", err
				}
			} else {
				continue
			}
			// we need to decode the content
			fi, err := os.Open(fromFile)
			if err != nil {
				return -1, "", err
			}
			defer fi.Close()
			fileContent, err := ioutil.ReadAll(fi)
			if err != nil {
				return -1, "", err
			}
			if file.Encoding == "base64" {
				newContent, err := utils.Base64Decode(string(fileContent))
				if err != nil {
					return -1, "", err
				}
				err = ioutil.WriteFile(fromFile, []byte(newContent), 0666)
				if err != nil {
					return -1, "", err
				}
			} else {
				err = ioutil.WriteFile(fromFile, []byte(file.Contents), 0666)
				if err != nil {
					return -1, "", err
				}
			}
			// get the uid and gid for that attached file
			fileUser := f.User
			fileGroup := f.Group
			u, _ := user.Current()
			if fileUser == "" {
				uid = u.Uid
			} else {
				u, _ = user.Lookup(fileUser)
				uid = u.Uid
				gid = u.Gid
			}
			if fileGroup == "" {
				gid = u.Gid
			}

			if storageDriver == "devicemapper" {
				err := dm.AttachFiles(c.Id, devPrefix, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return -1, "", err
				}
			} else if storageDriver == "aufs" {
				err := aufs.AttachFiles(c.Id, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return -1, "", err
				}
			} else if storageDriver == "overlay" {
				err := overlay.AttachFiles(c.Id, fromFile, targetPath, rootPath, f.Perm, uid, gid)
				if err != nil {
					glog.Error("got error when attach files ", err.Error())
					return -1, "", err
				}
			}
		}

		env := make(map[string]string)
		for _, v := range jsonResponse.Config.Env {
			env[v[:strings.Index(v, "=")]] = v[strings.Index(v, "=")+1:]
		}
		for _, e := range userPod.Containers[i].Envs {
			env[e.Env] = e.Value
		}
		glog.V(1).Infof("Parsing envs for container %d: %d Evs", i, len(env))
		glog.V(1).Infof("The fs type is %s", fstype)
		glog.V(1).Infof("WorkingDir is %s", string(jsonResponse.Config.WorkingDir))
		glog.V(1).Infof("Image is %s", string(devFullName))
		containerInfo := &hypervisor.ContainerInfo{
			Id:         c.Id,
			Rootfs:     rootfs,
			Image:      devFullName,
			Fstype:     fstype,
			Workdir:    jsonResponse.Config.WorkingDir,
			Entrypoint: jsonResponse.Config.Entrypoint,
			Cmd:        jsonResponse.Config.Cmd,
			Envs:       env,
		}
		glog.V(1).Infof("Container Info is \n%v", containerInfo)
		containerInfoList = append(containerInfoList, containerInfo)
		glog.V(1).Infof("container %d created %s, workdir %s, env: %v", i, c.Id, jsonResponse.Config.WorkingDir, env)
	}

	// Process the 'Volumes' section
	for _, v := range userPod.Volumes {
		if v.Source == "" {
			if storageDriver == "devicemapper" {
				volName := fmt.Sprintf("%s-%s-%s", volPoolName, podId, v.Name)
				dev_id, _ := daemon.GetVolumeId(podId, volName)
				glog.Error("DeviceID is %d", dev_id)
				if dev_id < 1 {
					dev_id, _ = daemon.GetMaxDeviceId()
					err := daemon.CreateVolume(podId, volName, fmt.Sprintf("%d", dev_id+1), false)
					if err != nil {
						return -1, "", err
					}
				} else {
					err := daemon.CreateVolume(podId, volName, fmt.Sprintf("%d", dev_id), true)
					if err != nil {
						return -1, "", err
					}
				}

				fstype, err = dm.ProbeFsType("/dev/mapper/" + volName)
				if err != nil {
					fstype = "ext4"
				}
				myVol := &hypervisor.VolumeInfo{
					Name:     v.Name,
					Filepath: path.Join("/dev/mapper/", volName),
					Fstype:   fstype,
					Format:   "raw",
				}
				volumuInfoList = append(volumuInfoList, myVol)
				glog.V(1).Infof("volume %s created with dm as %s", v.Name, volName)
				continue

			} else {
				// Make sure the v.Name is given
				v.Source = path.Join("/var/tmp/hyper/", v.Name)
				if _, err := os.Stat(v.Source); err != nil && os.IsNotExist(err) {
					if err := os.MkdirAll(v.Source, os.FileMode(0777)); err != nil {
						return -1, "", err
					}
				}
				v.Driver = "vfs"
			}
		}

		if v.Driver != "vfs" {
			glog.V(1).Infof("bypass %s volume %s", v.Driver, v.Name)
			continue
		}

		// If the source is not empty, we need to bind that dir to sharedDir
		var flags uintptr = syscall.MS_BIND

		mountSharedDir := pod.RandStr(10, "alpha")
		targetDir := path.Join(sharedDir, mountSharedDir)
		glog.V(1).Infof("trying to bind dir %s to %s", v.Source, targetDir)

		if err := os.MkdirAll(targetDir, 0755); err != nil && !os.IsExist(err) {
			glog.Errorf("error to create dir %s for volume %s", targetDir, v.Name)
			return -1, "", err
		}

		if err := syscall.Mount(v.Source, targetDir, "dir", flags, "--bind"); err != nil {
			glog.Errorf("bind dir %s failed: %s", v.Source, err.Error())
			return -1, "", err
		}
		myVol := &hypervisor.VolumeInfo{
			Name:     v.Name,
			Filepath: mountSharedDir,
			Fstype:   "dir",
			Format:   "",
		}
		glog.V(1).Infof("dir %s is bound to %s", v.Source, targetDir)
		volumuInfoList = append(volumuInfoList, myVol)
	}

	go func(interface{}) {
		for {
			qemuResponse := <-qemuStatus
			subQemuStatus <- qemuResponse
			if qemuResponse.Code == types.E_POD_FINISHED {
				data := qemuResponse.Data.([]uint32)
				daemon.SetPodContainerStatus(podId, data)
				daemon.podList[podId].Vm = ""
			} else if qemuResponse.Code == types.E_VM_SHUTDOWN {
				if daemon.podList[podId].Status == types.S_POD_RUNNING {
					daemon.podList[podId].Status = types.S_POD_SUCCEEDED
					daemon.SetContainerStatus(podId, types.S_POD_SUCCEEDED)
				}
				daemon.podList[podId].Vm = ""
				daemon.RemoveVm(vmId)
				daemon.DeleteQemuChan(vmId)
				mypod = daemon.podList[podId]
				if mypod.Type == "kubernetes" {
					switch mypod.Status {
					case types.S_POD_SUCCEEDED:
						if mypod.RestartPolicy == "always" {
							daemon.RestartPod(mypod)
						} else {
							daemon.DeletePodFromDB(podId)
							for _, c := range daemon.podList[podId].Containers {
								glog.V(1).Infof("Ready to rm container: %s", c.Id)
								if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
									glog.V(1).Infof("Error to rm container: %s", err.Error())
								}
							}
							//							daemon.RemovePod(podId)
							daemon.DeletePodContainerFromDB(podId)
							daemon.DeleteVolumeId(podId)
						}
						break
					case types.S_POD_FAILED:
						if mypod.RestartPolicy != "never" {
							daemon.RestartPod(mypod)
						} else {
							daemon.DeletePodFromDB(podId)
							for _, c := range daemon.podList[podId].Containers {
								glog.V(1).Infof("Ready to rm container: %s", c.Id)
								if _, _, err = daemon.dockerCli.SendCmdDelete(c.Id); err != nil {
									glog.V(1).Infof("Error to rm container: %s", err.Error())
								}
							}
							//							daemon.RemovePod(podId)
							daemon.DeletePodContainerFromDB(podId)
							daemon.DeleteVolumeId(podId)
						}
						break
					default:
						break
					}
				}
				break
			}
		}
	}(subQemuStatus)

	if daemon.podList[podId].Type == "kubernetes" {
		for _, c := range userPod.Containers {
			c.RestartPolicy = "never"
		}
	}

	fmt.Printf("POD id is %s\n", podId)
	runPodEvent := &hypervisor.RunPodCommand{
		Spec:       userPod,
		Containers: containerInfoList,
		Volumes:    volumuInfoList,
		Wg:         wg,
	}
	qemuPodEvent <- runPodEvent
	daemon.podList[podId].Status = types.S_POD_RUNNING
	// Set the container status to online
	daemon.SetContainerStatus(podId, types.S_POD_RUNNING)

	// wait for the qemu response
	var qemuResponse *types.QemuResponse
	for {
		qemuResponse = <-subQemuStatus
		glog.V(1).Infof("Get the response from QEMU, VM id is %s!", qemuResponse.VmId)
		if qemuResponse.Code == types.E_VM_RUNNING {
			continue
		}
		if qemuResponse.VmId == vmId {
			break
		}
	}
	if qemuResponse.Data == nil {
		return qemuResponse.Code, qemuResponse.Cause, fmt.Errorf("QEMU response data is nil")
	}
	data := qemuResponse.Data.([]byte)
	daemon.UpdateVmData(vmId, data)
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vmId); err != nil {
		glog.Error(err.Error())
	}

	// XXX we should not close qemuStatus chan, it will be closed in shutdown process
	return qemuResponse.Code, qemuResponse.Cause, nil
}
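One detail worth isolating: injected file contents are decoded when Encoding is "base64". utils.Base64Decode is presumably a thin wrapper over the stdlib call:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// StartPod decodes injected file contents when Encoding is "base64";
	// the stdlib equivalent of utils.Base64Decode:
	decoded, err := base64.StdEncoding.DecodeString("aGVsbG8gcG9k")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // hello pod
}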
Example #16
func NewDaemonFromDirectory(eng *engine.Engine) (*Daemon, error) {
	// register portallocator release on shutdown
	eng.OnShutdown(func() {
		if err := portallocator.ReleaseAll(); err != nil {
			glog.Errorf("portallocator.ReleaseAll(): %s", err.Error())
		}
	})
	// Check that the system is supported and we have sufficient privileges
	if runtime.GOOS != "linux" {
		return nil, fmt.Errorf("The Docker daemon is only supported on linux")
	}
	if os.Geteuid() != 0 {
		return nil, fmt.Errorf("The Docker daemon needs to be run as root")
	}
	if err := checkKernel(); err != nil {
		return nil, err
	}

	cfg, err := goconfig.LoadConfigFile(eng.Config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", eng.Config, err.Error())
		return nil, err
	}
	kernel, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Kernel")
	initrd, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Initrd")
	glog.V(0).Infof("The config: kernel=%s, initrd=%s", kernel, initrd)
	biface, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Bridge")
	bridgeip, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "BridgeIP")
	glog.V(0).Infof("The config: bridge=%s, ip=%s", biface, bridgeip)
	bios, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Bios")
	cbfs, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Cbfs")
	glog.V(0).Infof("The config: bios=%s, cbfs=%s", bios, cbfs)
	host, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Host")

	var tempdir = "/var/run/hyper/"
	os.Setenv("TMPDIR", tempdir)
	if err := os.MkdirAll(tempdir, 0755); err != nil && !os.IsExist(err) {
		return nil, err
	}

	var realRoot = "/var/lib/hyper/"
	// Create the root directory if it doesn't exist
	if err := os.MkdirAll(realRoot, 0755); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if err := network.InitNetwork(biface, bridgeip); err != nil {
		glog.Errorf("InitNetwork failed, %s\n", err.Error())
		return nil, err
	}

	var (
		proto   = "unix"
		addr    = "/var/run/docker.sock"
		db_file = fmt.Sprintf("%s/hyper.db", realRoot)
	)
	db, err := leveldb.OpenFile(db_file, nil)
	if err != nil {
		glog.Errorf("open leveldb file failed, %s\n", err.Error())
		return nil, err
	}
	dockerCli := docker.NewDockerCli("", proto, addr, nil)
	qemuchan := map[string]interface{}{}
	qemuclient := map[string]interface{}{}
	subQemuClient := map[string]interface{}{}
	cList := []*Container{}
	pList := map[string]*Pod{}
	vList := map[string]*Vm{}
	daemon := &Daemon{
		ID:                fmt.Sprintf("%d", os.Getpid()),
		db:                db,
		eng:               eng,
		kernel:            kernel,
		initrd:            initrd,
		bios:              bios,
		cbfs:              cbfs,
		dockerCli:         dockerCli,
		containerList:     cList,
		podList:           pList,
		vmList:            vList,
		qemuChan:          qemuchan,
		qemuClientChan:    qemuclient,
		subQemuClientChan: subQemuClient,
		Host:              host,
	}

	stor := &Storage{}
	// Get the docker daemon info
	body, _, err := dockerCli.SendCmdInfo()
	if err != nil {
		return nil, err
	}
	outInfo := engine.NewOutput()
	remoteInfo, err := outInfo.AddEnv()
	if err != nil {
		return nil, err
	}
	if _, err := outInfo.Write(body); err != nil {
		return nil, fmt.Errorf("Error while reading remote info!\n")
	}
	outInfo.Close()
	storageDriver := remoteInfo.Get("Driver")
	stor.StorageType = storageDriver
	if storageDriver == "devicemapper" {
		if remoteInfo.Exists("DriverStatus") {
			var driverStatus [][2]string
			if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
				return nil, err
			}
			for _, pair := range driverStatus {
				if pair[0] == "Pool Name" {
					stor.PoolName = pair[1]
				}
				if pair[0] == "Backing Filesystem" {
					if strings.Contains(pair[1], "ext") {
						stor.Fstype = "ext4"
					} else if strings.Contains(pair[1], "xfs") {
						stor.Fstype = "xfs"
					} else {
						stor.Fstype = "dir"
					}
					break
				}
			}
		}
	} else {
		if remoteInfo.Exists("DriverStatus") {
			var driverStatus [][2]string
			if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
				return nil, err
			}
			for _, pair := range driverStatus {
				if pair[0] == "Root Dir" {
					stor.RootPath = pair[1]
				}
				if pair[0] == "Backing Filesystem" {
					stor.Fstype = "dir"
					break
				}
			}
		}
	}
	daemon.Storage = stor
	dmPool := dm.DeviceMapper{
		Datafile:         "/var/lib/hyper/data",
		Metadatafile:     "/var/lib/hyper/metadata",
		DataLoopFile:     "/dev/loop6",
		MetadataLoopFile: "/dev/loop7",
		PoolName:         "hyper-volume-pool",
		Size:             20971520 * 512,
	}
	if storageDriver == "devicemapper" {
		daemon.Storage.DmPoolData = &dmPool
		// Prepare the DeviceMapper storage
		if err := dm.CreatePool(&dmPool); err != nil {
			return nil, err
		}
	} else {
		daemon.CleanVolume(0)
	}
	eng.OnShutdown(func() {
		if err := daemon.shutdown(); err != nil {
			glog.Errorf("Error during daemon.shutdown(): %v", err)
		}
	})

	return daemon, nil
}
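Docker's info endpoint reports DriverStatus as an array of [key, value] pairs, which is why the code decodes into [][2]string. The decoding in isolation:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// DriverStatus arrives as [[key, value], ...]; each inner array maps
	// onto a [2]string, mirroring the GetJson call above.
	raw := `[["Pool Name","docker-pool"],["Backing Filesystem","extfs"]]`
	var driverStatus [][2]string
	if err := json.Unmarshal([]byte(raw), &driverStatus); err != nil {
		panic(err)
	}
	for _, pair := range driverStatus {
		fmt.Printf("%s = %s\n", pair[0], pair[1])
	}
}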
Example #17
func daemon(cmd string, argv []string, pipe int) error {

	// create a subprocess
	pid, err := fork(false)
	if err != nil {
		return err
	} else if pid > 0 {
		go func() {
			wp, err := syscall.Wait4(int(pid), nil, 0, nil)
			if err == nil {
				glog.V(3).Infof("collect child %d", wp)
			} else {
				glog.Errorf("error during wait %d: %s", pid, err.Error())
			}
		}()
		// return the parent
		return nil
	}

	// in the first child: fork again, so the grandchild survives as the daemon
	_, err = fork(true)
	if err != nil {
		glog.Error("second fork failed: ", err.Error())
		os.Exit(-1)
	}

	cur := os.Getpid()
	glog.V(1).Infof("qemu daemon pid %d.", cur)
	//Change the file mode mask
	_ = syscall.Umask(0)

	// create a new SID for the child process
	s_ret, err := syscall.Setsid()
	if err != nil {
		glog.Info("Error: syscall.Setsid errno: ", err.Error())
		os.Exit(-1)
	}
	if s_ret < 0 {
		glog.Errorf("setsid return negative value: %d", s_ret)
		os.Exit(-1)
	}

	os.Chdir("/")

	f, e := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if e == nil {
		fd := f.Fd()
		syscall.Dup2(int(fd), int(os.Stdin.Fd()))
		syscall.Dup2(int(fd), int(os.Stdout.Fd()))
		syscall.Dup2(int(fd), int(os.Stderr.Fd()))
	}

	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(cur))
	syscall.Write(pipe, buf)
	syscall.Close(pipe)

	fds := listFd()
	for _, fd := range fds {
		if f, err := strconv.Atoi(fd); err == nil && f > 2 {
			glog.V(1).Infof("close fd %d", f)
			syscall.Close(f)
		}
	}

	err = syscall.Exec(cmd, argv, []string{})
	if err != nil {
		glog.Error("fail to exec qemu process")
		os.Exit(-1)
	}

	return nil
}
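The child reports its pid to the parent as four big-endian bytes over the pipe. The encoding round-trip in miniature:

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	// The daemonized child above sends its pid as 4 big-endian bytes;
	// the parent decodes them with the matching Uint32 call.
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(os.Getpid()))
	fmt.Println("encoded:", buf, "decoded:", binary.BigEndian.Uint32(buf))
}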
Example #18
func Allocate(requestedIP string) (*Settings, error) {
	var (
		req   ifReq
		errno syscall.Errno
	)

	ip, err := ipAllocator.RequestIP(bridgeIPv4Net, net.ParseIP(requestedIP))
	if err != nil {
		return nil, err
	}

	maskSize, _ := bridgeIPv4Net.Mask.Size()

	tapFile, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}

	req.Flags = CIFF_TAP | CIFF_NO_PI | CIFF_ONE_QUEUE
	_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, tapFile.Fd(),
		uintptr(syscall.TUNSETIFF),
		uintptr(unsafe.Pointer(&req)))
	if errno != 0 {
		err = fmt.Errorf("create tap device failed\n")
		tapFile.Close()
		return nil, err
	}

	device := strings.Trim(string(req.Name[:]), "\x00")

	tapIface, err := net.InterfaceByName(device)
	if err != nil {
		glog.Errorf("get interface by name %s failed %s", device, err)
		tapFile.Close()
		return nil, err
	}

	bIface, err := net.InterfaceByName(BridgeIface)
	if err != nil {
		glog.Errorf("get interface by name %s failed", BridgeIface)
		tapFile.Close()
		return nil, err
	}

	err = AddToBridge(tapIface, bIface)
	if err != nil {
		glog.Errorf("Add to bridge failed %s %s", BridgeIface, device)
		tapFile.Close()
		return nil, err
	}

	err = NetworkLinkUp(tapIface)
	if err != nil {
		glog.Errorf("Link up device %s failed", tapIface)
		tapFile.Close()
		return nil, err
	}

	mac, err := GenRandomMac()
	if err != nil {
		glog.Errorf("Generate Random Mac address failed")
		tapFile.Close()
		return nil, err
	}

	networkSettings := &Settings{
		Mac:         mac,
		IPAddress:   ip.String(),
		Gateway:     bridgeIPv4Net.IP.String(),
		Bridge:      BridgeIface,
		IPPrefixLen: maskSize,
		Device:      device,
		File:        tapFile,
	}

	return networkSettings, nil
}
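The ifreq name comes back from the TUNSETIFF ioctl NUL-padded to the buffer size, hence the strings.Trim before net.InterfaceByName. In isolation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Simulate a fixed-size, NUL-padded interface name as the kernel
	// returns it, then trim it the way Allocate does.
	var name [16]byte
	copy(name[:], "tap0")
	device := strings.Trim(string(name[:]), "\x00")
	fmt.Printf("%q\n", device) // "tap0"
}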
Example #19
func mainDaemon(config, host string) {
	glog.V(0).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	eng := engine.New(config)

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s\n", err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s\n", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s\n",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!\n")
		return
	}
	defaultHost := []string{}
	if host != "" {
		defaultHost = append(defaultHost, host)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)

	// The serve API job never exits unless an error occurs.
	// We need to start it as a goroutine and wait on it so the
	// daemon doesn't exit.
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v\n", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization\n")

	if err := d.Restore(); err != nil {
		glog.Warningf("Fail to restore the previous VM")
		return
	}

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v\n", errAPI)
		}
		break
	case <-stop:
		d.DestroyAndKeepVm()
		eng.Shutdown()
		break
	case <-stopAll:
		d.DestroyAllVm()
		eng.Shutdown()
		break
	}
}
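The shutdown wiring splits signals into two channels: SIGINT/SIGTERM destroy all VMs, while SIGHUP takes the gentler path that keeps them. The same select skeleton, runnable on its own:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Mirrors mainDaemon's wiring: one channel per shutdown flavor,
	// then a blocking select on whichever signal arrives first.
	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	select {
	case s := <-stopAll:
		fmt.Println("full shutdown on", s)
	case s := <-stop:
		fmt.Println("keep-VM shutdown on", s)
	}
}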