func (vc *VBoxContext) Kill(ctx *hypervisor.VmContext) {
	go func() {
		// detach the bootable Disk
		m := vc.Machine
		if m == nil {
			return
		}
		name := m.Name
		m.Poweroff()
		if err := vc.detachDisk(m.Name, 0); err != nil {
			glog.Warningf("failed to detach the disk of VBox(%s), %s", name, err.Error())
		}
		if ctx.Keep < types.VM_KEEP_AFTER_SHUTDOWN {
			if err := m.Delete(); err != nil {
				glog.Warningf("failed to delete the VBox(%s), %s", name, err.Error())
			}
			delete(vc.Driver.Machines, name)
			args := fmt.Sprintf("ps aux | grep %s | grep -v grep | awk '{print \"kill -9 \" $2}' | sh", name)
			cmd := exec.Command("/bin/sh", "-c", args)
			if err := cmd.Run(); err != nil {
				ctx.Hub <- &hypervisor.VmKilledEvent{Success: false}
				return
			}
			os.RemoveAll(path.Join(hypervisor.BaseDir, "vm", name))
		}
		ctx.Hub <- &hypervisor.VmKilledEvent{Success: true}
	}()
}
func (daemon *Daemon) CmdTty(job *engine.Job) (err error) {
	// four arguments are required: pod/vm/container id, tag, height and width
	if len(job.Args) < 4 {
		return nil
	}
	var (
		podID     = job.Args[0]
		tag       = job.Args[1]
		h         = job.Args[2]
		w         = job.Args[3]
		container string
		vmid      string
	)
	if strings.Contains(podID, "pod-") {
		container = ""
		vmid, err = daemon.GetVmByPodId(podID)
		if err != nil {
			return err
		}
	} else if strings.Contains(podID, "vm-") {
		vmid = podID
	} else {
		container = podID
		podID, err = daemon.GetPodByContainer(container)
		if err != nil {
			return err
		}
		vmid, err = daemon.GetVmByPodId(podID)
		if err != nil {
			return err
		}
	}

	vm, ok := daemon.VmList[vmid]
	if !ok {
		return fmt.Errorf("vm %s doesn't exist!", vmid)
	}

	row, err := strconv.Atoi(h)
	if err != nil {
		glog.Warningf("Window row %s incorrect!", h)
	}
	column, err := strconv.Atoi(w)
	if err != nil {
		glog.Warningf("Window column %s incorrect!", w)
	}

	err = vm.Tty(tag, row, column)
	if err != nil {
		return err
	}

	glog.V(1).Infof("Successfully resized the tty!")
	return nil
}
func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig) ([]string, error) {
	var warnings []string

	if hostConfig == nil {
		return warnings, nil
	}

	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
		return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
	}
	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
		glog.Warningf("Your kernel does not support memory limit capabilities. Limitation discarded.")
		hostConfig.Memory = 0
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
		glog.Warningf("Your kernel does not support swap limit capabilities, memory limited without swap.")
		hostConfig.MemorySwap = -1
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
	}
	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
		return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
	}
	if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
		glog.Warningf("Your kernel does not support CPU cfs period. Period discarded.")
		hostConfig.CpuPeriod = 0
	}
	if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota {
		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
		glog.Warningf("Your kernel does not support CPU cfs quota. Quota discarded.")
		hostConfig.CpuQuota = 0
	}
	if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
		return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
	}
	if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable {
		hostConfig.OomKillDisable = false
		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
	}
	if daemon.SystemConfig().IPv4ForwardingDisabled {
		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
		glog.Warningf("IPv4 forwarding is disabled. Networking will not work")
	}

	return warnings, nil
}
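// A minimal usage sketch (hypothetical call site, not from the source): hard
// errors abort the request while warnings are only logged and handed back to
// the caller, so a container can still start on a kernel missing optional
// features.
func exampleVerify(daemon *Daemon, hostConfig *runconfig.HostConfig) error {
	warnings, err := daemon.verifyContainerSettings(hostConfig)
	if err != nil {
		return err // e.g. a memory limit below the 4MB minimum
	}
	for _, w := range warnings {
		glog.Warning(w) // discarded limits, disabled IPv4 forwarding, etc.
	}
	return nil
}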
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Waiting for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
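// readVmMessage is defined elsewhere in the package; below is a minimal sketch
// of the framing it is assumed to use: a 4-byte big-endian message code
// followed by a 4-byte big-endian total length that includes the 8-byte header
// itself. The struct and function names here are illustrative, not the
// source's, and the wire format is an assumption (uses encoding/binary and io).
type vmMessage struct {
	code    uint32
	message []byte
}

func readVmMessageSketch(conn *net.UnixConn) (*vmMessage, error) {
	header := make([]byte, 8)
	if _, err := io.ReadFull(conn, header); err != nil {
		return nil, err
	}
	code := binary.BigEndian.Uint32(header[:4])
	length := binary.BigEndian.Uint32(header[4:])
	if length < 8 {
		return nil, fmt.Errorf("invalid message length %d", length)
	}
	payload := make([]byte, length-8)
	if _, err := io.ReadFull(conn, payload); err != nil {
		return nil, err
	}
	return &vmMessage{code: code, message: payload}, nil
}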
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
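// writeCorsHeaders is called above but not shown here; a minimal sketch of its
// assumed behaviour, mirroring the usual Docker remote API CORS headers (the
// exact header set is an assumption, not taken from this source):
func writeCorsHeadersSketch(w http.ResponseWriter, r *http.Request, corsHeaders string) {
	w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
}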
func (vc *VBoxContext) RemoveDir(name string) error {
	if err := vc.Machine.RemoveSharedFolder(vc.Machine.Name, name); err != nil {
		glog.Warningf("Failed to remove the shared folder: %s", err.Error())
		return err
	}
	return nil
}
func (d *Driver) createDisk(id, parent string) error {
	// create a raw image
	if _, err := os.Stat(fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)); err == nil {
		return nil
	}
	var (
		parentDisk string = d.BaseImage()
		idDisk     string = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	)
	if parent != "" {
		parentDisk = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), parent)
	}

	params := fmt.Sprintf("vboxmanage createhd --filename %s --diffparent %s --format VDI", idDisk, parentDisk)
	cmd := exec.Command("/bin/sh", "-c", params)
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Warningf("%s", string(output))
		if strings.Contains(string(output), "not found in the media registry") {
			if err := virtualbox.RegisterDisk(d.pullVm, d.pullVm, parentDisk, 4); err != nil {
				return err
			}
		}
	}
	os.Chmod(idDisk, 0755)

	params = fmt.Sprintf("vboxmanage closemedium %s", idDisk)
	cmd = exec.Command("/bin/sh", "-c", params)
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Error(err.Error())
		return fmt.Errorf("failed to run vboxmanage closemedium, %s", output)
	}
	return nil
}
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
	buf := new(syscall.Statfs_t)
	if err := syscall.Statfs(loopFile, buf); err != nil {
		glog.Warningf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
		return 0, err
	}
	return buf.Bfree * uint64(buf.Bsize), nil
}
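// A hedged usage sketch (hypothetical call site; the threshold is
// illustrative): the free-byte count lets callers warn before the filesystem
// holding the loopback file fills up.
func warnOnLowSpace(devices *DeviceSet, loopFile string) {
	free, err := devices.getUnderlyingAvailableSpace(loopFile)
	if err != nil {
		return // the failure is already logged above
	}
	if free < 1<<30 { // less than 1 GiB left under the loop file
		glog.Warningf("Only %d bytes free under loop file %s", free, loopFile)
	}
}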
// Poweroff forcefully stops the machine. State is lost and might corrupt the disk image.
func (m *Machine) Poweroff() error {
	switch m.State {
	case Poweroff, Aborted, Saved:
		// already stopped, nothing to do
		glog.Warningf("The machine is already stopped (Poweroff, Aborted or Saved)")
		return nil
	}
	return vbm("controlvm", m.Name, "poweroff")
}
func (d *Driver) Setup() (err error) {
	var (
		vm        *hypervisor.Vm
		ids       []string
		parentIds []string
	)
	if d.daemon == nil {
		d.daemon, err = GetDaemon()
		if err != nil {
			return err
		}
	}
	vm, err = d.daemon.StartVm(d.pullVm, 1, 64, false, types.VM_KEEP_AFTER_SHUTDOWN)
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}

	defer func() {
		if err != nil {
			d.daemon.KillVm(vm.Id)
		}
	}()

	if err = d.daemon.WaitVmStart(vm); err != nil {
		glog.Error(err)
		return err
	}

	if err = virtualbox.RegisterDisk(d.pullVm, d.pullVm, d.BaseImage(), 4); err != nil {
		glog.Errorf(err.Error())
		return err
	}

	ids, err = loadIds(path.Join(d.RootPath(), "layers"))
	if err != nil {
		return err
	}

	for _, id := range ids {
		if d.disks[id] {
			continue
		}
		parentIds, err = getParentIds(d.RootPath(), id)
		if err != nil {
			glog.Warningf(err.Error())
			continue
		}
		for _, cid := range parentIds {
			if d.disks[cid] {
				continue
			}
			d.Exists(cid)
			d.disks[cid] = true
		}
		d.disks[id] = true
	}
	return nil
}
func checkKernel() error {
	// Check for unsupported kernel versions
	// FIXME: it would be cleaner to not test for specific versions, but rather
	// test for specific functionalities.
	// Unfortunately we can't test for the feature "does not cause a kernel panic"
	// without actually causing a kernel panic, so we need this workaround until
	// the circumstances of pre-3.10 crashes are clearer.
	// For details see https://github.com/docker/docker/issues/407
	if k, err := kernel.GetKernelVersion(); err != nil {
		glog.Warningf("%s", err)
	} else {
		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 {
			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
				glog.Warningf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String())
			}
		}
	}
	return nil
}
func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
	if loopFile != "" {
		fi, err := os.Stat(loopFile)
		if err != nil {
			glog.Warningf("Couldn't stat loopfile %v: %v", loopFile, err)
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	}
	return false, nil
}
func UmountVolume(shareDir, volPath string, name string, hub chan VmEvent) {
	mount := path.Join(shareDir, volPath)
	success := true
	err := syscall.Unmount(mount, 0)
	if err != nil {
		glog.Warningf("Cannot umount volume %s: %s", mount, err.Error())
		// fall back to a lazy umount, which detaches the mount point even
		// while it is still busy
		err = syscall.Unmount(mount, syscall.MNT_DETACH)
		if err != nil {
			glog.Warningf("Cannot lazy umount volume %s: %s", mount, err.Error())
			success = false
		} else {
			success = true
		}
	}
	if success {
		os.Remove(mount)
	}

	// After umounting the device, we need to delete it
	hub <- &VolumeUnmounted{Name: name, Success: success}
}
func (daemon *Daemon) Restore() error {
	if daemon.GetPodNum() == 0 {
		return nil
	}

	podList := map[string]string{}

	iter := daemon.db.NewIterator(util.BytesPrefix([]byte("pod-")), nil)
	for iter.Next() {
		key := iter.Key()
		value := iter.Value()
		if strings.Contains(string(key), "pod-container-") {
			glog.V(1).Infof(string(value))
			continue
		}
		glog.V(1).Infof("Get the pod item, pod is %s!", key)
		err := daemon.db.Delete(key, nil)
		if err != nil {
			return err
		}
		podList[string(key)[4:]] = string(value)
	}
	iter.Release()
	err := iter.Error()
	if err != nil {
		return err
	}

	daemon.PodList.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodList.Unlock()
	for k, v := range podList {
		err = daemon.CreatePod(k, v, false)
		if err != nil {
			glog.Warningf("Got an unexpected error, %s", err.Error())
			continue
		}
		vmId, err := daemon.DbGetVmByPod(k)
		if err != nil {
			glog.V(1).Info(err.Error(), " for ", k)
			continue
		}
		p, _ := daemon.PodList.Get(k)
		if err := p.AssociateVm(daemon, string(vmId)); err != nil {
			glog.V(1).Infof("Some problem during associating vm %s to pod %s, %v", string(vmId), k, err)
			// continue to the next pod
		}
	}

	return nil
}
func (daemon *Daemon) CleanPod(podId string) (int, string, error) {
	var (
		code  = 0
		cause = ""
		err   error
	)
	os.RemoveAll(path.Join(utils.HYPER_ROOT, "services", podId))
	pod, ok := daemon.PodList.Get(podId)
	if !ok {
		return -1, "", fmt.Errorf("Can not find that Pod(%s)", podId)
	}
	if pod.status.Status != types.S_POD_RUNNING {
		// If the pod type is kubernetes, we just remove the pod from the pod list.
		// The persistent data has been removed since we got the E_VM_SHUTDOWN event.
		if pod.status.Type == "kubernetes" {
			daemon.RemovePod(podId)
			code = types.E_OK
		} else {
			daemon.DeletePodFromDB(podId)
			for _, c := range pod.status.Containers {
				glog.V(1).Infof("Ready to rm container: %s", c.Id)
				if _, _, err = daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
					glog.Warningf("Failed to rm container: %s", err.Error())
				}
			}
			daemon.RemovePod(podId)
			daemon.DeletePodContainerFromDB(podId)
			daemon.DeleteVolumeId(podId)
			code = types.E_OK
		}
	} else {
		code, cause, err = daemon.StopPod(podId, "yes")
		if err != nil {
			return -1, "", err
		}
		if code == types.E_VM_SHUTDOWN {
			daemon.DeletePodFromDB(podId)
			for _, c := range pod.status.Containers {
				glog.V(1).Infof("Ready to rm container: %s", c.Id)
				if _, _, err = daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
					glog.V(1).Infof("Failed to rm container: %s", err.Error())
				}
			}
			daemon.RemovePod(podId)
			daemon.DeletePodContainerFromDB(podId)
			daemon.DeleteVolumeId(podId)
		}
		code = types.E_OK
	}
	return code, cause, nil
}
// For shared directory between host and guest OS
func (vc *VBoxContext) AddDir(name, path string, readonly bool) error {
	sFolder := virtualbox.SharedFolder{
		Name:      name,
		Path:      path,
		Automount: false,
		Transient: false,
		Readonly:  readonly,
	}
	if err := vc.Machine.AddSharedFolder(vc.Machine.Name, sFolder); err != nil {
		glog.Warningf("Failed to add the shared folder: %s", err.Error())
		return err
	}
	return nil
}
func (vc *VBoxContext) Shutdown(ctx *hypervisor.VmContext) {
	go func() {
		// detach the bootable Disk
		m := vc.Machine
		if m == nil {
			return
		}
		name := m.Name
		m.Poweroff()
		time.Sleep(1 * time.Second)
		if err := vc.detachDisk(name, 0); err != nil {
			glog.Warningf("failed to detach the disk of VBox(%s), %s", name, err.Error())
		}
		if ctx.Keep < types.VM_KEEP_AFTER_SHUTDOWN {
			if err := m.Delete(); err != nil {
				glog.Warningf("failed to delete the VBox(%s), %s", name, err.Error())
			}
			os.RemoveAll(path.Join(hypervisor.BaseDir, "vm", name))
		}
		delete(vc.Driver.Machines, name)
		ctx.Hub <- &hypervisor.VmExit{}
	}()
}
func (daemon *Daemon) Restore() error {
	if daemon.GetPodNum() == 0 {
		return nil
	}

	podList := map[string]string{}

	iter := daemon.db.NewIterator(util.BytesPrefix([]byte("pod-")), nil)
	for iter.Next() {
		key := iter.Key()
		value := iter.Value()
		if strings.Contains(string(key), "pod-container-") {
			glog.V(1).Infof(string(value))
			continue
		}
		glog.V(1).Infof("Get the pod item, pod is %s!", key)
		err := daemon.db.Delete(key, nil)
		if err != nil {
			return err
		}
		podList[string(key)[4:]] = string(value)
	}
	iter.Release()
	err := iter.Error()
	if err != nil {
		return err
	}

	daemon.PodsMutex.Lock()
	glog.V(2).Infof("lock PodList")
	defer glog.V(2).Infof("unlock PodList")
	defer daemon.PodsMutex.Unlock()
	for k, v := range podList {
		err = daemon.CreatePod(k, v, nil, false)
		if err != nil {
			glog.Warningf("Got an unexpected error, %s", err.Error())
			continue
		}
		vmId, err := daemon.GetVmByPod(k)
		if err != nil {
			glog.V(1).Info(err.Error(), " for ", k)
			continue
		}
		daemon.PodList[k].Vm = string(vmId)
	}

	// associate all VMs
	daemon.AssociateAllVms()
	return nil
}
func aufsUnmount(target string) error {
	glog.V(1).Infof("Ready to unmount the target : %s", target)
	if _, err := os.Stat(target); err != nil && os.IsNotExist(err) {
		return nil
	}

	// flush pending writes through auplink before unmounting
	cmdString := fmt.Sprintf("auplink %s flush", target)
	cmd := exec.Command("/bin/sh", "-c", cmdString)
	if err := cmd.Run(); err != nil {
		glog.Warningf("Couldn't run auplink command : %s", err.Error())
	}

	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}
	return nil
}
func (vd *VBoxDriver) InitNetwork(bIface, bIP string) error {
	var i = 0

	if bIP == "" {
		network.BridgeIP = network.DefaultBridgeIP
	} else {
		network.BridgeIP = bIP
	}

	bip, ipnet, err := net.ParseCIDR(network.BridgeIP)
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}

	gateway := bip.Mask(ipnet.Mask)
	inc(gateway, 2)

	if !ipnet.Contains(gateway) {
		// note: err is nil at this point, so report the failure itself
		err = fmt.Errorf("get Gateway from BridgeIP %s failed", network.BridgeIP)
		glog.Errorf(err.Error())
		return err
	}
	prefixSize, _ := ipnet.Mask.Size()
	_, network.BridgeIPv4Net, err = net.ParseCIDR(gateway.String() + fmt.Sprintf("/%d", prefixSize))
	if err != nil {
		glog.Errorf(err.Error())
		return err
	}
	network.BridgeIPv4Net.IP = gateway
	glog.Warningf("%s", network.BridgeIPv4Net.String())
	/*
	 * Filter the IPs which can not be used for VMs
	 */
	bip = bip.Mask(ipnet.Mask)
	for inc(bip, 1); ipnet.Contains(bip) && i < 2; inc(bip, 1) {
		i++
		glog.V(3).Infof("Try %s", bip.String())
		_, err = network.IpAllocator.RequestIP(network.BridgeIPv4Net, bip)
		if err != nil {
			glog.Errorf(err.Error())
			return err
		}
	}

	return nil
}
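// inc is used above but defined elsewhere; a minimal sketch of an in-place IP
// incrementer with byte-wise carry, assuming the usual net.IP byte-slice
// representation (the name matches the call sites above, the body is an
// assumption):
func incSketch(ip net.IP, count int) {
	for n := 0; n < count; n++ {
		for i := len(ip) - 1; i >= 0; i-- {
			ip[i]++
			if ip[i] > 0 { // no wrap-around, carry is done
				break
			}
		}
	}
}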
func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(args); err != nil {
		return err
	}

	req, err := http.NewRequest("POST", "/"+serviceMethod, &buf)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", versionMimetype)
	req.URL.Scheme = "http"
	req.URL.Host = c.addr

	var retries int
	start := time.Now()

	for {
		resp, err := c.http.Do(req)
		if err != nil {
			if !retry {
				return err
			}

			timeOff := backoff(retries)
			if abort(start, timeOff) {
				return err
			}
			retries++
			glog.Warningf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
			time.Sleep(timeOff)
			continue
		}

		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			remoteErr, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("Plugin Error: %s", err)
			}
			return fmt.Errorf("Plugin Error: %s", remoteErr)
		}

		return json.NewDecoder(resp.Body).Decode(&ret)
	}
}
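// backoff and abort are referenced above but defined elsewhere; a minimal
// sketch under the assumption of capped exponential backoff inside a fixed
// overall retry window (the cap and window values are illustrative):
func backoffSketch(retries int) time.Duration {
	b, max := 1*time.Second, 2*time.Minute
	for b < max && retries > 0 {
		b *= 2
		retries--
	}
	if b > max {
		b = max
	}
	return b
}

func abortSketch(start time.Time, timeOff time.Duration) bool {
	// give up once the next sleep would push us past the retry window
	return timeOff+time.Since(start) >= 2*time.Minute
}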
func (d *Driver) Setup() error {
	if d.daemon == nil {
		daemon, err := GetDaemon()
		if err != nil {
			return err
		}
		d.daemon = daemon
	}

	if vm, err := d.daemon.StartVm(d.pullVm, 1, 64, false, types.VM_KEEP_AFTER_SHUTDOWN); err != nil {
		glog.Errorf(err.Error())
		return err
	} else {
		d.daemon.AddVm(vm)
	}

	if err := virtualbox.RegisterDisk(d.pullVm, d.pullVm, d.BaseImage(), 4); err != nil {
		glog.Errorf(err.Error())
		return err
	}

	ids, err := loadIds(path.Join(d.RootPath(), "layers"))
	if err != nil {
		return err
	}

	for _, id := range ids {
		if d.disks[id] {
			continue
		}
		parentIds, err := getParentIds(d.RootPath(), id)
		if err != nil {
			glog.Warningf(err.Error())
			continue
		}
		for _, cid := range parentIds {
			if d.disks[cid] {
				continue
			}
			d.Exists(cid)
			d.disks[cid] = true
		}
		d.disks[id] = true
	}
	return nil
}
func UmountDMDevice(deviceFullPath, name string, hub chan VmEvent) {
	args := fmt.Sprintf("dmsetup remove -f %s", deviceFullPath)
	cmd := exec.Command("/bin/sh", "-c", args)
	success := true
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Warningf("Cannot umount device %s: %s, %s", deviceFullPath, err.Error(), output)
		// retry the removal once before giving up
		cmd := exec.Command("/bin/sh", "-c", args)
		if err := cmd.Run(); err != nil {
			success = false
		}
	}

	// After umounting the device, we need to delete it
	hub <- &BlockdevRemovedEvent{Name: name, Success: success}
}
func UmountVfsContainer(shareDir, image string, index int, hub chan VmEvent) {
	mount := path.Join(shareDir, image)
	success := true
	for i := 0; i < 10; i++ {
		time.Sleep(3 * time.Millisecond)
		err := syscall.Unlink(mount)
		if err != nil {
			// an error other than EBUSY will not go away by retrying;
			// stop here and report success, since the entry cannot be
			// removed by us anyway
			if !strings.Contains(strings.ToLower(err.Error()), "device or resource busy") {
				success = true
				break
			}
			glog.Warningf("Cannot umount vfs %s: %s", mount, err.Error())
			success = false
		} else {
			success = true
			break
		}
	}
	hub <- &ContainerUnmounted{Index: index, Success: success}
}
func (vc *VBoxContext) RemoveDisk(ctx *hypervisor.VmContext, filename, format string, id int, callback hypervisor.VmEvent) {
	m := vc.Machine
	if m == nil {
		return
	}
	if err := vc.detachDisk(m.Name, id); err != nil {
		glog.Warningf("failed to detach the disk of VBox(%s), %s", m.Name, err.Error())
		/*
			ctx.Hub <- &hypervisor.DeviceFailed{
				Session: callback,
			}
		*/
	}
	glog.V(1).Infof("Disk %s removed successfully", filename)
	ctx.Hub <- callback
}
func lookupGidByName(nameOrGid string) (int, error) {
	groupFile, err := user.GetGroupPath()
	if err != nil {
		return -1, err
	}
	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
	})
	if err != nil {
		return -1, err
	}
	if len(groups) > 0 {
		return groups[0].Gid, nil
	}

	// group not found in the group file; if the argument is numeric, warn
	// but use it as the GID anyway
	gid, err := strconv.Atoi(nameOrGid)
	if err == nil {
		glog.Warningf("Could not find GID %d", gid)
		return gid, nil
	}

	return -1, fmt.Errorf("Group %s not found", nameOrGid)
}
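// A hedged usage sketch (hypothetical call site; the group name is
// illustrative): resolve the group that should own a unix API socket, falling
// back to root's group when the lookup fails.
func exampleSocketGroup() int {
	gid, err := lookupGidByName("docker")
	if err != nil {
		glog.Warningf("%v, falling back to gid 0", err)
		return 0
	}
	return gid
}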
func migrateKey() (err error) {
	// Migrate trust key if exists at ~/.docker/key.json and owned by current user
	oldPath := filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
		defer func() {
			// Ensure old path is removed if no error occurred
			if err == nil {
				err = os.Remove(oldPath)
			} else {
				glog.Warningf("Key migration failed, key file not removed at %s", oldPath)
			}
		}()

		// 0755: the directory needs the execute bit to be traversable
		if err := os.MkdirAll(getDaemonConfDir(), os.FileMode(0755)); err != nil {
			return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
		}

		newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
		if err != nil {
			return fmt.Errorf("error creating key file %q: %s", newPath, err)
		}
		defer newFile.Close()

		oldFile, err := os.Open(oldPath)
		if err != nil {
			return fmt.Errorf("error opening key file %q: %s", oldPath, err)
		}
		defer oldFile.Close()

		if _, err := io.Copy(newFile, oldFile); err != nil {
			return fmt.Errorf("error copying key: %s", err)
		}

		glog.Infof("Migrated key from %s to %s", oldPath, newPath)
	}
	return nil
}
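// currentUserIsOwner is referenced above but defined elsewhere; a minimal
// sketch assuming a unix stat-based ownership check (uses syscall.Stat_t):
func currentUserIsOwnerSketch(f string) bool {
	fileInfo, err := os.Stat(f)
	if err != nil || fileInfo == nil {
		return false
	}
	stat, ok := fileInfo.Sys().(*syscall.Stat_t)
	return ok && int(stat.Uid) == os.Getuid()
}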
func UmountAufsContainer(shareDir, image string, index int, hub chan VmEvent) {
	glog.Warningf("Not supported")
}
func mainDaemon(config, host string, flDisableIptables bool) {
	glog.V(1).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	if _, err := os.Stat(config); err != nil {
		if os.IsNotExist(err) {
			glog.Errorf("Can not find config file(%s)", config)
			return
		}
		glog.Errorf(err.Error())
		return
	}

	os.Setenv("HYPER_CONFIG", config)
	cfg, err := goconfig.LoadConfigFile(config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", config, err.Error())
		return
	}

	hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root")
	if hyperRoot == "" {
		hyperRoot = "/var/lib/hyper"
	}
	utils.HYPER_ROOT = hyperRoot
	if _, err := os.Stat(hyperRoot); err != nil {
		if err := os.MkdirAll(hyperRoot, 0755); err != nil {
			glog.Errorf(err.Error())
			return
		}
	}

	storageDriver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "StorageDriver")
	if storageDriver != "" {
		graphdriver.DefaultDriver = storageDriver
	}

	eng := engine.New(config)
	docker.Init()

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s", err.Error())
		return
	}

	var drivers []string
	if runtime.GOOS == "darwin" {
		drivers = []string{"vbox"}
	} else {
		driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor")
		if driver != "" {
			drivers = []string{driver}
		} else {
			drivers = []string{"xen", "kvm", "vbox"}
		}
	}
	for _, dri := range drivers {
		driver := strings.ToLower(dri)
		if hypervisor.HDriver, err = driverloader.Probe(driver); err != nil {
			glog.Warningf("%s", err.Error())
			continue
		} else {
			d.Hypervisor = driver
			glog.Infof("The hypervisor's driver is %s", driver)
			break
		}
	}
	if hypervisor.HDriver == nil {
		glog.Errorf("Please specify the exec driver, such as 'kvm', 'xen' or 'vbox'")
		return
	}

	disableIptables := cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false)
	if err = hypervisor.InitNetwork(d.BridgeIface, d.BridgeIP, disableIptables || flDisableIptables); err != nil {
		glog.Errorf("InitNetwork failed, %s", err.Error())
		return
	}

	defaultLog, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Logger")
	defaultLogCfg, _ := cfg.GetSection("Log")
	d.DefaultLogCfg(defaultLog, defaultLogCfg)

	// Set the daemon object as the global variable
	// which will be used for puller and builder
	utils.SetDaemon(d)

	if err := d.DockerCli.Setup(); err != nil {
		glog.Error(err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!")
		return
	}

	defaultHost := []string{}
	if host != "" {
		defaultHost = append(defaultHost, host)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)
	// The serve API job never exits unless an error occurs
	// We need to start it as a goroutine and wait on it so
	// daemon doesn't exit
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization")

	if err := d.Restore(); err != nil {
		glog.Warningf("Failed to restore the previous VM")
		return
	}

	// Daemon is fully initialized and handling API traffic
	// Wait for serve API job to complete
	select {
	case errAPI := <-serveAPIWait:
		// If we have an error here it is unique to API (as daemonErr would have
		// exited the daemon process above)
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v", errAPI)
		}
	case <-stop:
		d.DestroyAndKeepVm()
		eng.Shutdown()
	case <-stopAll:
		d.DestroyAllVm()
		eng.Shutdown()
	}
}
func (daemon *Daemon) CreatePod(podId, podArgs string, config interface{}, autoremove bool) (err error) {
	glog.V(1).Infof("podArgs: %s", podArgs)
	var (
		userPod      *pod.UserPod
		containerIds []string
		cId          []byte
	)
	userPod, err = pod.ProcessPodBytes([]byte(podArgs))
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return err
	}

	if err = userPod.Validate(); err != nil {
		return err
	}

	mypod := hypervisor.NewPod(podId, userPod)
	mypod.Handler.Handle = hyperHandlePodEvent
	mypod.Handler.Data = daemon
	mypod.Autoremove = autoremove

	defer func() {
		if err != nil {
			if containerIds == nil {
				daemon.DeletePodFromDB(podId)
				if mypod != nil {
					for _, c := range mypod.Containers {
						glog.V(1).Infof("Ready to rm container: %s", c.Id)
						if _, _, err = daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
							glog.Warningf("Failed to rm container: %s", err.Error())
						}
					}
				}
				daemon.RemovePod(podId)
				daemon.DeletePodContainerFromDB(podId)
			}
		}
	}()

	// store the UserPod into the db
	if err = daemon.WritePodToDB(podId, []byte(podArgs)); err != nil {
		glog.V(1).Info("Found an error while saving the POD file")
		return err
	}
	containerIds, err = daemon.GetPodContainersByName(podId)
	if err != nil {
		glog.V(1).Info(err.Error())
	}

	if containerIds != nil {
		for _, id := range containerIds {
			var (
				name  string
				image string
			)
			if jsonResponse, err := daemon.DockerCli.GetContainerInfo(id); err == nil {
				name = jsonResponse.Name
				image = jsonResponse.Config.Image
			}
			mypod.AddContainer(id, name, image, []string{}, types.S_POD_CREATED)
		}
	} else {
		// Process the 'Containers' section
		glog.V(1).Info("Process the Containers section in POD SPEC\n")
		for _, c := range userPod.Containers {
			imgName := c.Image
			cId, _, err = daemon.DockerCli.SendCmdCreate(c.Name, imgName, []string{}, nil)
			if err != nil {
				glog.Error(err.Error())
				return err
			}
			var (
				name  string
				image string
			)
			if jsonResponse, err := daemon.DockerCli.GetContainerInfo(string(cId)); err == nil {
				name = jsonResponse.Name
				image = jsonResponse.Config.Image
			}
			mypod.AddContainer(string(cId), name, image, []string{}, types.S_POD_CREATED)
		}
	}

	daemon.AddPod(mypod)

	if err = daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saving the Containers info")
		return err
	}

	return nil
}