func (daemon *Daemon) List(item, podId, vmId string, auxiliary bool) (map[string][]string, error) { var ( pl = []*pod.XPod{} list = make(map[string][]string) vmJsonResponse = []string{} podJsonResponse = []string{} containerJsonResponse = []string{} ) hlog.Log(hlog.INFO, "got list request for %s (pod: %s, vm: %s, include aux container: %v)", item, podId, vmId, auxiliary) if item != "pod" && item != "container" && item != "vm" { return list, fmt.Errorf("Can not support %s list!", item) } pl = daemon.snapshotPodList(podId, vmId) for _, p := range pl { if p.IsNone() { p.Log(pod.TRACE, "listing: ignore none status pod") continue } switch item { case "vm": vm := p.SandboxName() if vm == "" { continue } vmJsonResponse = append(vmJsonResponse, p.SandboxStatusString()) case "pod": podJsonResponse = append(podJsonResponse, p.StatusString()) case "container": var cids []string if auxiliary { cids = p.ContainerIds() } else { cids = p.ContainerIdsOf(apitypes.UserContainer_REGULAR) } for _, cid := range cids { status := p.ContainerStatusString(cid) if status != "" { containerJsonResponse = append(containerJsonResponse, status) } } } } switch item { case "vm": list["vmData"] = vmJsonResponse hlog.Log(hlog.TRACE, "list vm result: %v", vmJsonResponse) case "pod": list["podData"] = podJsonResponse hlog.Log(hlog.TRACE, "list pod result: %v", podJsonResponse) case "container": list["cData"] = containerJsonResponse hlog.Log(hlog.TRACE, "list container result: %v", containerJsonResponse) } return list, nil }
func LoadAllPods(db *daemondb.DaemonDB) chan *types.PersistPodLayout { kvchan := db.PrefixList2Chan([]byte(LAYOUT_KEY_PREFIX), nil) if kvchan == nil { return nil } ch := make(chan *types.PersistPodLayout, 128) go func() { for { kv, ok := <-kvchan if !ok { hlog.Log(INFO, "layout loading finished") close(ch) return } hlog.Log(TRACE, "loading layout of container %s", string(kv.K)) var layout types.PersistPodLayout err := proto.Unmarshal(kv.V, &layout) if err != nil { hlog.Log(ERROR, "failed to decode layout of contaienr %s: %v", string(kv.K), err) continue } ch <- &layout } }() return ch }
func startSandbox(f factory.Factory, cpu, mem int, kernel, initrd string) (vm *hypervisor.Vm, err error) { var ( DEFAULT_CPU = 1 DEFAULT_MEM = 128 ) if cpu <= 0 { cpu = DEFAULT_CPU } if mem <= 0 { mem = DEFAULT_MEM } if kernel == "" { hlog.Log(DEBUG, "get sandbox from factory: CPU: %d, Memory %d", cpu, mem) vm, err = f.GetVm(cpu, mem) } else { hlog.Log(DEBUG, "The create sandbox with: kernel=%s, initrd=%s, cpu=%d, memory=%d", kernel, initrd, cpu, mem) config := &hypervisor.BootConfig{ CPU: cpu, Memory: mem, Kernel: kernel, Initrd: initrd, } vm, err = hypervisor.GetVm("", config, false, hypervisor.HDriver.SupportLazyMode()) } if err != nil { hlog.Log(ERROR, "failed to create a sandbox (cpu=%d, mem=%d kernel=%s initrd=%d): %v", cpu, mem, kernel, initrd, err) } return vm, err }
func initLogCreator(factory *PodFactory, spec *apitypes.UserPod) logger.Creator { if spec.Log.Type == "" { spec.Log.Type = factory.logCfg.Type spec.Log.Config = factory.logCfg.Config } factory.logCfg.Config = spec.Log.Config if spec.Log.Type == "none" { return nil } var ( creator logger.Creator err error ) if err = logger.ValidateLogOpts(spec.Log.Type, spec.Log.Config); err != nil { hlog.Log(ERROR, "invalid log options for pod %s. type: %s; options: %#v", spec.Id, spec.Log.Type, spec.Log.Config) return nil } creator, err = logger.GetLogDriver(spec.Log.Type) if err != nil { hlog.Log(ERROR, "cannot create logCreator for pod %s. type: %s; err: %v", spec.Id, spec.Log.Type, err) return nil } hlog.Log(DEBUG, "configuring log driver [%s] for %s", spec.Log.Type, spec.Id) return creator }
// LoadXPod reconstructs a pod from its persisted layout: it reloads the
// global spec, rebuilds the pod object, then restores interfaces, volumes
// and containers, reattaches the sandbox and pod meta, and finally resumes
// container logging if the pod was running. Any failure aborts the load.
func LoadXPod(factory *PodFactory, layout *types.PersistPodLayout) (*XPod, error) {
	// NOTE(review): loadGloabalSpec is misspelled at its definition site;
	// kept as-is here because the callee name lives elsewhere.
	spec, err := loadGloabalSpec(factory.db, layout.Id)
	if err != nil {
		return nil, err
	}
	p, err := newXPod(factory, spec)
	if err != nil {
		hlog.Log(ERROR, "failed to create pod from spec: %v", err)
		// TODO: remove spec from daemonDB
		// TODO: remove vm from daemonDB
		return nil, err
	}
	err = p.reserveNames(spec.Containers)
	if err != nil {
		return nil, err
	}
	// Restore persisted resources in order: interfaces, volumes, containers.
	for _, ix := range layout.Interfaces {
		if err := p.loadInterface(ix); err != nil {
			return nil, err
		}
	}
	for _, vid := range layout.Volumes {
		if err := p.loadVolume(vid); err != nil {
			return nil, err
		}
	}
	for _, cid := range layout.Containers {
		if err := p.loadContainer(cid); err != nil {
			return nil, err
		}
	}
	err = p.loadSandbox()
	if err != nil {
		// TODO: remove vm from daemonDB
		return nil, err
	}
	err = p.loadPodMeta()
	if err != nil {
		return nil, err
	}
	// Resume logging for pods that were running before the daemon restarted.
	if p.status == S_POD_RUNNING {
		for _, c := range p.containers {
			c.startLogging()
		}
	}
	// don't need to reserve name again, because this is load
	return p, nil
}
func dissociateSandbox(sandbox *hypervisor.Vm, retry int) error { if sandbox == nil { return nil } rval, err := sandbox.ReleaseVm() if err != nil { hlog.Log(WARNING, "SB[%s] failed to release sandbox: %v", sandbox.Id, err) if rval == runvtypes.E_BUSY && retry < maxReleaseRetry { retry++ hlog.Log(DEBUG, "SB[%s] retry release %d", sandbox.Id, retry) time.AfterFunc(100*time.Millisecond, func() { dissociateSandbox(sandbox, retry) }) return nil } hlog.Log(INFO, "SB[%s] shutdown because of failed release", sandbox.Id) sandbox.Kill() return err } return nil }
func NewHyperConfig(config string) *HyperConfig { if config == "" { config = "/etc/hyper/config" } hlog.Log(hlog.INFO, "config file: ", config) c := &HyperConfig{ ConfigFile: config, Root: "/var/lib/hyper", logPrefix: fmt.Sprintf("[%s] ", config), } cfg, err := goconfig.LoadConfigFile(config) if err != nil { c.Log(hlog.ERROR, "read config file failed: %v", err) return nil } hyperRoot, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Root") if hyperRoot != "" { c.Root = hyperRoot } c.StorageDriver, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "StorageDriver") c.Kernel, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "Kernel") c.Initrd, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "Initrd") c.Bridge, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "Bridge") c.BridgeIP, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "BridgeIP") c.Host, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "Host") driver, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Hypervisor") c.Driver = strings.ToLower(driver) c.DisableIptables = cfg.MustBool(goconfig.DEFAULT_SECTION, "DisableIptables", false) c.DefaultLog, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "Logger") c.DefaultLogOpt, _ = cfg.GetSection("Log") c.VmFactoryPolicy, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "VmFactoryPolicy") c.GRPCHost, _ = cfg.GetValue(goconfig.DEFAULT_SECTION, "gRPCHost") c.Log(hlog.INFO, "config items: %#v", c) return c }
func ProbeExistingVolume(v *apitypes.UserVolume, sharedDir string) (*runv.VolumeDescription, error) { if v == nil || v.Source == "" { //do not create volume in this function, it depends on storage driver. return nil, fmt.Errorf("can not generate volume info from %v", v) } var err error = nil vol := &runv.VolumeDescription{ Name: v.Name, Source: v.Source, Format: v.Format, Fstype: v.Fstype, } if v.Option != nil { vol.Options = &runv.VolumeOption{ User: v.Option.User, Monitors: v.Option.Monitors, Keyring: v.Option.Keyring, } } if v.Format == "vfs" { vol.Fstype = "dir" vol.Source, err = storage.MountVFSVolume(v.Source, sharedDir) if err != nil { return nil, err } hlog.Log(DEBUG, "dir %s is bound to %s", v.Source, vol.Source) } else if v.Format == "raw" && v.Fstype == "" { vol.Fstype, err = dm.ProbeFsType(v.Source) if err != nil { vol.Fstype = storage.DEFAULT_VOL_FS err = nil } } return vol, nil }