// SetupLoopbackAddress adds or removes an IP address on the loopback device
// of the given container. Valid operations: "add" or "del".
// This variant registers a waiter via vm.WaitProcess before starting the
// exec, then blocks on the result channel for the exit code.
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	execId := fmt.Sprintf("exec-%s", utils.RandStr(10, "alpha"))
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback: make(chan *types.VmResponse, 1),
	}

	// Register the waiter first so the exit event cannot be missed.
	result := vm.WaitProcess(false, []string{execId}, 60)
	if result == nil {
		return fmt.Errorf("cannot wait %s, id: %s", command, execId)
	}

	if err := vm.Exec(container, execId, string(execcmd), false, tty); err != nil {
		return err
	}

	r, ok := <-result
	if !ok {
		return fmt.Errorf("exec failed %s: %s", command, execId)
	}
	if r.Code != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, r.Code)
	}

	return nil
}
// ApplyServices refreshes the service configuration inside the container:
// it reconciles the loopback addresses, rewrites the haproxy config, and
// gracefully reloads haproxy (-sf hands connections over to the new process).
func ApplyServices(vm *hypervisor.Vm, container string, services []pod.UserService) error {
	// Update lo ip addresses
	oldServices, err := GetServices(vm, container)
	if err != nil {
		return err
	}

	err = UpdateLoopbackAddress(vm, container, oldServices, services)
	if err != nil {
		return err
	}

	// Update haproxy config
	config := path.Join(ServiceVolume, ServiceConfig)
	vm.WriteFile(container, config, GenerateServiceConfig(services))

	command := []string{
		"sh", "-c",
		"haproxy -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf `cat /var/run/haproxy.pid`",
	}
	execcmd, err := json.Marshal(command)
	if err != nil {
		return err
	}

	return vm.Exec(nil, nil, string(execcmd), "", container)
}
// SetupLoopbackAddress adds or removes an IP address on the loopback device
// of the given container. Valid operations: "add" or "del".
// This variant tracks the exec in the pod's exec table and reads the exit
// code back from the recorded exec status.
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	execId := fmt.Sprintf("exec-%s", utils.RandStr(10, "alpha"))
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback: make(chan *types.VmResponse, 1),
	}

	vm.Pod.AddExec(container, execId, command, false)
	defer vm.Pod.DeleteExec(execId)

	if err := vm.Exec(container, execId, string(execcmd), false, tty); err != nil {
		return err
	}

	es := vm.Pod.GetExec(execId)
	if es == nil {
		return fmt.Errorf("cannot find exec status for %s: %s", command, execId)
	}
	if es.ExitCode != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, es.ExitCode)
	}

	return nil
}
func resizeTty(vm *hypervisor.Vm, tag string, outFd uintptr, isTerminalOut bool) {
	height, width := getTtySize(outFd, isTerminalOut)
	if height == 0 && width == 0 {
		return
	}

	vm.Tty(tag, height, width)
}
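// resizeTty above depends on a getTtySize helper that is not part of this
// listing. Below is a minimal sketch of what it could look like, assuming
// golang.org/x/term is available; the real project may query the terminal
// differently, and the (height, width) return order is only inferred from
// the call in resizeTty.
func getTtySize(outFd uintptr, isTerminalOut bool) (int, int) {
	if !isTerminalOut {
		// Not a terminal: report zeros so resizeTty becomes a no-op.
		return 0, 0
	}
	width, height, err := term.GetSize(int(outFd))
	if err != nil {
		return 0, 0
	}
	return height, width
}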
// Setup lo ip address
// options for operation: add or del
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	return vm.Exec(nil, nil, string(execcmd), "", container)
}
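// The UpdateLoopbackAddress helper called by the ApplyServices variants is
// not shown in this listing. The sketch below is an assumption reconstructed
// from its call sites: it diffs the old and new service lists and invokes
// SetupLoopbackAddress with "add" or "del" accordingly. The body is
// illustrative, not the project's actual implementation.
func UpdateLoopbackAddress(vm *hypervisor.Vm, container string, oldServices, newServices []pod.UserService) error {
	oldIPs := make(map[string]bool)
	for _, s := range oldServices {
		oldIPs[s.ServiceIP] = true
	}
	newIPs := make(map[string]bool)
	for _, s := range newServices {
		newIPs[s.ServiceIP] = true
	}
	// Bring up addresses that appear only in the new service list.
	for ip := range newIPs {
		if !oldIPs[ip] {
			if err := SetupLoopbackAddress(vm, container, ip, "add"); err != nil {
				return err
			}
		}
	}
	// Tear down addresses that are no longer referenced.
	for ip := range oldIPs {
		if !newIPs[ip] {
			if err := SetupLoopbackAddress(vm, container, ip, "del"); err != nil {
				return err
			}
		}
	}
	return nil
}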
// hyperHandlePodEvent reacts to pod lifecycle events from the VM. It returns
// true when the event loop for this pod should stop.
func hyperHandlePodEvent(vmResponse *types.VmResponse, data interface{}, mypod *hypervisor.PodStatus, vm *hypervisor.Vm) bool {
	daemon := data.(*Daemon)

	if vmResponse.Code == types.E_POD_FINISHED {
		if vm.Keep != types.VM_KEEP_NONE {
			vm.Status = types.S_VM_IDLE
			return false
		}
		stopLogger(mypod)
		mypod.SetPodContainerStatus(vmResponse.Data.([]uint32))
		vm.Status = types.S_VM_IDLE
		if mypod.Autoremove {
			daemon.CleanPod(mypod.Id)
			return false
		}
	} else if vmResponse.Code == types.E_VM_SHUTDOWN {
		if mypod.Status == types.S_POD_RUNNING {
			stopLogger(mypod)
			mypod.Status = types.S_POD_SUCCEEDED
			mypod.SetContainerStatus(types.S_POD_SUCCEEDED)
		}
		mypod.Vm = ""
		daemon.PodStopped(mypod.Id)

		if mypod.Type == "kubernetes" {
			cleanup := false
			switch mypod.Status {
			case types.S_POD_SUCCEEDED:
				if mypod.RestartPolicy == "always" {
					daemon.RestartPod(mypod)
					break
				}
				cleanup = true
			case types.S_POD_FAILED:
				if mypod.RestartPolicy != "never" {
					daemon.RestartPod(mypod)
					break
				}
				cleanup = true
			}
			if cleanup {
				daemon.CleanUpContainer(mypod)
				daemon.DeleteVolumeId(mypod.Id)
			}
		}
		return true
	}

	return false
}
// WaitVmStart blocks until the VM reports that it is running.
func (daemon *Daemon) WaitVmStart(vm *hypervisor.Vm) error {
	status, err := vm.GetResponseChan()
	if err != nil {
		return err
	}
	defer vm.ReleaseResponseChan(status)

	vmResponse := <-status
	glog.V(1).Infof("Get the response from VM, VM id is %s, response code is %d!", vmResponse.VmId, vmResponse.Code)
	if vmResponse.Code != types.E_VM_RUNNING {
		return fmt.Errorf("Vbox did not start successfully")
	}

	return nil
}
// ApplyServices refreshes the service configuration inside the container:
// it reconciles the loopback addresses, rewrites the haproxy config, and
// signals haproxy with SIGHUP to reload it.
func ApplyServices(vm *hypervisor.Vm, container string, services []pod.UserService) error {
	// Update lo ip addresses
	oldServices, err := GetServices(vm, container)
	if err != nil {
		return err
	}

	err = UpdateLoopbackAddress(vm, container, oldServices, services)
	if err != nil {
		return err
	}

	// Update haproxy config
	config := path.Join(ServiceVolume, ServiceConfig)
	vm.WriteFile(container, config, GenerateServiceConfig(services))

	return vm.KillContainer(container, linuxsignal.SIGHUP)
}
// ContainerStart boots a fresh build VM and starts the pod that owns the
// given container in it. On failure the deferred cleanup releases the
// response channel and kills the VM.
func (d Docker) ContainerStart(cId string, hostConfig *containertypes.HostConfig) (err error) {
	var vm *hypervisor.Vm

	podId := ""
	if _, ok := d.hyper.CopyPods[cId]; ok {
		podId = d.hyper.CopyPods[cId]
	} else if _, ok := d.hyper.BasicPods[cId]; ok {
		podId = d.hyper.BasicPods[cId]
	} else {
		return fmt.Errorf("container %s doesn't belong to pod", cId)
	}

	defer func() {
		d.hyper.Ready <- true
		if err != nil && d.hyper.Vm != nil {
			if d.hyper.Status != nil {
				d.hyper.Vm.ReleaseResponseChan(d.hyper.Status)
				d.hyper.Status = nil
			}
			glog.Infof("ContainerStart failed, KillVm")
			d.Daemon.KillVm(d.hyper.Vm.Id)
			d.hyper.Vm = nil
		}
	}()

	vmId := "buildevm-" + utils.RandStr(10, "number")
	if vm, err = d.Daemon.StartVm(vmId, 1, 512, false); err != nil {
		return
	}
	d.hyper.Vm = vm

	if d.hyper.Status, err = vm.GetResponseChan(); err != nil {
		return
	}

	if _, _, err = d.Daemon.StartPod(nil, nil, podId, vm.Id, false); err != nil {
		return
	}

	return nil
}
// dissociateSandbox releases the sandbox from this daemon. If the sandbox is
// still busy it schedules another attempt, up to maxReleaseRetry times, and
// kills the sandbox outright when the release fails for any other reason.
func dissociateSandbox(sandbox *hypervisor.Vm, retry int) error {
	if sandbox == nil {
		return nil
	}

	rval, err := sandbox.ReleaseVm()
	if err != nil {
		hlog.Log(WARNING, "SB[%s] failed to release sandbox: %v", sandbox.Id, err)
		if rval == runvtypes.E_BUSY && retry < maxReleaseRetry {
			retry++
			hlog.Log(DEBUG, "SB[%s] retry release %d", sandbox.Id, retry)
			time.AfterFunc(100*time.Millisecond, func() {
				dissociateSandbox(sandbox, retry)
			})
			return nil
		}
		hlog.Log(INFO, "SB[%s] shutdown because of failed release", sandbox.Id)
		sandbox.Kill()
		return err
	}

	return nil
}
// ApplyServices refreshes the service configuration inside the container:
// it reconciles the loopback addresses, rewrites the haproxy config, and
// execs a graceful haproxy reload, checking the command's exit code.
func ApplyServices(vm *hypervisor.Vm, container string, services []pod.UserService) error {
	// Update lo ip addresses
	oldServices, err := GetServices(vm, container)
	if err != nil {
		return err
	}

	err = UpdateLoopbackAddress(vm, container, oldServices, services)
	if err != nil {
		return err
	}

	// Update haproxy config
	config := path.Join(ServiceVolume, ServiceConfig)
	vm.WriteFile(container, config, GenerateServiceConfig(services))

	command := []string{
		"sh", "-c",
		"haproxy -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf `cat /var/run/haproxy.pid`",
	}
	execcmd, err := json.Marshal(command)
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback:  make(chan *types.VmResponse, 1),
		ClientTag: pod.RandStr(8, "alphanum"),
	}

	if err := vm.Exec(tty, container, string(execcmd)); err != nil {
		return err
	}

	if tty.ExitCode != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, tty.ExitCode)
	}

	return nil
}
// SetupLoopbackAddress adds or removes an IP address on the loopback device
// of the given container. Valid operations: "add" or "del".
// This variant drives the exec through a TtyIO and reads the exit code back
// from the tty once the command finishes.
func SetupLoopbackAddress(vm *hypervisor.Vm, container, ip, operation string) error {
	command := "ip addr " + operation + " dev lo " + ip + "/32"
	execcmd, err := json.Marshal(strings.Split(command, " "))
	if err != nil {
		return err
	}

	tty := &hypervisor.TtyIO{
		Callback:  make(chan *types.VmResponse, 1),
		ClientTag: pod.RandStr(8, "alphanum"),
	}

	if err := vm.Exec(tty, container, string(execcmd)); err != nil {
		return err
	}

	if tty.ExitCode != 0 {
		return fmt.Errorf("exec %s on container %s failed with exit code %d", command, container, tty.ExitCode)
	}

	return nil
}
// createHyperPod creates a sandbox VM for the given OCI spec, using either
// the factory's default kernel configuration or the kernel/initrd pair named
// in the spec, and waits for sandbox initialization to finish.
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	cpu := defaultCpus
	mem := defaultMemory
	if spec.Linux != nil && spec.Linux.Resources != nil && spec.Linux.Resources.Memory != nil && spec.Linux.Resources.Memory.Limit != nil {
		mem = int(*spec.Linux.Resources.Memory.Limit >> 20)
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// if the user specifies a kernel, they must also specify an initrd, and vice versa
		return nil, fmt.Errorf("You must specify an initrd if you specify a kernel, or vice-versa")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}

		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	// Start waiting for init before kicking off sandbox initialization so the
	// result cannot be missed.
	r := make(chan api.Result, 1)
	go func() {
		r <- vm.WaitInit()
	}()

	sandbox := api.SandboxInfoFromOCF(spec)
	vm.InitSandbox(sandbox)

	rsp := <-r
	if !rsp.IsSuccess() {
		vm.Kill()
		glog.V(1).Infof("StartPod fail, response: %v", rsp)
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("%s init sandbox successfully", rsp.ResultId())

	hp := &HyperPod{
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// create Listener process running in its own netns
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
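// chooseKernel and chooseInitrd, used by both createHyperPod variants, are
// not included in this listing. A plausible sketch follows, assuming the
// paths travel in OCI spec annotations; the annotation keys here are
// hypothetical, not the project's actual keys.
func chooseKernel(spec *specs.Spec) string {
	if spec.Annotations == nil {
		return ""
	}
	return spec.Annotations["example.com/kernel"] // hypothetical key
}

func chooseInitrd(spec *specs.Spec) string {
	if spec.Annotations == nil {
		return ""
	}
	return spec.Annotations["example.com/initrd"] // hypothetical key
}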
// GetServices reads the haproxy config back out of the container and parses
// it into the pod's service list: each "frontend" line yields a service, and
// each "\tserver" line is attached to the service whose index is encoded in
// the server name after the dash.
func GetServices(vm *hypervisor.Vm, container string) ([]pod.UserService, error) {
	var services []pod.UserService

	config := path.Join(ServiceVolume, ServiceConfig)
	data, err := vm.ReadFile(container, config)
	if err != nil {
		return nil, err
	}

	token := bytes.Split(data, []byte("\n"))
	for _, tok := range token {
		first := bytes.Split(tok, []byte(" "))
		reader := bytes.NewReader(tok)
		if len(first) == 0 {
			continue
		}

		var t1, t2, t3, t4 string
		if string(first[0]) == "frontend" {
			// "frontend <name> <serviceIP>:<servicePort>"
			s := pod.UserService{
				Protocol: "TCP",
			}
			_, err := fmt.Fscanf(reader, "%s %s %s", &t1, &t2, &t3)
			if err != nil {
				return nil, err
			}
			hostport := strings.Split(t3, ":")
			s.ServiceIP = hostport[0]
			port, err := strconv.ParseInt(hostport[1], 10, 32)
			if err != nil {
				return nil, err
			}
			s.ServicePort = int(port)
			services = append(services, s)
		} else if string(first[0]) == "\tserver" {
			// "\tserver <name>-<serviceIdx> <hostIP>:<hostPort> <options>"
			var h pod.UserServiceBackend
			_, err := fmt.Fscanf(reader, "%s %s %s %s", &t1, &t2, &t3, &t4)
			if err != nil {
				return nil, err
			}
			hostport := strings.Split(t3, ":")
			h.HostIP = hostport[0]
			port, err := strconv.ParseInt(hostport[1], 10, 32)
			if err != nil {
				return nil, err
			}
			h.HostPort = int(port)
			idxs := strings.Split(t2, "-")
			idxLong, err := strconv.ParseInt(idxs[1], 10, 32)
			if err != nil {
				return nil, err
			}
			idx := int(idxLong)
			services[idx].Hosts = append(services[idx].Hosts, h)
		}
	}

	return services, nil
}
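// GenerateServiceConfig, invoked by the ApplyServices variants, is the
// counterpart of GetServices above but is not part of this listing. The
// sketch below is an assumption derived purely from what GetServices parses:
// one "frontend" line per service and one tab-indented "server" line per
// backend, with the service index encoded after the dash in the server name
// and a trailing token ("check") so the four-field Fscanf succeeds. A real
// haproxy config would need more directives than this.
func GenerateServiceConfig(services []pod.UserService) []byte {
	var buf bytes.Buffer
	for idx, srv := range services {
		fmt.Fprintf(&buf, "frontend front-%d %s:%d\n", idx, srv.ServiceIP, srv.ServicePort)
		for i, host := range srv.Hosts {
			fmt.Fprintf(&buf, "\tserver svc%d-%d %s:%d check\n", i, idx, host.HostIP, host.HostPort)
		}
	}
	return buf.Bytes()
}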
// hyperHandlePodEvent reacts to pod lifecycle events from the VM. It returns
// true when the event loop for this pod should stop. This older variant
// removes the VM record itself and deletes the pod's containers and volumes
// from the database during kubernetes cleanup.
func hyperHandlePodEvent(vmResponse *types.VmResponse, data interface{}, mypod *hypervisor.PodStatus, vm *hypervisor.Vm) bool {
	daemon := data.(*Daemon)

	if vmResponse.Code == types.E_POD_FINISHED {
		if vm.Keep != types.VM_KEEP_NONE {
			mypod.Vm = ""
			vm.Status = types.S_VM_IDLE
			return false
		}
		mypod.SetPodContainerStatus(vmResponse.Data.([]uint32))
		mypod.Vm = ""
		vm.Status = types.S_VM_IDLE
		if mypod.Autoremove {
			daemon.CleanPod(mypod.Id)
			return false
		}
	} else if vmResponse.Code == types.E_VM_SHUTDOWN {
		if mypod.Status == types.S_POD_RUNNING {
			mypod.Status = types.S_POD_SUCCEEDED
			mypod.SetContainerStatus(types.S_POD_SUCCEEDED)
		}
		mypod.Vm = ""
		daemon.RemoveVm(vm.Id)
		if mypod.Type == "kubernetes" {
			switch mypod.Status {
			case types.S_POD_SUCCEEDED:
				if mypod.RestartPolicy == "always" {
					daemon.RestartPod(mypod)
					break
				}
				daemon.DeletePodFromDB(mypod.Id)
				for _, c := range mypod.Containers {
					glog.V(1).Infof("Ready to rm container: %s", c.Id)
					if _, _, err := daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
						glog.V(1).Infof("Error to rm container: %s", err.Error())
					}
				}
				daemon.DeletePodContainerFromDB(mypod.Id)
				daemon.DeleteVolumeId(mypod.Id)
			case types.S_POD_FAILED:
				if mypod.RestartPolicy != "never" {
					daemon.RestartPod(mypod)
					break
				}
				daemon.DeletePodFromDB(mypod.Id)
				for _, c := range mypod.Containers {
					glog.V(1).Infof("Ready to rm container: %s", c.Id)
					if _, _, err := daemon.DockerCli.SendCmdDelete(c.Id); err != nil {
						glog.V(1).Infof("Error to rm container: %s", err.Error())
					}
				}
				daemon.DeletePodContainerFromDB(mypod.Id)
				daemon.DeleteVolumeId(mypod.Id)
			}
		}
		return true
	}

	return false
}
// StartPod launches a pod, either in a freshly created VM or in the existing
// VM named by vmId, whose resources must match the pod spec.
func (daemon *Daemon) StartPod(podId, podArgs, vmId string, config interface{}, lazy, autoremove bool, keep int) (int, string, error) {
	var (
		podData []byte
		err     error
		mypod   *hypervisor.Pod
		vm      *hypervisor.Vm
	)

	if podArgs == "" {
		var ok bool
		mypod, ok = daemon.PodList[podId]
		if !ok {
			return -1, "", fmt.Errorf("Cannot find the POD instance of %s", podId)
		}
		podData, err = daemon.GetPodByName(podId)
		if err != nil {
			return -1, "", err
		}
	} else {
		podData = []byte(podArgs)
		if err := daemon.CreatePod(podId, podArgs, nil, autoremove); err != nil {
			glog.Error(err.Error())
			return -1, "", err
		}
		mypod = daemon.PodList[podId]
	}

	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return -1, "", err
	}

	defer func() {
		// Only tear down VMs this call created; reused VMs stay alive.
		if vm != nil && err != nil && vmId == "" {
			daemon.KillVm(vm.Id)
		}
	}()

	if vmId == "" {
		glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.Kernel, daemon.Initrd)
		var (
			cpu = 1
			mem = 128
		)
		if userPod.Resource.Vcpu > 0 {
			cpu = userPod.Resource.Vcpu
		}
		if userPod.Resource.Memory > 0 {
			mem = userPod.Resource.Memory
		}
		b := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: daemon.Kernel,
			Initrd: daemon.Initrd,
			Bios:   daemon.Bios,
			Cbfs:   daemon.Cbfs,
			Vbox:   daemon.VboxImage,
		}

		vm = daemon.NewVm("", cpu, mem, lazy, keep)
		err = vm.Launch(b)
		if err != nil {
			return -1, "", err
		}

		daemon.AddVm(vm)
	} else {
		var ok bool
		vm, ok = daemon.VmList[vmId]
		if !ok {
			err = fmt.Errorf("The VM %s doesn't exist", vmId)
			return -1, "", err
		}
		/* FIXME: check if any pod is running on this vm? */
		glog.Infof("find vm:%s", vm.Id)
		if userPod.Resource.Vcpu != vm.Cpu {
			err = fmt.Errorf("The new pod's cpu setting is different from the VM's cpu")
			return -1, "", err
		}
		if userPod.Resource.Memory != vm.Mem {
			err = fmt.Errorf("The new pod's memory setting is different from the VM's memory")
			return -1, "", err
		}
	}

	fmt.Printf("POD id is %s\n", podId)

	containerInfoList, volumeInfoList, err := daemon.ParsePod(mypod, userPod, vm.Id)
	if err != nil {
		return -1, "", err
	}

	vmResponse := vm.StartPod(mypod, userPod, containerInfoList, volumeInfoList)
	if vmResponse.Data == nil {
		err = fmt.Errorf("VM response data is nil")
		return vmResponse.Code, vmResponse.Cause, err
	}

	data := vmResponse.Data.([]byte)
	err = daemon.UpdateVmData(vm.Id, data)
	if err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vm.Id); err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	// XXX we should not close vmStatus chan, it will be closed in shutdown process
	return vmResponse.Code, vmResponse.Cause, nil
}
// createHyperPod converts the OCI spec into a user pod, creates a sandbox VM
// for it (default kernel via the factory, or the kernel/initrd pair named in
// the spec), and starts the pod in that VM. This older variant drives the
// sandbox through StartPod rather than InitSandbox/WaitInit.
func createHyperPod(f factory.Factory, spec *specs.Spec, defaultCpus int, defaultMemory int) (*HyperPod, error) {
	podId := fmt.Sprintf("pod-%s", pod.RandStr(10, "alpha"))
	userPod := pod.ConvertOCF2PureUserPod(spec)
	podStatus := hypervisor.NewPod(podId, userPod, nil)

	cpu := defaultCpus
	if userPod.Resource.Vcpu > 0 {
		cpu = userPod.Resource.Vcpu
	}
	mem := defaultMemory
	if userPod.Resource.Memory > 0 {
		mem = userPod.Resource.Memory
	}

	kernel := chooseKernel(spec)
	initrd := chooseInitrd(spec)
	glog.V(3).Infof("Using kernel: %s; Initrd: %s; vCPU: %d; Memory %d", kernel, initrd, cpu, mem)

	var (
		vm  *hypervisor.Vm
		err error
	)
	if len(kernel) == 0 && len(initrd) == 0 {
		vm, err = f.GetVm(cpu, mem)
		if err != nil {
			glog.V(1).Infof("Create VM failed with default kernel config: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with default kernel config")
	} else if len(kernel) == 0 || len(initrd) == 0 {
		// if the user specifies a kernel, they must also specify an initrd, and vice versa
		return nil, fmt.Errorf("You must specify an initrd if you specify a kernel, or vice-versa")
	} else {
		boot := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: kernel,
			Initrd: initrd,
		}

		vm, err = hypervisor.GetVm("", boot, true, false)
		if err != nil {
			glog.V(1).Infof("Create VM failed: %s", err.Error())
			return nil, err
		}
		glog.V(3).Infof("Creating VM with specific kernel config")
	}

	response := vm.StartPod(podStatus, userPod, nil, nil)
	if response.Data == nil {
		vm.Kill()
		glog.V(1).Infof("StartPod fail: QEMU response data is nil\n")
		return nil, fmt.Errorf("StartPod fail")
	}
	glog.V(1).Infof("result: code %d %s\n", response.Code, response.Cause)

	hp := &HyperPod{
		userPod:    userPod,
		podStatus:  podStatus,
		vm:         vm,
		Containers: make(map[string]*Container),
		Processes:  make(map[string]*Process),
	}

	// create Listener process running in its own netns
	if err = hp.startNsListener(); err != nil {
		hp.reap()
		glog.V(1).Infof("start ns listener fail: %s\n", err.Error())
		return nil, err
	}

	return hp, nil
}
// StartPod launches a pod and optionally attaches the provided IO streams to
// its containers (one stream per container, in order) before the pod starts.
func (daemon *Daemon) StartPod(podId, podArgs, vmId string, config interface{}, lazy, autoremove bool, keep int, streams []*hypervisor.TtyIO) (int, string, error) {
	glog.V(1).Infof("podArgs: %s", podArgs)

	var (
		podData []byte
		err     error
		mypod   *hypervisor.Pod
		vm      *hypervisor.Vm
	)

	mypod, podData, err = daemon.GetPod(podId, podArgs, autoremove)
	if err != nil {
		return -1, "", err
	}

	userPod, err := daemon.ProcessPodBytes(podData, podId)
	if err != nil {
		return -1, "", err
	}

	if !userPod.Tty && len(streams) > 0 {
		cause := "Spec does not support TTY, but IO streams are provided"
		return -1, cause, errors.New(cause)
	}

	vm, err = daemon.GetVM(vmId, &userPod.Resource, lazy, keep)
	if err != nil {
		return -1, "", err
	}

	defer func() {
		// Only tear down VMs this call created; reused VMs stay alive.
		if vm != nil && err != nil && vmId == "" {
			daemon.KillVm(vm.Id)
		}
	}()

	containerInfoList, volumeInfoList, err := daemon.PreparePod(mypod, userPod, vm.Id)
	if err != nil {
		return -1, "", err
	}

	// Attach the clients before the pod starts so no early output is lost.
	for idx, str := range streams {
		if idx >= len(userPod.Containers) {
			break
		}
		err = vm.Attach(str.Stdin, str.Stdout, str.ClientTag, containerInfoList[idx].Id, str.Callback, nil)
		if err != nil {
			glog.Errorf("Failed to attach client %s before start pod", str.ClientTag)
			return -1, "", err
		}
		glog.V(1).Infof("Attach client %s before start pod", str.ClientTag)
	}

	vmResponse := vm.StartPod(mypod, userPod, containerInfoList, volumeInfoList)
	if len(streams) > 0 && vmResponse.Code == types.E_OK {
		return 0, "", nil
	}
	if vmResponse.Data == nil {
		err = fmt.Errorf("VM response data is nil")
		return vmResponse.Code, vmResponse.Cause, err
	}

	data := vmResponse.Data.([]byte)
	err = daemon.UpdateVmData(vm.Id, data)
	if err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vm.Id); err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	// XXX we should not close vmStatus chan, it will be closed in shutdown process
	return vmResponse.Code, vmResponse.Cause, nil
}
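// A hedged usage sketch for the stream-attaching StartPod above: build one
// TtyIO per container to attach, then start the pod. The TtyIO field set
// follows its uses elsewhere in this listing, but the stdin/stdout types and
// the keep constant passed here are assumptions, and startPodAttached itself
// is a hypothetical caller, not part of the project.
func startPodAttached(daemon *Daemon, podId string, stdin io.ReadCloser, stdout io.WriteCloser) error {
	tty := &hypervisor.TtyIO{
		Stdin:     stdin,
		Stdout:    stdout,
		ClientTag: pod.RandStr(8, "alphanum"),
		Callback:  make(chan *types.VmResponse, 1),
	}
	code, cause, err := daemon.StartPod(podId, "", "", nil, false, false, types.VM_KEEP_NONE, []*hypervisor.TtyIO{tty})
	if err != nil {
		return fmt.Errorf("start pod %s failed (code %d): %s: %v", podId, code, cause, err)
	}
	return nil
}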