// This function will only be invoked during daemon start
func (daemon *Daemon) AssociateAllVms() error {
	for _, mypod := range daemon.PodList {
		if mypod.Vm == "" {
			continue
		}
		podData, err := daemon.GetPodByName(mypod.Id)
		if err != nil {
			continue
		}
		userPod, err := pod.ProcessPodBytes(podData)
		if err != nil {
			continue
		}
		glog.V(1).Infof("Associate the POD(%s) with VM(%s)", mypod.Id, mypod.Vm)
		vmData, err := daemon.GetVmData(mypod.Vm)
		if err != nil {
			continue
		}
		glog.V(1).Infof("The data for vm(%s) is %v", mypod.Vm, vmData)

		vm := daemon.NewVm(mypod.Vm, userPod.Resource.Vcpu, userPod.Resource.Memory, false, types.VM_KEEP_NONE)
		err = vm.AssociateVm(mypod, vmData)
		if err != nil {
			continue
		}

		daemon.AddVm(vm)
	}
	return nil
}
func (cli *HyperClient) PullImages(data string) error {
	userpod, err := pod.ProcessPodBytes([]byte(data))
	if err != nil {
		return err
	}
	for _, c := range userpod.Containers {
		if err = cli.PullImage(c.Image); err != nil {
			return err
		}
	}
	return nil
}
func ProcessPodBytes(body []byte, podId string) (*pod.UserPod, error) {
	var containers []pod.UserContainer
	var serviceDir string = path.Join(utils.HYPER_ROOT, "services", podId)

	userPod, err := pod.ProcessPodBytes(body)
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return nil, err
	}

	if len(userPod.Services) == 0 {
		return userPod, nil
	}

	userPod.Type = "service-discovery"
	serviceContainer := pod.UserContainer{
		Name:    ServiceDiscoveryContainerName(userPod.Name),
		Image:   servicediscovery.ServiceImage,
		Command: []string{"haproxy", "-D", "-f", "/usr/local/etc/haproxy/haproxy.cfg", "-p", "/var/run/haproxy.pid"},
	}

	serviceVolRef := pod.UserVolumeReference{
		Volume:   "service-volume",
		Path:     servicediscovery.ServiceVolume,
		ReadOnly: false,
	}

	/* PrepareServices will check service volume */
	serviceVolume := pod.UserVolume{
		Name:   "service-volume",
		Source: serviceDir,
		Driver: "vfs",
	}

	userPod.Volumes = append(userPod.Volumes, serviceVolume)
	serviceContainer.Volumes = append(serviceContainer.Volumes, serviceVolRef)

	containers = append(containers, serviceContainer)
	for _, c := range userPod.Containers {
		containers = append(containers, c)
	}
	userPod.Containers = containers

	return userPod, nil
}
func (cli *HyperClient) PullImages(data string) error {
	userpod, err := pod.ProcessPodBytes([]byte(data))
	if err != nil {
		return err
	}
	for _, c := range userpod.Containers {
		if err = cli.PullImage(c.Image); err != nil {
			return err
		}
	}

	/* Hack here, pull service discovery image `haproxy` */
	if len(userpod.Services) > 0 {
		return cli.PullImage("haproxy")
	}

	return nil
}
func NewPod(rawSpec []byte, id string, data interface{}, autoremove bool) (*Pod, error) {
	var err error

	p := &Pod{
		id:      id,
		ttyList: make(map[string]*hypervisor.TtyIO),
	}

	if p.spec, err = pod.ProcessPodBytes(rawSpec); err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return nil, err
	}

	if err = p.init(data, autoremove); err != nil {
		return nil, err
	}

	return p, nil
}
func (daemon *Daemon) StartPod(podId, podArgs, vmId string, config interface{}, lazy, autoremove bool, keep int) (int, string, error) {
	var (
		podData []byte
		err     error
		mypod   *hypervisor.Pod
		vm      *hypervisor.Vm = nil
	)

	if podArgs == "" {
		var ok bool
		mypod, ok = daemon.PodList[podId]
		if !ok {
			return -1, "", fmt.Errorf("Can not find the POD instance of %s", podId)
		}
		podData, err = daemon.GetPodByName(podId)
		if err != nil {
			return -1, "", err
		}
	} else {
		podData = []byte(podArgs)

		if err := daemon.CreatePod(podId, podArgs, nil, autoremove); err != nil {
			glog.Error(err.Error())
			return -1, "", err
		}
		mypod = daemon.PodList[podId]
	}

	userPod, err := pod.ProcessPodBytes(podData)
	if err != nil {
		return -1, "", err
	}

	defer func() {
		if vm != nil && err != nil && vmId == "" {
			daemon.KillVm(vm.Id)
		}
	}()

	if vmId == "" {
		glog.V(1).Infof("The config: kernel=%s, initrd=%s", daemon.Kernel, daemon.Initrd)
		var (
			cpu = 1
			mem = 128
		)

		if userPod.Resource.Vcpu > 0 {
			cpu = userPod.Resource.Vcpu
		}

		if userPod.Resource.Memory > 0 {
			mem = userPod.Resource.Memory
		}

		b := &hypervisor.BootConfig{
			CPU:    cpu,
			Memory: mem,
			Kernel: daemon.Kernel,
			Initrd: daemon.Initrd,
			Bios:   daemon.Bios,
			Cbfs:   daemon.Cbfs,
			Vbox:   daemon.VboxImage,
		}

		vm = daemon.NewVm("", cpu, mem, lazy, keep)
		err = vm.Launch(b)
		if err != nil {
			return -1, "", err
		}

		daemon.AddVm(vm)
	} else {
		var ok bool
		vm, ok = daemon.VmList[vmId]
		if !ok {
			err = fmt.Errorf("The VM %s doesn't exist", vmId)
			return -1, "", err
		}
		/* FIXME: check if any pod is running on this vm? */
		glog.Infof("find vm:%s", vm.Id)
		if userPod.Resource.Vcpu != vm.Cpu {
			err = fmt.Errorf("The new pod's cpu setting is different from the VM's cpu")
			return -1, "", err
		}

		if userPod.Resource.Memory != vm.Mem {
			err = fmt.Errorf("The new pod's memory setting is different from the VM's memory")
			return -1, "", err
		}
	}

	fmt.Printf("POD id is %s\n", podId)

	containerInfoList, volumeInfoList, err := daemon.ParsePod(mypod, userPod, vm.Id)
	if err != nil {
		return -1, "", err
	}

	vmResponse := vm.StartPod(mypod, userPod, containerInfoList, volumeInfoList)
	if vmResponse.Data == nil {
		err = fmt.Errorf("VM response data is nil")
		return vmResponse.Code, vmResponse.Cause, err
	}
	data := vmResponse.Data.([]byte)
	err = daemon.UpdateVmData(vm.Id, data)
	if err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}
	// add or update the Vm info for POD
	if err := daemon.UpdateVmByPod(podId, vm.Id); err != nil {
		glog.Error(err.Error())
		return -1, "", err
	}

	// XXX we should not close vmStatus chan, it will be closed in shutdown process
	return vmResponse.Code, vmResponse.Cause, nil
}
func (daemon *Daemon) CreatePod(podId, podArgs string, config interface{}, autoremove bool) error {
	glog.V(1).Infof("podArgs: %s", podArgs)

	userPod, err := pod.ProcessPodBytes([]byte(podArgs))
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return err
	}

	if err := userPod.Validate(); err != nil {
		return err
	}

	mypod := hypervisor.NewPod(podId, userPod)
	mypod.Handler.Handle = hyperHandlePodEvent
	mypod.Handler.Data = daemon
	mypod.Autoremove = autoremove

	// store the UserPod into the db
	if err := daemon.WritePodToDB(podId, []byte(podArgs)); err != nil {
		glog.V(1).Info("Found an error while saving the POD file")
		return err
	}

	containerIds, err := daemon.GetPodContainersByName(podId)
	if err != nil {
		glog.V(1).Info(err.Error())
	}

	if containerIds != nil {
		for _, id := range containerIds {
			var (
				name  string
				image string
			)
			if jsonResponse, err := daemon.DockerCli.GetContainerInfo(id); err == nil {
				name = jsonResponse.Name
				image = jsonResponse.Config.Image
			}
			mypod.AddContainer(id, name, image, []string{}, types.S_POD_CREATED)
		}
	} else {
		// Process the 'Containers' section
		glog.V(1).Info("Process the Containers section in POD SPEC\n")
		for _, c := range userPod.Containers {
			imgName := c.Image
			cId, _, err := daemon.DockerCli.SendCmdCreate(c.Name, imgName, []string{}, nil)
			if err != nil {
				glog.Error(err.Error())
				daemon.DeletePodFromDB(podId)
				return err
			}
			mypod.AddContainer(string(cId), c.Name, imgName, []string{}, types.S_POD_CREATED)
		}
	}

	daemon.AddPod(mypod)

	if err = daemon.WritePodAndContainers(podId); err != nil {
		glog.V(1).Info("Found an error while saving the Containers info")
		return err
	}

	return nil
}
func (daemon *Daemon) CmdPodInfo(job *engine.Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("Can not get Pod info without Pod ID")
	}

	daemon.PodsMutex.RLock()
	glog.V(2).Infof("lock read of PodList")
	defer daemon.PodsMutex.RUnlock()
	defer glog.V(2).Infof("unlock read of PodList")

	var (
		podId   string
		mypod   *hypervisor.Pod
		userpod *pod.UserPod
		ok      bool
		imageid string
	)

	if strings.Contains(job.Args[0], "pod-") {
		podId = job.Args[0]
		// We need to find the VM which is running the POD
		mypod, ok = daemon.PodList[podId]
		if !ok {
			return fmt.Errorf("Can not get Pod info with pod ID(%s)", podId)
		}
	} else {
		for _, p := range daemon.PodList {
			if p.Name == job.Args[0] {
				mypod = p
				break
			}
		}
		if mypod == nil {
			return fmt.Errorf("Can not get Pod info with pod name(%s)", job.Args[0])
		}
	}

	podData, err := daemon.GetPodByName(mypod.Id)
	if err == nil {
		if userpod, err = pod.ProcessPodBytes(podData); err != nil {
			return err
		}
	}

	// Construct the PodInfo JSON structure
	cStatus := []types.ContainerStatus{}
	containers := []types.Container{}

	for i, c := range mypod.Containers {
		ports := []types.ContainerPort{}
		envs := []types.EnvironmentVar{}
		vols := []types.VolumeMount{}
		jsonResponse, err := daemon.DockerCli.GetContainerInfo(c.Id)
		if err == nil {
			for _, e := range jsonResponse.Config.Env {
				envs = append(envs, types.EnvironmentVar{
					Env:   e[:strings.Index(e, "=")],
					Value: e[strings.Index(e, "=")+1:]})
			}
			imageid = jsonResponse.Image
		}
		for _, port := range userpod.Containers[i].Ports {
			ports = append(ports, types.ContainerPort{
				HostPort:      port.HostPort,
				ContainerPort: port.ContainerPort,
				Protocol:      port.Protocol})
		}
		for _, e := range userpod.Containers[i].Envs {
			envs = append(envs, types.EnvironmentVar{
				Env:   e.Env,
				Value: e.Value})
		}
		for _, v := range userpod.Containers[i].Volumes {
			vols = append(vols, types.VolumeMount{
				Name:      v.Volume,
				MountPath: v.Path,
				ReadOnly:  v.ReadOnly})
		}
		container := types.Container{
			Name:            c.Name,
			ContainerID:     c.Id,
			Image:           c.Image,
			ImageID:         imageid,
			Commands:        userpod.Containers[i].Command,
			Args:            []string{},
			Workdir:         userpod.Containers[i].Workdir,
			Ports:           ports,
			Environment:     envs,
			Volume:          vols,
			ImagePullPolicy: "",
		}
		containers = append(containers, container)

		// Set ContainerStatus
		s := types.ContainerStatus{}
		s.Name = c.Name
		s.ContainerID = c.Id
		s.Waiting = types.WaitingStatus{Reason: ""}
		s.Running = types.RunningStatus{StartedAt: ""}
		s.Terminated = types.TermStatus{}
		if c.Status == runvtypes.S_POD_CREATED {
			s.Waiting.Reason = "Pending"
			s.Phase = "pending"
		} else if c.Status == runvtypes.S_POD_RUNNING {
			s.Running.StartedAt = mypod.StartedAt
			s.Phase = "running"
		} else { // S_POD_FAILED or S_POD_SUCCEEDED
			if c.Status == runvtypes.S_POD_FAILED {
				s.Terminated.ExitCode = c.ExitCode
				s.Terminated.Reason = "Failed"
				s.Phase = "failed"
			} else {
				s.Terminated.ExitCode = c.ExitCode
				s.Terminated.Reason = "Succeeded"
				s.Phase = "succeeded"
			}
			s.Terminated.StartedAt = mypod.StartedAt
			s.Terminated.FinishedAt = mypod.FinishedAt
		}
		cStatus = append(cStatus, s)
	}

	podVolumes := []types.PodVolume{}
	for _, v := range userpod.Volumes {
		podVolumes = append(podVolumes, types.PodVolume{
			Name:     v.Name,
			HostPath: v.Source,
			Driver:   v.Driver})
	}

	spec := types.PodSpec{
		Volumes:    podVolumes,
		Containers: containers,
	}

	podIPs := []string{}
	if mypod.Vm != "" {
		var vm *hypervisor.Vm = nil
		for _, m := range daemon.VmList {
			if mypod.Vm == m.Id {
				vm = m
				break
			}
		}
		if vm != nil {
			podIPs = mypod.GetPodIP(vm)
		}
	}

	status := types.PodStatus{
		Status:    cStatus,
		HostIP:    utils.GetHostIP(),
		PodIP:     podIPs,
		StartTime: mypod.StartedAt,
	}

	switch mypod.Status {
	case runvtypes.S_POD_CREATED:
		status.Phase = "Pending"
	case runvtypes.S_POD_RUNNING:
		status.Phase = "Running"
	case runvtypes.S_POD_SUCCEEDED:
		status.Phase = "Succeeded"
	case runvtypes.S_POD_FAILED:
		status.Phase = "Failed"
	}

	data := types.PodInfo{
		Kind:       "Pod",
		ApiVersion: utils.APIVERSION,
		Vm:         mypod.Vm,
		Spec:       spec,
		Status:     status,
	}

	v := &engine.Env{}
	v.SetJson("data", data)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
func (daemon *Daemon) CmdContainerInfo(job *engine.Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("Can not get container info without container name")
	}

	daemon.PodsMutex.RLock()
	glog.V(2).Infof("lock read of PodList")
	defer daemon.PodsMutex.RUnlock()
	defer glog.V(2).Infof("unlock read of PodList")

	var (
		find    bool = false
		mypod   *hypervisor.Pod
		c       *hypervisor.Container
		i       int = 0
		imageid string
		userpod *pod.UserPod
		name    string = job.Args[0]
	)

	if name == "" {
		return fmt.Errorf("Null container name")
	}
	glog.Infof(name)

	for _, mypod = range daemon.PodList {
		for _, c = range mypod.Containers {
			glog.Infof(c.Name)
			if name[0] != '/' {
				if c.Name == "/"+name {
					find = true
					break
				}
			} else {
				if c.Name == name {
					find = true
					break
				}
			}
			if c.Id == name {
				find = true
				break
			}
		}
		if find {
			break
		}
	}

	if !find {
		return fmt.Errorf("Can not find container by name(%s)", name)
	}

	podData, err := daemon.GetPodByName(mypod.Id)
	if err == nil {
		if userpod, err = pod.ProcessPodBytes(podData); err != nil {
			return err
		}
	}

	for k, v := range mypod.Containers {
		if v.Name == c.Name {
			i = k
			break
		}
	}

	ports := []types.ContainerPort{}
	envs := []types.EnvironmentVar{}
	vols := []types.VolumeMount{}
	jsonResponse, err := daemon.DockerCli.GetContainerInfo(c.Id)
	if err == nil {
		for _, e := range jsonResponse.Config.Env {
			envs = append(envs, types.EnvironmentVar{
				Env:   e[:strings.Index(e, "=")],
				Value: e[strings.Index(e, "=")+1:]})
		}
		imageid = jsonResponse.Image
	}
	for _, port := range userpod.Containers[i].Ports {
		ports = append(ports, types.ContainerPort{
			HostPort:      port.HostPort,
			ContainerPort: port.ContainerPort,
			Protocol:      port.Protocol})
	}
	for _, e := range userpod.Containers[i].Envs {
		envs = append(envs, types.EnvironmentVar{
			Env:   e.Env,
			Value: e.Value})
	}
	for _, v := range userpod.Containers[i].Volumes {
		vols = append(vols, types.VolumeMount{
			Name:      v.Volume,
			MountPath: v.Path,
			ReadOnly:  v.ReadOnly})
	}

	s := types.ContainerStatus{}
	s.Name = c.Name
	s.ContainerID = c.Id
	s.Waiting = types.WaitingStatus{Reason: ""}
	s.Running = types.RunningStatus{StartedAt: ""}
	s.Terminated = types.TermStatus{}

	if c.Status == runvtypes.S_POD_CREATED {
		s.Waiting.Reason = "Pending"
		s.Phase = "pending"
	} else if c.Status == runvtypes.S_POD_RUNNING {
		s.Running.StartedAt = mypod.StartedAt
		s.Phase = "running"
	} else { // S_POD_FAILED or S_POD_SUCCEEDED
		if c.Status == runvtypes.S_POD_FAILED {
			s.Terminated.ExitCode = c.ExitCode
			s.Terminated.Reason = "Failed"
			s.Phase = "failed"
		} else {
			s.Terminated.ExitCode = c.ExitCode
			s.Terminated.Reason = "Succeeded"
			s.Phase = "succeeded"
		}
		s.Terminated.StartedAt = mypod.StartedAt
		s.Terminated.FinishedAt = mypod.FinishedAt
	}

	container := types.ContainerInfo{
		Name:            c.Name,
		ContainerID:     c.Id,
		Image:           c.Image,
		ImageID:         imageid,
		Commands:        userpod.Containers[i].Command,
		Args:            []string{},
		Workdir:         userpod.Containers[i].Workdir,
		Ports:           ports,
		Environment:     envs,
		Volume:          vols,
		ImagePullPolicy: "",
		Status:          s,
	}

	v := &engine.Env{}
	v.SetJson("data", container)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}

	return nil
}
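Every call site above hands raw spec bytes to pod.ProcessPodBytes and works with the returned *pod.UserPod. As a rough, self-contained sketch of what that decode step boils down to, the snippet below unmarshals JSON into a pod structure and rejects an obviously invalid spec; the userPodSketch type, its fields, and the JSON tags are illustrative assumptions for this sketch only, not the actual pod package definitions.

package main

import (
	"encoding/json"
	"fmt"
)

// userContainerSketch and userPodSketch are illustrative stand-ins for
// pod.UserContainer / pod.UserPod; the field names and JSON tags are
// assumptions made for this sketch only.
type userContainerSketch struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

type userPodSketch struct {
	Name       string                `json:"name"`
	Containers []userContainerSketch `json:"containers"`
}

// processPodBytesSketch decodes a pod spec from raw JSON bytes and applies a
// basic sanity check, roughly the shape of work a ProcessPodBytes-style
// helper has to do before callers can use the result.
func processPodBytesSketch(body []byte) (*userPodSketch, error) {
	var p userPodSketch
	if err := json.Unmarshal(body, &p); err != nil {
		return nil, fmt.Errorf("parse pod spec: %v", err)
	}
	if len(p.Containers) == 0 {
		return nil, fmt.Errorf("pod %q has no containers", p.Name)
	}
	return &p, nil
}

func main() {
	spec := []byte(`{"name": "demo", "containers": [{"name": "web", "image": "nginx"}]}`)
	p, err := processPodBytesSketch(spec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pod %s has %d container(s)\n", p.Name, len(p.Containers))
}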