func (cli *HyperClient) StopPod(podId, stopVm string) (int, string, error) {
	v := url.Values{}
	v.Set("podId", podId)
	v.Set("stopVm", stopVm)
	body, _, err := readBody(cli.call("POST", "/pod/stop?"+v.Encode(), nil, nil))
	if err != nil {
		if strings.Contains(err.Error(), "leveldb: not found") {
			return -1, "", fmt.Errorf("Cannot find pod %s to stop, please check your pod ID", podId)
		}
		return -1, "", err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return -1, "", err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return -1, "", err
	}
	out.Close()
	// In the response, 'ID' holds the pod ID, 'Code' should be E_SHUTDOWN,
	// and 'Cause' carries the failure reason, if any.
	if remoteInfo.Exists("ID") {
		// TODO ...
	}
	return remoteInfo.GetInt("Code"), remoteInfo.Get("Cause"), nil
}
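// A minimal caller sketch for StopPod (hypothetical: assumes cli is an
// initialized HyperClient; the "yes" value for stopVm is illustrative, and
// treating E_SHUTDOWN as success follows the comment inside StopPod):
//
//	code, cause, err := cli.StopPod(podId, "yes")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if code != types.E_SHUTDOWN {
//		log.Printf("stop failed: code=%d, cause=%s", code, cause)
//	}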
func (daemon *Daemon) CmdInfo(job *engine.Job) error {
	cli := daemon.dockerCli
	body, _, err := cli.SendCmdInfo("")
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return fmt.Errorf("Error while reading remote info!\n")
	}
	out.Close()
	v := &engine.Env{}
	v.Set("ID", daemon.ID)
	if remoteInfo.Exists("Containers") {
		v.SetInt("Containers", remoteInfo.GetInt("Containers"))
	}
	// Get system information
	meminfo, err := sysinfo.GetMemInfo()
	if err != nil {
		return err
	}
	osinfo, err := sysinfo.GetOSInfo()
	if err != nil {
		return err
	}
	v.SetInt64("MemTotal", int64(meminfo.MemTotal))
	v.SetInt64("Pods", daemon.GetPodNum())
	v.Set("Operating System", osinfo.PrettyName)
	if hostname, err := os.Hostname(); err == nil {
		v.SetJson("Name", hostname)
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}
	return nil
}
func (cli *HyperClient) GetContainerByPod(podId string) (string, error) {
	v := url.Values{}
	v.Set("item", "container")
	body, _, err := readBody(cli.call("GET", "/list?"+v.Encode(), nil, nil))
	if err != nil {
		return "", err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return "", err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return "", err
	}
	out.Close()
	containerResponse := remoteInfo.GetList("cData")
	for _, c := range containerResponse {
		fields := strings.Split(c, ":")
		containerId := fields[0]
		if podId == fields[1] {
			return containerId, nil
		}
	}
	return "", fmt.Errorf("Container not found")
}
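// Note: GetContainerByPod (above) and HyperCmdList (below) both parse each
// "cData" entry as a colon-separated record; judging from the field indexes
// used, the layout appears to be containerId:podId:status.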
func (cli *HyperClient) GetPodInfo(podName string) (string, error) {
	// Get the pod or container info before we start the exec
	v := url.Values{}
	v.Set("podName", podName)
	body, _, err := readBody(cli.call("GET", "/pod/info?"+v.Encode(), nil, nil))
	if err != nil {
		fmt.Printf("An error was encountered: %s\n", err)
		return "", err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return "", err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return "", err
	}
	out.Close()
	if remoteInfo.Exists("hostname") {
		return remoteInfo.Get("hostname"), nil
	}
	return "", nil
}
func (daemon *Daemon) CmdCreate(job *engine.Job) error {
	imgName := job.Args[0]
	cli := daemon.dockerCli
	body, _, err := cli.SendCmdCreate(imgName)
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return fmt.Errorf("Error while reading remote info!\n")
	}
	out.Close()
	v := &engine.Env{}
	v.SetJson("ID", daemon.ID)
	containerId := remoteInfo.Get("Id")
	if containerId != "" {
		v.Set("ContainerID", containerId)
		glog.V(3).Infof("The ContainerID is %s\n", containerId)
	} else {
		return fmt.Errorf("Hyper ERROR: an error was encountered while creating the container!\n")
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return err
	}
	return nil
}
func (cli *HyperClient) CreatePod(jsonbody string) (string, error) {
	v := url.Values{}
	v.Set("podArgs", jsonbody)
	body, _, err := readBody(cli.call("POST", "/pod/create?"+v.Encode(), nil, nil))
	if err != nil {
		return "", err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return "", err
	}
	if _, err := out.Write(body); err != nil {
		return "", fmt.Errorf("Error reading remote info: %s", err)
	}
	out.Close()
	errCode := remoteInfo.GetInt("Code")
	if errCode != types.E_OK {
		// For E_BAD_REQUEST and E_FAILED the server supplies a Cause string;
		// other failure codes (e.g. E_CONTEXT_INIT_FAIL, E_DEVICE_FAIL,
		// E_QMP_INIT_FAIL, E_QMP_COMMAND_FAIL) are reported by number.
		if errCode != types.E_BAD_REQUEST && errCode != types.E_FAILED {
			return "", fmt.Errorf("Error code is %d", errCode)
		}
		return "", fmt.Errorf("Cause is %s", remoteInfo.Get("Cause"))
	}
	return remoteInfo.Get("ID"), nil
}
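// A minimal usage sketch for CreatePod (hypothetical: assumes cli is an
// initialized HyperClient; the spec below is illustrative only -- the real
// schema is whatever pod.ProcessPodBytes on the daemon side accepts):
//
//	spec := `{"id": "demo", "containers": [{"image": "busybox"}]}`
//	podId, err := cli.CreatePod(spec)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("Pod created: %s\n", podId)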
func (cli *HyperClient) HyperCmdVm(args ...string) error {
	var parser = gflag.NewParser(nil, gflag.Default)
	parser.Usage = "vm\n\nRun a VM without any pod running on it"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	// Only run a new VM
	body, _, err := readBody(cli.call("POST", "/vm/create", nil, nil))
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return fmt.Errorf("Error reading remote info: %s", err)
	}
	out.Close()
	errCode := remoteInfo.GetInt("Code")
	if errCode != types.E_OK {
		// For E_BAD_REQUEST and E_FAILED the server supplies a Cause string;
		// other failure codes (e.g. E_CONTEXT_INIT_FAIL, E_DEVICE_FAIL,
		// E_QMP_INIT_FAIL, E_QMP_COMMAND_FAIL) are reported by number.
		if errCode != types.E_BAD_REQUEST && errCode != types.E_FAILED {
			return fmt.Errorf("Error code is %d", errCode)
		}
		return fmt.Errorf("Cause is %s", remoteInfo.Get("Cause"))
	}
	fmt.Printf("New VM id is %s\n", remoteInfo.Get("ID"))
	return nil
}
func (cli *HyperClient) HyperCmdRm(args ...string) error {
	var parser = gflag.NewParser(nil, gflag.Default)
	parser.Usage = "rm POD_ID\n\ndestroy a pod"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	if len(args) < 2 {
		return fmt.Errorf("\"rm\" requires a minimum of 1 argument, please provide the pod ID.\n")
	}
	podId := args[1]
	v := url.Values{}
	v.Set("podId", podId)
	body, _, err := readBody(cli.call("POST", "/pod/remove?"+v.Encode(), nil, nil))
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		return fmt.Errorf("Error reading remote info: %s", err)
	}
	out.Close()
	errCode := remoteInfo.GetInt("Code")
	if errCode != types.E_OK && errCode != types.E_VM_SHUTDOWN {
		return fmt.Errorf("Error code is %d, Cause is %s", errCode, remoteInfo.Get("Cause"))
	}
	fmt.Printf("Pod(%s) was deleted successfully!\n", podId)
	return nil
}
func (cli *HyperClient) HyperCmdKill(args ...string) error {
	var parser = gflag.NewParser(nil, gflag.Default)
	parser.Usage = "kill VM_ID\n\nterminate a VM instance"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	if len(args) < 2 {
		return fmt.Errorf("\"kill\" requires a minimum of 1 argument, please provide the VM ID.\n")
	}
	vmId := args[1]
	v := url.Values{}
	v.Set("vm", vmId)
	body, _, err := readBody(cli.call("POST", "/vm/kill?"+v.Encode(), nil, nil))
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return err
	}
	out.Close()
	if remoteInfo.Exists("ID") {
		// TODO ...
	}
	return nil
}
// We need this *info* function to get the whole status from the docker daemon.
func (cli *HyperClient) HyperCmdInfo(args ...string) error {
	var parser = gflag.NewParser(nil, gflag.Default)
	parser.Usage = "info\n\ndisplay system-wide information"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	body, _, err := readBody(cli.call("GET", "/info", nil, nil))
	if err != nil {
		fmt.Printf("An error was encountered: %s\n", err)
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return err
	}
	out.Close()
	if remoteInfo.Exists("Containers") {
		fmt.Printf("Containers: %d\n", remoteInfo.GetInt("Containers"))
	}
	fmt.Printf("PODs: %d\n", remoteInfo.GetInt("Pods"))
	memTotal := remoteInfo.GetInt("MemTotal")
	fmt.Printf("Total Memory: %d KB\n", memTotal)
	fmt.Printf("Operating System: %s\n", remoteInfo.Get("Operating System"))
	return nil
}
func NewDaemonFromDirectory(eng *engine.Engine) (*Daemon, error) {
	// Register portallocator release on shutdown
	eng.OnShutdown(func() {
		if err := portallocator.ReleaseAll(); err != nil {
			glog.Errorf("portallocator.ReleaseAll(): %s", err.Error())
		}
	})
	// Check that the system is supported and we have sufficient privileges
	if runtime.GOOS != "linux" {
		return nil, fmt.Errorf("The Docker daemon is only supported on linux")
	}
	if os.Geteuid() != 0 {
		return nil, fmt.Errorf("The Docker daemon needs to be run as root")
	}
	if err := checkKernel(); err != nil {
		return nil, err
	}

	cfg, err := goconfig.LoadConfigFile(eng.Config)
	if err != nil {
		glog.Errorf("Read config file (%s) failed, %s", eng.Config, err.Error())
		return nil, err
	}
	kernel, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Kernel")
	initrd, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Initrd")
	glog.V(0).Infof("The config: kernel=%s, initrd=%s", kernel, initrd)
	biface, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Bridge")
	bridgeip, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "BridgeIP")
	glog.V(0).Infof("The config: bridge=%s, ip=%s", biface, bridgeip)
	bios, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Bios")
	cbfs, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Cbfs")
	glog.V(0).Infof("The config: bios=%s, cbfs=%s", bios, cbfs)
	host, _ := cfg.GetValue(goconfig.DEFAULT_SECTION, "Host")

	var tempdir = "/var/run/hyper/"
	os.Setenv("TMPDIR", tempdir)
	if err := os.MkdirAll(tempdir, 0755); err != nil && !os.IsExist(err) {
		return nil, err
	}
	var realRoot = "/var/lib/hyper/"
	// Create the root directory if it doesn't exist
	if err := os.MkdirAll(realRoot, 0755); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if err := network.InitNetwork(biface, bridgeip); err != nil {
		glog.Errorf("InitNetwork failed, %s\n", err.Error())
		return nil, err
	}

	var (
		proto   = "unix"
		addr    = "/var/run/docker.sock"
		db_file = fmt.Sprintf("%s/hyper.db", realRoot)
	)
	db, err := leveldb.OpenFile(db_file, nil)
	if err != nil {
		glog.Errorf("open leveldb file failed, %s\n", err.Error())
		return nil, err
	}
	dockerCli := docker.NewDockerCli("", proto, addr, nil)
	qemuchan := map[string]interface{}{}
	qemuclient := map[string]interface{}{}
	subQemuClient := map[string]interface{}{}
	cList := []*Container{}
	pList := map[string]*Pod{}
	vList := map[string]*Vm{}
	daemon := &Daemon{
		ID:                fmt.Sprintf("%d", os.Getpid()),
		db:                db,
		eng:               eng,
		kernel:            kernel,
		initrd:            initrd,
		bios:              bios,
		cbfs:              cbfs,
		dockerCli:         dockerCli,
		containerList:     cList,
		podList:           pList,
		vmList:            vList,
		qemuChan:          qemuchan,
		qemuClientChan:    qemuclient,
		subQemuClientChan: subQemuClient,
		Host:              host,
	}

	stor := &Storage{}
	// Get the docker daemon info
	body, _, err := dockerCli.SendCmdInfo()
	if err != nil {
		return nil, err
	}
	outInfo := engine.NewOutput()
	remoteInfo, err := outInfo.AddEnv()
	if err != nil {
		return nil, err
	}
	if _, err := outInfo.Write(body); err != nil {
		return nil, fmt.Errorf("Error while reading remote info!\n")
	}
	outInfo.Close()
	storageDriver := remoteInfo.Get("Driver")
	stor.StorageType = storageDriver
	if storageDriver == "devicemapper" {
		if remoteInfo.Exists("DriverStatus") {
			var driverStatus [][2]string
			if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
				return nil, err
			}
			for _, pair := range driverStatus {
				if pair[0] == "Pool Name" {
					stor.PoolName = pair[1]
				}
				if pair[0] == "Backing Filesystem" {
					if strings.Contains(pair[1], "ext") {
						stor.Fstype = "ext4"
					} else if strings.Contains(pair[1], "xfs") {
						stor.Fstype = "xfs"
					} else {
						stor.Fstype = "dir"
					}
					break
				}
			}
		}
	} else {
		if remoteInfo.Exists("DriverStatus") {
			var driverStatus [][2]string
			if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
				return nil, err
			}
			for _, pair := range driverStatus {
				if pair[0] == "Root Dir" {
					stor.RootPath = pair[1]
				}
				if pair[0] == "Backing Filesystem" {
					stor.Fstype = "dir"
					break
				}
			}
		}
	}
	daemon.Storage = stor
	dmPool := dm.DeviceMapper{
		Datafile:         "/var/lib/hyper/data",
		Metadatafile:     "/var/lib/hyper/metadata",
		DataLoopFile:     "/dev/loop6",
		MetadataLoopFile: "/dev/loop7",
		PoolName:         "hyper-volume-pool",
		Size:             20971520 * 512,
	}
	if storageDriver == "devicemapper" {
		daemon.Storage.DmPoolData = &dmPool
		// Prepare the DeviceMapper storage
		if err := dm.CreatePool(&dmPool); err != nil {
			return nil, err
		}
	} else {
		daemon.CleanVolume(0)
	}
	eng.OnShutdown(func() {
		if err := daemon.shutdown(); err != nil {
			glog.Errorf("Error during daemon.shutdown(): %v", err)
		}
	})
	return daemon, nil
}
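// For reference, the config file parsed by NewDaemonFromDirectory is a plain
// key=value file read with goconfig; the keys below are the ones queried
// above, while the values are purely illustrative:
//
//	Kernel=/var/lib/hyper/kernel
//	Initrd=/var/lib/hyper/hyper-initrd.img
//	Bridge=hyper0
//	BridgeIP=192.168.123.1/24
//	Bios=/var/lib/hyper/bios.bin
//	Cbfs=/var/lib/hyper/cbfs.rom
//	Host=unix:///var/run/hyper.sock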
func (daemon *Daemon) CreatePod(podArgs, podId string, wg *sync.WaitGroup) error {
	userPod, err := pod.ProcessPodBytes([]byte(podArgs))
	if err != nil {
		glog.V(1).Infof("Process POD file error: %s", err.Error())
		return err
	}
	if err := userPod.Validate(); err != nil {
		return err
	}
	// Store the UserPod into the db
	if err := daemon.WritePodToDB(podId, []byte(podArgs)); err != nil {
		glog.V(1).Info("Found an error while saving the POD file")
		return err
	}
	containerIds, err := daemon.GetPodContainersByName(podId)
	if err != nil {
		glog.V(1).Info(err.Error())
	}
	if containerIds != nil {
		for _, id := range containerIds {
			daemon.SetPodByContainer(id, podId, "", "", []string{}, types.S_POD_CREATED)
		}
	} else {
		// Process the 'Containers' section
		glog.V(1).Info("Process the Containers section in POD SPEC\n")
		for _, c := range userPod.Containers {
			imgName := c.Image
			body, _, err := daemon.dockerCli.SendCmdCreate(imgName)
			if err != nil {
				glog.Error(err.Error())
				daemon.DeletePodFromDB(podId)
				return err
			}
			out := engine.NewOutput()
			remoteInfo, err := out.AddEnv()
			if err != nil {
				daemon.DeletePodFromDB(podId)
				return err
			}
			if _, err := out.Write(body); err != nil {
				daemon.DeletePodFromDB(podId)
				return fmt.Errorf("Error while reading remote info!\n")
			}
			out.Close()
			containerId := remoteInfo.Get("Id")
			daemon.SetPodByContainer(containerId, podId, "", "", []string{}, types.S_POD_CREATED)
		}
	}
	containers := []*Container{}
	for _, v := range daemon.containerList {
		if v.PodId == podId {
			containers = append(containers, v)
		}
	}
	mypod := &Pod{
		Id:            podId,
		Name:          userPod.Name,
		Vm:            "",
		Wg:            wg,
		Containers:    containers,
		Status:        types.S_POD_CREATED,
		Type:          userPod.Type,
		RestartPolicy: userPod.Containers[0].RestartPolicy,
	}
	daemon.AddPod(mypod)
	return nil
}
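// A hypothetical invocation sketch for Daemon.CreatePod (assumes specBytes
// holds the pod JSON and that the caller manages the WaitGroup's lifecycle):
//
//	var wg sync.WaitGroup
//	if err := daemon.CreatePod(string(specBytes), podId, &wg); err != nil {
//		glog.Error(err.Error())
//	}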
func (cli *HyperClient) HyperCmdStart(args ...string) error {
	var opts struct {
		// OnlyVm bool `long:"onlyvm" default:"false" value-name:"false" description:"Only start a new VM"`
		Cpu int `short:"c" long:"cpu" default:"1" value-name:"1" description:"CPU number for the VM"`
		Mem int `short:"m" long:"memory" default:"128" value-name:"128" description:"Memory size (MB) for the VM"`
	}
	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "start [-c 1 -m 128] POD_ID\n\nlaunch a 'pending' pod"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	// Disabled code path: start a bare VM without a pod (see the
	// commented-out OnlyVm flag above).
	if false {
		// Only run a new VM
		v := url.Values{}
		v.Set("cpu", fmt.Sprintf("%d", opts.Cpu))
		v.Set("mem", fmt.Sprintf("%d", opts.Mem))
		body, _, err := readBody(cli.call("POST", "/vm/create?"+v.Encode(), nil, nil))
		if err != nil {
			return err
		}
		out := engine.NewOutput()
		remoteInfo, err := out.AddEnv()
		if err != nil {
			return err
		}
		if _, err := out.Write(body); err != nil {
			return fmt.Errorf("Error reading remote info: %s", err)
		}
		out.Close()
		errCode := remoteInfo.GetInt("Code")
		if errCode != types.E_OK {
			// For E_BAD_REQUEST and E_FAILED the server supplies a Cause string;
			// other failure codes (e.g. E_CONTEXT_INIT_FAIL, E_DEVICE_FAIL,
			// E_QMP_INIT_FAIL, E_QMP_COMMAND_FAIL) are reported by number.
			if errCode != types.E_BAD_REQUEST && errCode != types.E_FAILED {
				return fmt.Errorf("Error code is %d", errCode)
			}
			return fmt.Errorf("Cause is %s", remoteInfo.Get("Cause"))
		}
		fmt.Printf("New VM id is %s\n", remoteInfo.Get("ID"))
		return nil
	}
	if len(args) < 2 {
		return fmt.Errorf("\"start\" requires a minimum of 1 argument, please provide the pod ID.\n")
	}
	var (
		podId string
		vmId  string
	)
	podId = args[1]
	if len(args) == 3 {
		vmId = args[2]
	}
	// fmt.Printf("Pod ID is %s, VM ID is %s\n", podId, vmId)
	_, err = cli.StartPod(podId, vmId)
	if err != nil {
		return err
	}
	fmt.Printf("Successfully started the Pod(%s)\n", podId)
	return nil
}
func (cli *HyperClient) HyperCmdList(args ...string) error {
	var parser = gflag.NewParser(nil, gflag.Default)
	parser.Usage = "list [pod|vm|container]\n\nlist all pods, VMs or containers"
	args, err := parser.Parse()
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		}
		return nil
	}
	var item string
	if len(args) == 1 {
		item = "pod"
	} else {
		item = args[1]
	}
	if item != "pod" && item != "vm" && item != "container" {
		return fmt.Errorf("Error: %s cannot list %q, only pod, vm and container are supported!", os.Args[0], item)
	}
	v := url.Values{}
	v.Set("item", item)
	body, _, err := readBody(cli.call("GET", "/list?"+v.Encode(), nil, nil))
	if err != nil {
		return err
	}
	out := engine.NewOutput()
	remoteInfo, err := out.AddEnv()
	if err != nil {
		return err
	}
	if _, err := out.Write(body); err != nil {
		fmt.Printf("Error reading remote info: %s", err)
		return err
	}
	out.Close()
	var (
		vmResponse        = []string{}
		podResponse       = []string{}
		containerResponse = []string{}
	)
	if remoteInfo.Exists("item") {
		item = remoteInfo.Get("item")
	}
	if remoteInfo.Exists("Error") {
		return fmt.Errorf("Found an error while getting %s list: %s", item, remoteInfo.Get("Error"))
	}
	switch item {
	case "vm":
		vmResponse = remoteInfo.GetList("vmData")
	case "pod":
		podResponse = remoteInfo.GetList("podData")
	case "container":
		containerResponse = remoteInfo.GetList("cData")
	}
	if item == "vm" {
		fmt.Printf("%15s%20s\n", "VM name", "Status")
		for _, vm := range vmResponse {
			fields := strings.Split(vm, ":")
			fmt.Printf("%15s%20s\n", fields[0], fields[2])
		}
	}
	if item == "pod" {
		fmt.Printf("%15s%30s%20s%10s\n", "POD ID", "POD Name", "VM name", "Status")
		for _, p := range podResponse {
			fields := strings.Split(p, ":")
			podName := fields[1]
			if len(fields[1]) > 27 {
				podName = fields[1][:27]
			}
			fmt.Printf("%15s%30s%20s%10s\n", fields[0], podName, fields[2], fields[3])
		}
	}
	if item == "container" {
		fmt.Printf("%-66s%15s%10s\n", "Container ID", "POD ID", "Status")
		for _, c := range containerResponse {
			fields := strings.Split(c, ":")
			fmt.Printf("%-66s%15s%10s\n", fields[0], fields[1], fields[2])
		}
	}
	return nil
}