// waitInitReady connects to the hyper control socket and blocks until the
// guest init reports readiness, then hands the connection over to the
// command loop.
func waitInitReady(ctx *VmContext) {
	conn, err := UnixSocketConnect(ctx.HyperSockName)
	if err != nil {
		glog.Error("Cannot connect to hyper socket ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "Cannot connect to hyper socket " + err.Error(),
		}
		return
	}

	glog.Info("Waiting for init messages...")

	msg, err := readVmMessage(conn.(*net.UnixConn))
	if err != nil {
		glog.Error("read init message failed... ", err.Error())
		ctx.Hub <- &InitFailedEvent{
			Reason: "read init message failed... " + err.Error(),
		}
		conn.Close()
	} else if msg.code == INIT_READY {
		glog.Info("Get init ready message")
		ctx.Hub <- &InitConnectedEvent{conn: conn.(*net.UnixConn)}
		go waitCmdToInit(ctx, conn.(*net.UnixConn))
	} else {
		glog.Warningf("Get init message %d", msg.code)
		ctx.Hub <- &InitFailedEvent{
			Reason: fmt.Sprintf("Get init message %d", msg.code),
		}
		conn.Close()
	}
}
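// For context, a minimal sketch of what readVmMessage might look like,
// assuming a hyperstart-style frame of a 4-byte big-endian opcode followed
// by a 4-byte big-endian total length that includes the 8-byte header.
// The DecodedMessage shape and framing here are assumptions, not the
// verified upstream definitions; requires "encoding/binary" and "io"
// alongside "net".
type DecodedMessage struct {
	code    uint32
	message []byte
}

func readVmMessage(conn *net.UnixConn) (*DecodedMessage, error) {
	hdr := make([]byte, 8)
	if _, err := io.ReadFull(conn, hdr); err != nil {
		return nil, err
	}
	code := binary.BigEndian.Uint32(hdr[:4])
	length := binary.BigEndian.Uint32(hdr[4:8])
	var payload []byte
	if length > 8 {
		// the declared length counts the header, so the payload is the rest
		payload = make([]byte, length-8)
		if _, err := io.ReadFull(conn, payload); err != nil {
			return nil, err
		}
	}
	return &DecodedMessage{code: code, message: payload}, nil
}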
// makeHttpHandler wraps an HttpApiFunc with request logging, client/server
// version checks, and CORS headers.
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
		glog.V(0).Infof("Calling %s %s\n", localMethod, localRoute)

		if logging {
			glog.V(1).Infof("%s %s\n", r.Method, r.RequestURI)
		}

		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				glog.Warningf("client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		version := version.Version(mux.Vars(r)["version"])
		if version == "" {
			version = utils.APIVERSION
		}
		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}

		if version.GreaterThan(utils.APIVERSION) {
			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, utils.APIVERSION).Error(), http.StatusNotFound)
			return
		}

		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
			glog.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
			httpError(w, err)
		}
	}
}
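// Usage sketch (not this repo's actual router setup): handlers produced by
// makeHttpHandler plug straight into a gorilla/mux router, Docker-style.
// The route map, the version pattern, and the getInfo handler below are
// illustrative assumptions.
func createRouterSketch(eng *engine.Engine, logging bool, corsHeaders string, dockerVersion version.Version) *mux.Router {
	r := mux.NewRouter()
	routes := map[string]map[string]HttpApiFunc{
		"GET": {
			"/info": getInfo, // hypothetical handler
		},
	}
	for method, mappings := range routes {
		for route, fct := range mappings {
			f := makeHttpHandler(eng, logging, method, route, fct, corsHeaders, dockerVersion)
			// register both the versioned and unversioned paths
			r.Path("/v{version:[0-9.]+}" + route).Methods(method).HandlerFunc(f)
			r.Path(route).Methods(method).HandlerFunc(f)
		}
	}
	return r
}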
// UmountVolume unmounts a volume from the share directory, falling back to a
// lazy unmount, then reports the result on the hub channel.
func UmountVolume(shareDir, volPath string, name string, hub chan QemuEvent) {
	mount := path.Join(shareDir, volPath)
	success := true
	err := syscall.Unmount(mount, 0)
	if err != nil {
		glog.Warningf("Cannot umount volume %s: %s", mount, err.Error())
		err = syscall.Unmount(mount, syscall.MNT_DETACH)
		if err != nil {
			glog.Warningf("Cannot lazy umount volume %s: %s", mount, err.Error())
			success = false
		} else {
			success = true
		}
	}
	if success {
		os.Remove(mount)
	}

	// notify the caller that the volume has been unmounted
	hub <- &VolumeUnmounted{Name: name, Success: success}
}
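// Caller sketch (paths, names, and the channel size are illustrative): the
// unmount helpers in this file run asynchronously and report back on the
// hub channel, so a caller launches them in a goroutine and consumes the
// resulting event.
func unmountVolumeExample() {
	hub := make(chan QemuEvent, 16)
	go UmountVolume("/var/run/hyper/shared", "vol1", "vol1", hub)
	if ev, ok := (<-hub).(*VolumeUnmounted); ok && !ev.Success {
		glog.Warningf("volume %s was not cleanly unmounted", ev.Name)
	}
}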
// aufsUnmount flushes pending aufs writes via auplink before unmounting the
// target; a missing target is treated as already unmounted.
func aufsUnmount(target string) error {
	glog.V(1).Infof("Ready to unmount the target : %s", target)
	if _, err := os.Stat(target); err != nil && os.IsNotExist(err) {
		return nil
	}
	cmdString := fmt.Sprintf("auplink %s flush", target)
	cmd := exec.Command("/bin/sh", "-c", cmdString)
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Warningf("Couldn't run auplink command : %s\n%s\n", err.Error(), output)
	}
	return syscall.Unmount(target, 0)
}
// UmountAufsContainer retries unmounting an aufs rootfs up to ten times with
// a 3ms pause before each attempt, then reports the result on the hub channel.
func UmountAufsContainer(shareDir, image string, index int, hub chan QemuEvent) {
	mount := path.Join(shareDir, image)
	success := true
	for i := 0; i < 10; i++ {
		time.Sleep(3 * time.Millisecond)
		err := aufs.Unmount(mount)
		if err != nil {
			glog.Warningf("Cannot umount aufs %s: %s", mount, err.Error())
			success = false
		} else {
			success = true
			break
		}
	}
	hub <- &ContainerUnmounted{Index: index, Success: success}
}
// UmountDMDevice removes a device-mapper device with dmsetup, retrying once
// on failure, then reports the result on the hub channel.
func UmountDMDevice(deviceFullPath, name string, hub chan QemuEvent) {
	args := fmt.Sprintf("dmsetup remove -f %s", deviceFullPath)
	cmd := exec.Command("/bin/sh", "-c", args)
	success := true
	if output, err := cmd.CombinedOutput(); err != nil {
		glog.Warningf("Cannot remove device %s: %s, %s", deviceFullPath, err.Error(), output)
		// retry once
		cmd := exec.Command("/bin/sh", "-c", args)
		if err := cmd.Run(); err != nil {
			success = false
		}
	}

	// notify the caller that the block device has been removed
	hub <- &BlockdevRemovedEvent{Name: name, Success: success}
}
// UmountOverlayContainer retries unmounting an overlay rootfs up to ten
// times; any error other than EBUSY is treated as already unmounted.
func UmountOverlayContainer(shareDir, image string, index int, hub chan QemuEvent) {
	mount := path.Join(shareDir, image)
	success := true
	for i := 0; i < 10; i++ {
		time.Sleep(3 * time.Millisecond)
		err := syscall.Unmount(mount, 0)
		if err != nil {
			if !strings.Contains(strings.ToLower(err.Error()), "device or resource busy") {
				// errors like "no such file or directory" mean the mount
				// is already gone
				success = true
				break
			}
			glog.Warningf("Cannot umount overlay %s: %s", mount, err.Error())
			success = false
		} else {
			success = true
			break
		}
	}
	hub <- &ContainerUnmounted{Index: index, Success: success}
}
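// The two container-unmount helpers above share the same retry shape. A
// sketch of that pattern factored out, for illustration only (the 10-attempt,
// 3ms schedule mirrors the code above; this helper is not part of the file,
// and it omits the overlay-specific EBUSY handling):
func retryUnmount(mount string, unmount func(string) error) bool {
	for i := 0; i < 10; i++ {
		time.Sleep(3 * time.Millisecond)
		if err := unmount(mount); err != nil {
			glog.Warningf("Cannot umount %s: %s", mount, err.Error())
			continue
		}
		return true
	}
	return false
}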
// mainDaemon creates the engine and daemon, installs the job handlers,
// starts the API server, and waits for a shutdown signal or an API error.
func mainDaemon(config, host string) {
	glog.V(0).Infof("The config file is %s", config)
	if config == "" {
		config = "/etc/hyper/config"
	}
	eng := engine.New(config)

	d, err := daemon.NewDaemon(eng)
	if err != nil {
		glog.Errorf("The hyperd create failed, %s\n", err.Error())
		return
	}

	stopAll := make(chan os.Signal, 1)
	signal.Notify(stopAll, syscall.SIGINT, syscall.SIGTERM)
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGHUP)

	// Install the accepted jobs
	if err := d.Install(eng); err != nil {
		glog.Errorf("The hyperd install failed, %s\n", err.Error())
		return
	}

	glog.V(0).Infof("Hyper daemon: %s %s\n",
		utils.VERSION,
		utils.GITCOMMIT,
	)

	// after the daemon is done setting up we can tell the api to start
	// accepting connections
	if err := eng.Job("acceptconnections").Run(); err != nil {
		glog.Error("the acceptconnections job run failed!\n")
		return
	}
	defaultHost := []string{}
	if host != "" {
		defaultHost = append(defaultHost, host)
	}
	defaultHost = append(defaultHost, "unix:///var/run/hyper.sock")
	if d.Host != "" {
		defaultHost = append(defaultHost, d.Host)
	}

	job := eng.Job("serveapi", defaultHost...)

	// The serve API job never exits unless an error occurs.
	// We need to start it as a goroutine and wait on it so
	// the daemon doesn't exit.
	serveAPIWait := make(chan error)
	go func() {
		if err := job.Run(); err != nil {
			glog.Errorf("ServeAPI error: %v\n", err)
			serveAPIWait <- err
			return
		}
		serveAPIWait <- nil
	}()

	glog.V(0).Info("Daemon has completed initialization\n")
	if err := d.Restore(); err != nil {
		glog.Warningf("Failed to restore the previous VM")
		return
	}

	// Daemon is fully initialized and handling API traffic.
	// Wait for the serve API job to complete.
	select {
	case errAPI := <-serveAPIWait:
		// an error here is unique to the API server
		eng.Shutdown()
		if errAPI != nil {
			glog.Warningf("Shutting down due to ServeAPI error: %v\n", errAPI)
		}
	case <-stop:
		// SIGHUP: shut down but keep the running VMs
		d.DestroyAndKeepVm()
		eng.Shutdown()
	case <-stopAll:
		// SIGINT/SIGTERM: tear everything down
		d.DestroyAllVm()
		eng.Shutdown()
	}
}