// checkDegree broadcasts connection solicitations with exponential backoff
// until the degree is met, then returns. checkDegree locks and will cause the
// caller to block until the degree is met. It should only be run as a
// goroutine.
func (n *Node) checkDegree() {
	// check degree only if we're not already running
	n.degreeLock.Lock()
	defer n.degreeLock.Unlock()

	var backoff uint = 1

	s := rand.NewSource(time.Now().UnixNano())
	r := rand.New(s)

	for n.numClients() < n.degree {
		log.Debugln("soliciting connections")

		b := net.IPv4(255, 255, 255, 255)
		addr := net.UDPAddr{
			IP:   b,
			Port: n.port,
		}
		socket, err := net.DialUDP("udp4", nil, &addr)
		if err != nil {
			log.Error("checkDegree: %v", err)
			break
		}

		message := fmt.Sprintf("meshage:%s:%s", n.namespace, n.name)
		_, err = socket.Write([]byte(message))
		socket.Close()
		if err != nil {
			log.Error("checkDegree: %v", err)
			break
		}

		wait := r.Intn(1 << backoff)
		time.Sleep(time.Duration(wait) * time.Second)
		if backoff < 7 { // maximum wait won't exceed 128 seconds
			backoff++
		}
	}
}
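// The loop above retries with jittered exponential backoff: each attempt
// sleeps for a random number of seconds in [0, 2^backoff), and the exponent
// is capped at 7 so a single wait never exceeds 127 seconds. Below is a
// minimal standalone sketch of that pattern using only math/rand and time;
// backoffWait is illustrative and not part of meshage.
func backoffWait(r *rand.Rand, attempt uint) {
	if attempt > 7 {
		attempt = 7
	}

	// sleep between 0 and (2^attempt)-1 seconds
	wait := r.Intn(1 << attempt)
	time.Sleep(time.Duration(wait) * time.Second)
}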
func vncClear() error {
	for k, v := range vncKBRecording {
		log.Debug("stopping kb recording for %v", k)
		if err := v.Stop(); err != nil {
			log.Error("%v", err)
		}

		delete(vncKBRecording, k)
	}

	for k, v := range vncFBRecording {
		log.Debug("stopping fb recording for %v", k)
		if err := v.Stop(); err != nil {
			log.Error("%v", err)
		}

		delete(vncFBRecording, k)
	}

	for k, v := range vncKBPlaying {
		log.Debug("stopping kb playing for %v", k)
		if err := v.Stop(); err != nil {
			log.Error("%v", err)
		}

		delete(vncKBPlaying, k)
	}

	return nil
}
func meshageHandler() {
	for {
		m := <-meshageCommandChan
		go func() {
			mCmd := m.Body.(meshageCommand)

			cmd, err := minicli.Compile(mCmd.Original)
			if err != nil {
				log.Error("invalid command from mesh: `%s`", mCmd.Original)
				return
			}

			resps := []minicli.Responses{}
			for resp := range runCommand(cmd) {
				resps = append(resps, resp)
			}

			if len(resps) > 1 || len(resps[0]) > 1 {
				// This should never happen because the only commands that
				// return multiple responses are `read` and `mesh send` which
				// aren't supposed to be sent across meshage.
				log.Error("unsure how to process multiple responses!!")
			}

			resp := meshageResponse{Response: *resps[0][0], TID: mCmd.TID}
			recipient := []string{m.Source}

			_, err = meshageNode.Set(recipient, resp)
			if err != nil {
				log.Errorln(err)
			}
		}()
	}
}
func webStart(port int, root string) {
	// Initialize templates
	templates := filepath.Join(root, "templates")

	log.Info("compiling templates from %s", templates)

	web.Templates = template.New("minimega-templates")
	filepath.Walk(templates, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			log.Error("failed to load template from %s", path)
			return nil
		}

		if !info.IsDir() && strings.HasSuffix(path, ".html") {
			web.Templates.ParseFiles(path)
		}

		return nil
	})

	mux := http.NewServeMux()
	for _, v := range []string{"novnc", "libs", "include"} {
		path := fmt.Sprintf("/%s/", v)
		dir := http.Dir(filepath.Join(root, v))
		mux.Handle(path, http.StripPrefix(path, http.FileServer(dir)))
	}

	mux.HandleFunc("/", webVMs)
	mux.HandleFunc("/map", webMapVMs)
	mux.HandleFunc("/screenshot/", webScreenshot)
	mux.HandleFunc("/hosts", webHosts)
	mux.HandleFunc("/tags", webVMTags)
	mux.HandleFunc("/tiles", webTileVMs)
	mux.HandleFunc("/graph", webGraph)
	mux.HandleFunc("/json", webJSON)
	mux.HandleFunc("/vnc/", webVNC)
	mux.HandleFunc("/ws/", vncWsHandler)

	if web.Server == nil {
		web.Server = &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}

		err := web.Server.ListenAndServe()
		if err != nil {
			log.Error("web: %v", err)
			web.Server = nil
		} else {
			web.Port = port
			web.Running = true
		}
	} else {
		log.Info("web: changing web root to: %s", root)
		if port != web.Port && port != defaultWebPort {
			log.Error("web: changing web's port is not supported")
		}
		// just update the mux
		web.Server.Handler = mux
	}
}
// Flush cleans up all resources allocated to the VM which includes all the
// network taps.
func (vm *KvmVM) Flush() error {
	vm.lock.Lock()
	defer vm.lock.Unlock()

	for _, net := range vm.Networks {
		// Handle already disconnected taps differently since they aren't
		// assigned to any bridges.
		if net.VLAN == DisconnectedVLAN {
			if err := bridge.DestroyTap(net.Tap); err != nil {
				log.Error("leaked tap %v: %v", net.Tap, err)
			}

			continue
		}

		br, err := getBridge(net.Bridge)
		if err != nil {
			return err
		}

		if err := br.DestroyTap(net.Tap); err != nil {
			log.Error("leaked tap %v: %v", net.Tap, err)
		}
	}

	return vm.BaseVM.Flush()
}
func (vms VMs) kill(target string) []error {
	killedVms := map[int]bool{}

	errs := expandVmTargets(target, false, func(vm VM, _ bool) (bool, error) {
		if vm.GetState()&VM_KILLABLE == 0 {
			return false, nil
		}

		if err := vm.Kill(); err != nil {
			log.Error("unleash the zombie VM: %v", err)
		} else {
			killedVms[vm.GetID()] = true
		}

		return true, nil
	})

outer:
	for len(killedVms) > 0 {
		select {
		case id := <-killAck:
			log.Info("VM %v killed", id)
			delete(killedVms, id)
		case <-time.After(COMMAND_TIMEOUT * time.Second):
			log.Error("vm kill timeout")
			break outer
		}
	}

	for id := range killedVms {
		log.Info("VM %d failed to acknowledge kill", id)
	}

	return errs
}
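// The ack-draining loop above is a reusable pattern: track outstanding IDs in
// a set, remove each one as its acknowledgement arrives, and give up after a
// quiet period. A minimal standalone sketch, assuming only the time package;
// waitForAcks is illustrative and not part of minimega.
func waitForAcks(pending map[int]bool, acks <-chan int, timeout time.Duration) {
	for len(pending) > 0 {
		select {
		case id := <-acks:
			delete(pending, id)
		case <-time.After(timeout):
			// anything left in pending was never acknowledged
			return
		}
	}
}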
func (vm *ContainerVM) console(stdin, stdout, stderr *os.File) {
	socketPath := filepath.Join(vm.instancePath, "console")
	l, err := net.Listen("unix", socketPath)
	if err != nil {
		log.Error("could not start unix domain socket console on vm %v: %v", vm.ID, err)
		return
	}
	vm.listener = l

	for {
		conn, err := l.Accept()
		if err != nil {
			if strings.Contains(err.Error(), "use of closed network connection") {
				return
			}
			log.Error("console socket on vm %v: %v", vm.ID, err)
			continue
		}
		log.Debug("new connection!")

		go io.Copy(conn, stdout)
		go io.Copy(conn, stderr)
		io.Copy(stdin, conn)
		log.Debug("disconnected!")
	}
}
func vncClear() {
	for k, v := range vncKBRecording {
		if inNamespace(v.VM) {
			log.Debug("stopping kb recording for %v", k)
			if err := v.Stop(); err != nil {
				log.Error("%v", err)
			}

			delete(vncKBRecording, k)
		}
	}

	for k, v := range vncFBRecording {
		if inNamespace(v.VM) {
			log.Debug("stopping fb recording for %v", k)
			if err := v.Stop(); err != nil {
				log.Error("%v", err)
			}

			delete(vncFBRecording, k)
		}
	}

	for k, v := range vncPlaying {
		if inNamespace(v.VM) {
			log.Debug("stopping kb playing for %v", k)
			if err := v.Stop(); err != nil {
				log.Error("%v", err)
			}

			delete(vncPlaying, k)
		}
	}
}
// recvFiles retrieves a list of files from the ron server by requesting each
// one individually.
func recvFiles(files []*ron.File) {
	start := time.Now()
	var size int64

	for _, v := range files {
		log.Info("requesting file %v", v)

		dst := filepath.Join(*f_path, "files", v.Name)

		if _, err := os.Stat(dst); err == nil {
			// file exists (TODO: overwrite?)
			log.Info("skipping %v -- already exists", dst)
			continue
		}

		m := &ron.Message{
			Type:     ron.MESSAGE_FILE,
			UUID:     Client.UUID,
			Filename: v.Name,
		}

		if err := sendMessage(m); err != nil {
			log.Error("send failed: %v", err)
			return
		}

		resp := <-Client.fileChan
		if resp.Filename != v.Name {
			log.Error("filename mismatch: %v != %v", resp.Filename, v.Name)
			continue
		}

		if resp.Error != "" {
			log.Error("%v", resp.Error)
			continue
		}

		dir := filepath.Dir(dst)
		if err := os.MkdirAll(dir, os.FileMode(0770)); err != nil {
			log.Errorln(err)
			continue
		}

		if err := ioutil.WriteFile(dst, resp.File, v.Perm); err != nil {
			log.Errorln(err)
			continue
		}

		size += int64(len(resp.File))
	}

	d := time.Since(start)
	rate := (float64(size) / 1024 / d.Seconds())
	log.Debug("received %v bytes in %v (%v KBps)", size, d, rate)
}
// newConnection processes a new incoming connection from another node,
// processes the connection handshake, adds the connection to the client list,
// and starts the client message handler.
func (n *Node) newConnection(conn net.Conn) {
	log.Debug("newConnection: %v", conn.RemoteAddr().String())

	// are we soliciting connections?
	solicited := uint(len(n.clients)) < n.degree
	log.Debug("solicited: %v", solicited)

	c := &client{
		conn: conn,
		enc:  gob.NewEncoder(conn),
		dec:  gob.NewDecoder(conn),
		ack:  make(chan uint64, RECEIVE_BUFFER),
	}

	// the handshake involves the following:
	// 1.  We send our name and our solicitation status
	// 2a. If the connection is solicited but we're all full, the remote node
	//     simply hangs up
	// 2b. If the connection is unsolicited or solicited and we are still
	//     soliciting connections, the remote node responds with its name
	// 3.  The connection is valid, add it to our client list and broadcast a
	//     MSA announcing the new connection.
	// 4.  The remote node does the same as 3.
	err := c.enc.Encode(n.name)
	if err != nil {
		log.Error("newConnection encode name: %v: %v", n.name, err)
		c.conn.Close()
		return
	}

	err = c.enc.Encode(solicited)
	if err != nil {
		log.Error("newConnection encode solicited: %v: %v", n.name, err)
		c.conn.Close()
		return
	}

	var resp string
	err = c.dec.Decode(&resp)
	if err != nil {
		if err != io.EOF {
			log.Error("newConnection decode name: %v: %v", n.name, err)
		}
		c.conn.Close()
		return
	}
	c.name = resp
	log.Debug("handshake from: %v", c.name)

	n.clientLock.Lock()
	n.clients[resp] = c
	n.clientLock.Unlock()

	go n.clientHandler(resp)
}
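// For context, here is a hedged sketch of what the dialing side of the
// handshake described above might look like, following steps 1 through 2b of
// the comment. handshakeSketch and its parameters are illustrative
// assumptions, not the actual meshage implementation.
func handshakeSketch(conn net.Conn, myName string, dialedOnSolicitation bool) (string, error) {
	enc := gob.NewEncoder(conn)
	dec := gob.NewDecoder(conn)

	// step 1: receive the accepting node's name and solicitation status
	var peer string
	var peerSoliciting bool
	if err := dec.Decode(&peer); err != nil {
		return "", err
	}
	if err := dec.Decode(&peerSoliciting); err != nil {
		return "", err
	}

	// step 2a: we dialed in response to a solicitation, but the peer has
	// since filled its degree, so simply hang up
	if dialedOnSolicitation && !peerSoliciting {
		conn.Close()
		return "", fmt.Errorf("%v is no longer soliciting connections", peer)
	}

	// step 2b: respond with our own name; the connection is now valid
	if err := enc.Encode(myName); err != nil {
		return "", err
	}

	return peer, nil
}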
func (b *Bridge) snooper() {
	var (
		dot1q layers.Dot1Q
		eth   layers.Ethernet
		ip4   layers.IPv4
		ip6   layers.IPv6
		arp   layers.ARP
	)

	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet,
		&dot1q, &eth, &ip4, &ip6, &arp,
	)

	decodedLayers := []gopacket.LayerType{}

	for {
		data, _, err := b.handle.ReadPacketData()
		if err != nil {
			if err != io.EOF {
				log.Error("error reading packet data: %v", err)
			}
			break
		}

		if err := parser.DecodeLayers(data, &decodedLayers); err != nil {
			if err2, ok := err.(gopacket.UnsupportedLayerType); ok {
				switch gopacket.LayerType(err2) {
				case layers.LayerTypeICMPv6, gopacket.LayerTypePayload:
					// ignore
					err = nil
				default:
					continue
				}
			}

			if err != nil {
				log.Error("error parsing packet: %v", err)
				continue
			}
		}

		for _, layerType := range decodedLayers {
			switch layerType {
			case layers.LayerTypeICMPv6:
				b.updateIP(eth.SrcMAC.String(), ip6.SrcIP)
			case layers.LayerTypeARP:
				b.updateIP(eth.SrcMAC.String(), net.IP(arp.SourceProtAddress))
			}
		}
	}

	log.Info("%v snoop out", b.Name)
}
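// A standalone sketch of the DecodingLayerParser pattern used by snooper:
// decode a single raw Ethernet frame and report the source MAC and IP when an
// ARP or IPv4 layer is present. decodeFrame is illustrative only; it assumes
// the gopacket and gopacket/layers packages referenced above.
func decodeFrame(data []byte) {
	var (
		eth layers.Ethernet
		ip4 layers.IPv4
		arp layers.ARP
	)

	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &arp)
	decoded := []gopacket.LayerType{}

	// layers not registered with the parser surface as an error; for a
	// best-effort snoop we still inspect whatever did decode
	if err := parser.DecodeLayers(data, &decoded); err != nil {
		log.Debug("decodeFrame: %v", err)
	}

	for _, t := range decoded {
		switch t {
		case layers.LayerTypeARP:
			log.Info("%v -> %v", eth.SrcMAC, net.IP(arp.SourceProtAddress))
		case layers.LayerTypeIPv4:
			log.Info("%v -> %v", eth.SrcMAC, ip4.SrcIP)
		}
	}
}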
func nukeTap(b, tap string) {
	if err := ovsDelPort(b, tap); err != nil && err != ErrNoSuchPort {
		log.Error("%v, %v -- %v", b, tap, err)
	}

	if err := delTap(tap); err != nil {
		log.Error("%v -- %v", tap, err)
	}
}
// route an outgoing message to one or all clients, according to UUID
func (s *Server) route(m *Message) {
	handleUUID := func(uuid string) {
		c, ok := s.clients[uuid]
		if !ok {
			log.Error("no such client %v", uuid)
			return
		}

		vm, ok := s.vms[uuid]
		if !ok {
			// The client is connected but not registered:
			//  * client connected before it was registered
			//  * client was unregistered before it disconnected
			// Either way, we have to skip it since we don't know what
			// namespace it belongs to.
			log.Error("unregistered client %v", uuid)
			return
		}

		// Create a copy of the Message
		m2 := *m
		m2.Tags = vm.GetTags()
		m2.Namespace = vm.GetNamespace()

		if err := c.sendMessage(&m2); err != nil {
			if strings.Contains(err.Error(), "broken pipe") {
				log.Debug("client disconnected: %v", uuid)
			} else {
				log.Info("unable to send message to %v: %v", uuid, err)
			}
		}
	}

	s.clientLock.Lock()
	defer s.clientLock.Unlock()

	if m.UUID != "" {
		handleUUID(m.UUID)
		return
	}

	var wg sync.WaitGroup

	// send commands to all clients, in parallel
	for uuid := range s.clients {
		wg.Add(1)

		go func(uuid string) {
			defer wg.Done()

			handleUUID(uuid)
		}(uuid)
	}

	wg.Wait()
}
// clientHandler is called as a goroutine after a successful handshake. It
// begins by issuing an MSA. When the receiver exits, another MSA is issued
// without the client.
func (n *Node) clientHandler(host string) {
	log.Debug("clientHandler: %v", host)

	c, err := n.getClient(host)
	if err != nil {
		log.Error("client %v vanished -- %v", host, err)
		return
	}

	n.MSA()

	for {
		var m Message
		c.conn.SetReadDeadline(time.Now().Add(deadlineMultiplier * n.msaTimeout))
		err := c.dec.Decode(&m)
		if err != nil {
			if err != io.EOF && !strings.Contains(err.Error(), "connection reset by peer") {
				log.Error("client %v decode: %v", host, err)
			}
			break
		}

		if log.WillLog(log.DEBUG) {
			log.Debug("decoded message: %v: %v", c.name, &m)
		}

		if m.Command == ACK {
			c.ack <- m.ID
		} else {
			// send an ack
			a := Message{
				Command: ACK,
				ID:      m.ID,
			}
			c.conn.SetWriteDeadline(time.Now().Add(deadlineMultiplier * n.msaTimeout))
			err := c.enc.Encode(a)
			if err != nil {
				if err != io.EOF {
					log.Error("client %v encode ACK: %v", host, err)
				}
				break
			}

			n.messagePump <- &m
		}
	}

	log.Info("client %v disconnected", host)

	// client has disconnected
	c.conn.Close()
	n.clientLock.Lock()
	delete(n.clients, c.name)
	n.clientLock.Unlock()

	go n.checkDegree()

	n.MSA()
}
func runTests() {
	mm, err := miniclient.Dial(*f_base)
	if err != nil {
		log.Fatal("%v", err)
	}

	if *f_preamble != "" {
		out, err := runCommands(mm, *f_preamble)
		if err != nil {
			log.Fatal("%v", err)
		}

		log.Info(out)
	}

	// TODO: Should we quit minimega and restart it between each test?
	//quit := mustCompile(t, "quit 2")

	files, err := ioutil.ReadDir(*f_testDir)
	if err != nil {
		log.Fatal("%v", err)
	}

	for _, info := range files {
		if strings.HasSuffix(info.Name(), ".want") || strings.HasSuffix(info.Name(), ".got") {
			continue
		}

		log.Info("Running commands from %s", info.Name())

		fpath := path.Join(*f_testDir, info.Name())

		got, err := runCommands(mm, fpath)
		if err != nil {
			log.Fatal("%v", err)
		}

		// Record the output for offline comparison
		if err := ioutil.WriteFile(fpath+".got", []byte(got), os.FileMode(0644)); err != nil {
			log.Error("unable to write `%s` -- %v", fpath+".got", err)
		}

		want, err := ioutil.ReadFile(fpath + ".want")
		if err != nil {
			log.Error("unable to read file `%s` -- %v", fpath+".want", err)
			continue
		}

		if got != string(want) {
			log.Error("got != want for %s", info.Name())
		}

		//mm.runCommand(quit)
	}
}
func webVMs(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}

	table := htmlTable{
		Header:  []string{"host", "screenshot"},
		Tabular: [][]interface{}{},
		ID:      "example",
		Class:   "hover",
	}
	table.Header = append(table.Header, vmMasks...)

	stateMask := VM_QUIT | VM_ERROR

	for host, vms := range globalVmInfo() {
	vmLoop:
		for _, vm := range vms {
			var buf bytes.Buffer
			if vm.GetState()&stateMask == 0 {
				params := vmScreenshotParams{
					Host: host,
					Name: vm.GetName(),
					Port: 5900 + vm.GetID(),
					ID:   vm.GetID(),
					Size: 140,
				}

				if err := web.Templates.ExecuteTemplate(&buf, "fragment/screenshot", &params); err != nil {
					log.Error("unable to execute template screenshot -- %v", err)
					continue
				}
			}

			res := []interface{}{host, template.HTML(buf.String())}

			for _, mask := range vmMasks {
				if v, err := vm.Info(mask); err != nil {
					log.Error("unable to get info from VM %s:%s -- %v", host, vm.GetName(), err)
					continue vmLoop
				} else {
					res = append(res, v)
				}
			}

			table.Tabular = append(table.Tabular, res)
		}
	}

	webRenderTemplate(w, "table.html", table)
}
func hostid(s string) (string, int) {
	k := strings.Split(s, ":")
	if len(k) != 2 {
		log.Error("hostid cannot split host vmid pair: %v", k)
		return "", -1
	}

	val, err := strconv.Atoi(k[1])
	if err != nil {
		log.Error("parse hostid: %v", err)
		return "", -1
	}

	return k[0], val
}
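// A brief usage sketch of hostid; the "node42:7" input and hostidExample
// wrapper are illustrative. A returned id of -1 signals a parse failure,
// which hostid has already logged.
func hostidExample() {
	host, id := hostid("node42:7")
	if id == -1 {
		return
	}

	log.Info("host %v, vm id %v", host, id)
}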
func webStart(port int, root string) {
	// Initialize templates
	var err error

	templates := filepath.Join(root, "templates", "*.html")
	log.Info("compiling templates from %s", templates)

	web.Templates, err = template.ParseGlob(templates)
	if err != nil {
		log.Error("failed to load templates from %s", templates)
		return
	}

	mux := http.NewServeMux()
	for _, v := range []string{"novnc", "d3", "include"} {
		path := fmt.Sprintf("/%s/", v)
		dir := http.Dir(filepath.Join(root, v))
		mux.Handle(path, http.StripPrefix(path, http.FileServer(dir)))
	}

	mux.HandleFunc("/", webVMs)
	mux.HandleFunc("/map", webMapVMs)
	mux.HandleFunc("/screenshot/", webScreenshot)
	mux.HandleFunc("/hosts", webHosts)
	mux.HandleFunc("/tags", webVMTags)
	mux.HandleFunc("/tiles", webTileVMs)
	mux.HandleFunc("/vnc/", webVNC)
	mux.HandleFunc("/ws/", vncWsHandler)

	if web.Server == nil {
		web.Server = &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}

		err := web.Server.ListenAndServe()
		if err != nil {
			log.Error("web: %v", err)
			web.Server = nil
		} else {
			web.Port = port
			web.Running = true
		}
	} else {
		log.Info("web: changing web root to: %s", root)
		if port != web.Port && port != defaultWebPort {
			log.Error("web: changing web's port is not supported")
		}
		// just update the mux
		web.Server.Handler = mux
	}
}
// runCommand runs a command through a JSON pipe.
func runCommand(cmd Command) chan *localResponse {
	conn, err := net.Dial("unix", path.Join(*f_minimega, "minimega"))
	if err != nil {
		log.Errorln(err)
		return nil
	}

	enc := json.NewEncoder(conn)
	dec := json.NewDecoder(conn)

	log.Debug("encoding command: %v", cmd)
	err = enc.Encode(cmd)
	if err != nil {
		log.Error("local command json encode: %v", err)
		return nil
	}

	log.Debugln("encoded command:", cmd)

	respChan := make(chan *localResponse)

	go func() {
		defer close(respChan)

		for {
			var r localResponse
			err = dec.Decode(&r)
			if err != nil {
				if err == io.EOF {
					log.Infoln("server disconnected")
					return
				}

				log.Error("local command json decode: %v", err)
				return
			}

			respChan <- &r

			if !r.More {
				log.Debugln("got last message")
				break
			} else {
				log.Debugln("expecting more data")
			}
		}
	}()

	return respChan
}
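// A hedged usage sketch for runCommand: the caller must handle the nil
// channel returned on connection or encode errors, then drain the channel
// until it closes. The Command literal and its Command field are assumptions
// for illustration, not the actual type definition.
func runCommandExample() {
	respChan := runCommand(Command{Command: "vm info"})
	if respChan == nil {
		// runCommand already logged the failure
		return
	}

	for resp := range respChan {
		log.Info("response: %v", resp)
	}
}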
func captureUpdateNFTimeouts() {
	for _, b := range bridges.Names() {
		br, err := getBridge(b)
		if err != nil {
			log.Error("could not get bridge: %v", err)
			continue
		}

		err = br.SetNetflowTimeout(captureNFTimeout)
		if err != nil && !strings.Contains(err.Error(), "has no netflow object") {
			log.Error("unable to update netflow timeout: %v", err)
		}
	}
}
// incoming message mux. Routes messages to the correct handlers based on
// message type
func (s *Server) mux() {
	for {
		m := <-s.in

		switch m.Type {
		case MESSAGE_CLIENT:
			// handle a client response
			log.Debugln("ron MESSAGE_CLIENT")
			s.responses <- m.Client
		case MESSAGE_TUNNEL:
			// handle a tunnel message
			log.Debugln("ron MESSAGE_TUNNEL")
			s.routeTunnel(m)
		case MESSAGE_COMMAND:
			// route a command to one or all clients
			log.Debugln("ron MESSAGE_COMMAND")
			s.route(m)
		case MESSAGE_FILE:
			// send a file if it exists
			s.sendFile(m)
		default:
			log.Error("unknown message type: %v", m.Type)
			return
		}
	}
}
// destroy all bridges
func bridgesDestroy() error {
	var e []string
	for k, v := range bridges {
		err := v.Destroy()
		if err != nil {
			e = append(e, err.Error())
		}

		bridgeLock.Lock()
		delete(bridges, k)
		bridgeLock.Unlock()
	}

	bridgeLock.Lock()
	updateBridgeInfo()
	bridgeLock.Unlock()

	bridgeFile := *f_base + "bridges"
	err := os.Remove(bridgeFile)
	if err != nil {
		log.Error("bridgesDestroy: could not remove bridge file: %v", err)
	}

	if len(e) == 0 {
		return nil
	}

	return errors.New(strings.Join(e, " : "))
}
// Get the VM info from all hosts optionally applying column/row filters.
// Returns a map with keys for the hostnames and values as the tabular data
// from the host.
func globalVmInfo() map[string]VMs {
	cmdStr := "vm info"

	res := map[string]VMs{}

	cmd := minicli.MustCompile(cmdStr)
	cmd.Record = false

	for resps := range runCommandGlobally(cmd) {
		for _, resp := range resps {
			if resp.Error != "" {
				log.Errorln(resp.Error)
				continue
			}

			switch data := resp.Data.(type) {
			case VMs:
				res[resp.Host] = data
			default:
				log.Error("unknown data field in vm info")
			}
		}
	}

	return res
}
// nukeTaps removes a list of tap devices
func nukeTaps(taps []string) {
	for _, t := range taps {
		if err := bridge.DestroyTap(t); err != nil {
			log.Error("%v -- %v", t, err)
		}
	}
}
// create an overlay mount (linux 3.18 or greater) if snapshot mode is being
// used.
func (vm *ContainerVM) overlayMount() error {
	vm.effectivePath = filepath.Join(vm.instancePath, "fs")
	workPath := filepath.Join(vm.instancePath, "fs_work")

	err := os.MkdirAll(vm.effectivePath, 0755)
	if err != nil {
		return err
	}
	err = os.MkdirAll(workPath, 0755)
	if err != nil {
		return err
	}

	// create the overlay mountpoint
	args := []string{
		"mount",
		"-t",
		"overlay",
		fmt.Sprintf("megamount_%v", vm.ID),
		"-o",
		fmt.Sprintf("lowerdir=%v,upperdir=%v,workdir=%v", vm.FSPath, vm.effectivePath, workPath),
		vm.effectivePath,
	}
	log.Debug("mounting overlay: %v", args)

	out, err := processWrapper(args...)
	if err != nil {
		log.Error("overlay mount: %v %v", err, out)
		return err
	}

	return nil
}
// Walks the f_base directory and kills procs read from any qemu or
// dnsmasq pid files
func nukeWalker(path string, info os.FileInfo, err error) error {
	if err != nil {
		return nil
	}

	log.Debug("walking file: %v", path)

	switch info.Name() {
	case "qemu.pid", "dnsmasq.pid":
		d, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}

		t := strings.TrimSpace(string(d))
		log.Debug("found pid: %v", t)

		args := []string{
			"kill",
			t,
		}
		log.Infoln("killing process:", t)

		out, err := processWrapper(args...)
		if err != nil {
			log.Error("%v: %v", err, out)
		}
	}

	return nil
}
func nukeBridges() {
	for _, b := range nukeBridgeNames(false) {
		if err := bridge.DestroyBridge(b); err != nil {
			log.Error("%v -- %v", b, err)
		}
	}
}
func (vms VMs) kill(target string) []error {
	killedVms := map[int]bool{}

	errs := expandVmTargets(target, false, func(vm VM, _ bool) (bool, error) {
		if vm.State()&(VM_QUIT|VM_ERROR) != 0 {
			return false, nil
		}

		vm.Kill()
		killedVms[vm.ID()] = true
		return true, nil
	})

outer:
	for len(killedVms) > 0 {
		select {
		case id := <-killAck:
			log.Info("VM %v killed", id)
			delete(killedVms, id)
		case <-time.After(COMMAND_TIMEOUT * time.Second):
			log.Error("vm kill timeout")
			break outer
		}
	}

	for id := range killedVms {
		log.Info("VM %d failed to acknowledge kill", id)
	}

	return errs
}
// return a file to a client requesting it via the client's GetFile() call
func (s *Server) sendFile(m *Message) {
	log.Debug("ron sendFile: %v", m.Filename)

	filename := filepath.Join(s.path, m.Filename)
	info, err := os.Stat(filename)
	if err != nil {
		e := fmt.Errorf("file %v does not exist: %v", filename, err)
		m.Error = e.Error()
		log.Errorln(e)
	} else if info.IsDir() {
		e := fmt.Errorf("file %v is a directory", filename)
		m.Error = e.Error()
		log.Errorln(e)
	} else {
		// read the file
		m.File, err = ioutil.ReadFile(filename)
		if err != nil {
			e := fmt.Errorf("file %v: %v", filename, err)
			m.Error = e.Error()
			log.Errorln(e)
		}
	}

	// route this message ourselves instead of using the mux, because we
	// want the type to still be FILE
	s.clientLock.Lock()
	defer s.clientLock.Unlock()

	if c, ok := s.clients[m.UUID]; ok {
		c.out <- m
	} else {
		log.Error("no such client %v", m.UUID)
	}
}
func (n *Node) flood(m *Message) {
	if log.WillLog(log.DEBUG) {
		log.Debug("flood: %v", m)
	}

	n.clientLock.Lock()
	defer n.clientLock.Unlock()

floodLoop:
	for k := range n.clients {
		for _, j := range m.CurrentRoute {
			if k == j {
				continue floodLoop
			}
		}

		go func(j string, m *Message) {
			err := n.clientSend(j, m)
			if err != nil {
				// is j still a client?
				if n.hasClient(j) {
					log.Error("flood to client %v: %v", j, err)
				}
			}
		}(k, m)
	}
}