func sshClientActivity(index int) { sc := sshConns[index] s := rand.NewSource(time.Now().UnixNano()) r := rand.New(s) // generate a random byte slice l := r.Intn(128) b := make([]byte, l) for i, _ := range b { b[i] = byte(r.Int()) } data := base64.StdEncoding.EncodeToString(b) log.Debug("ssh activity to %v with %v", sc.Host, data) start := time.Now().UnixNano() sc.Stdin.Write([]byte(data)) sc.Stdin.Write([]byte{'\r', '\n'}) sshReportChan <- uint64(len(data)) expected := fmt.Sprintf("> %v\r\n%v\r\n> ", data, data) for i := 0; i < 10 && sc.StdoutBuf.String() != expected; i++ { time.Sleep(100 * time.Millisecond) } stop := time.Now().UnixNano() log.Info("ssh %v %vns", sc.Host, uint64(stop-start)) log.Debugln("ssh: ", sc.StdoutBuf.String()) sc.StdoutBuf.Reset() }
// cliPreprocessor allows modifying commands post-compile but pre-process. // Current preprocessors "file:", "http://", and "https://". // // Note: we don't run preprocessors when we're not running the `local` behavior // (see wrapBroadcastCLI) to avoid expanding files before we're running the // command on the correct machine. func cliPreprocessor(c *minicli.Command) error { if c.Source != GetNamespaceName() { return nil } for k, v := range c.StringArgs { v2, err := cliPreprocess(v) if err != nil { return err } if v != v2 { log.Info("cliPreprocess: [%v] %v -> %v", k, v, v2) } c.StringArgs[k] = v2 } for k := range c.ListArgs { for k2, v := range c.ListArgs[k] { v2, err := cliPreprocess(v) if err != nil { return err } if v != v2 { log.Info("cliPreprocessor: [%v][%v] %v -> %v", k, k2, v, v2) } c.ListArgs[k][k2] = v2 } } return nil }
// Kill VMs matching target. func (vms VMs) Kill(target string) []error { vmLock.Lock() defer vmLock.Unlock() killedVms := map[int]bool{} // For each VM, kill it if it's in a killable state. Should not be run in // parallel because we record the IDs of the VMs we kill in killedVms. applyFunc := func(vm VM, _ bool) (bool, error) { if vm.GetState()&VM_KILLABLE == 0 { return false, nil } if err := vm.Kill(); err != nil { log.Error("unleash the zombie VM: %v", err) } else { killedVms[vm.GetID()] = true } return true, nil } errs := vms.apply(target, false, applyFunc) for len(killedVms) > 0 { id := <-killAck log.Info("VM %v killed", id) delete(killedVms, id) } for id := range killedVms { log.Info("VM %d failed to acknowledge kill", id) } return errs }
func (b *Bridge) addTap(tap, mac string, lan int, host bool) error { log.Info("adding tap on bridge: %v %v %v %v %v", b.Name, tap, mac, lan, host) // reap taps before adding to avoid someone killing/restarting a vm faster // than the periodic tap reaper b.reapTaps() if _, ok := b.taps[tap]; ok { return fmt.Errorf("tap already on bridge") } err := ovsAddPort(b.Name, tap, lan, host) if err == errAlreadyExists { // Special case -- tap is already on bridge... try to remove it first // and then add it again. log.Info("tap %v is already on bridge, adding again", tap) if err = ovsDelPort(b.Name, tap); err == nil { err = ovsAddPort(b.Name, tap, lan, host) } } if err != nil { return err } b.taps[tap] = &Tap{ Name: tap, Bridge: b.Name, VLAN: lan, MAC: mac, Host: host, } return nil }
func (vm *ContainerVM) Start() (err error) { vm.lock.Lock() defer vm.lock.Unlock() if vm.State&VM_RUNNING != 0 { return nil } if vm.State == VM_QUIT || vm.State == VM_ERROR { log.Info("relaunching VM: %v", vm.ID) // Create a new channel since we closed the other one to indicate that // the VM should quit. vm.kill = make(chan bool) // Launch handles setting the VM to error state if err := vm.launch(); err != nil { return err } } log.Info("starting VM: %v", vm.ID) if err := vm.thaw(); err != nil { log.Errorln(err) vm.setError(err) return err } vm.setState(VM_RUNNING) return nil }
func (vms VMs) kill(target string) []error { killedVms := map[int]bool{} errs := expandVmTargets(target, false, func(vm VM, _ bool) (bool, error) { if vm.State()&(VM_QUIT|VM_ERROR) != 0 { return false, nil } vm.Kill() killedVms[vm.ID()] = true return true, nil }) outer: for len(killedVms) > 0 { select { case id := <-killAck: log.Info("VM %v killed", id) delete(killedVms, id) case <-time.After(COMMAND_TIMEOUT * time.Second): log.Error("vm kill timeout") break outer } } for id := range killedVms { log.Info("VM %d failed to acknowledge kill", id) } return errs }
func kill(pid int) { Client.Lock() defer Client.Unlock() if pid == -1 { // Wildcard log.Info("killing all processes") for _, p := range Client.Processes { if err := p.process.Kill(); err != nil { log.Errorln(err) } } return } log.Info("killing PID %v", pid) if p, ok := Client.Processes[pid]; ok { if err := p.process.Kill(); err != nil { log.Errorln(err) } return } log.Error("no such process: %v", pid) }
func (vm *vmKVM) Start() error { s := vm.State() stateMask := VM_PAUSED | VM_BUILDING | VM_QUIT | VM_ERROR if s&stateMask == 0 { return nil } if s == VM_QUIT || s == VM_ERROR { log.Info("restarting VM: %v", vm.id) ack := make(chan int) go vm.launch(ack) log.Debug("ack restarted VM %v", <-ack) } log.Info("starting VM: %v", vm.id) err := vm.q.Start() if err != nil { log.Errorln(err) if err != qmp.ERR_READY { vm.setState(VM_ERROR) } } else { vm.setState(VM_RUNNING) } return err }
// mux to handle i/o over the transport. Data on channel out will be sent over // the transport. Data coming in over the transport will be routed to the // incoming channel as tagged be the message's TID. This allows us to trunk // multiple tunnels over a single transport. func (t *Tunnel) mux() { var err error log.Info("starting minitunnel mux") for { var m tunnelMessage if err = t.dec.Decode(&m); err != nil { break } log.Debug("new message: %v", m.Type) // create new session if necessary if m.Type == CONNECT { t.handleRemote(&m) } else if m.Type == FORWARD { t.handleReverse(&m) } else if c := t.chans.get(m.TID); c != nil { // route the message to the handler by TID c <- &m } else { log.Info("invalid TID: %v", m.TID) } } close(t.quit) // signal to all listeners that this tunnel is outa here t.transport.Close() for _, ch := range t.chans.dropAll() { close(ch) } log.Info("mux exit: %v", err) }
func webStart(port int, root string) { // Initialize templates templates := filepath.Join(root, "templates") log.Info("compiling templates from %s", templates) web.Templates = template.New("minimega-templates") filepath.Walk(templates, func(path string, info os.FileInfo, err error) error { if err != nil { log.Error("failed to load template from %s", path) return nil } if !info.IsDir() && strings.HasSuffix(path, ".html") { web.Templates.ParseFiles(path) } return nil }) mux := http.NewServeMux() for _, v := range []string{"novnc", "libs", "include"} { path := fmt.Sprintf("/%s/", v) dir := http.Dir(filepath.Join(root, v)) mux.Handle(path, http.StripPrefix(path, http.FileServer(dir))) } mux.HandleFunc("/", webVMs) mux.HandleFunc("/map", webMapVMs) mux.HandleFunc("/screenshot/", webScreenshot) mux.HandleFunc("/hosts", webHosts) mux.HandleFunc("/tags", webVMTags) mux.HandleFunc("/tiles", webTileVMs) mux.HandleFunc("/graph", webGraph) mux.HandleFunc("/json", webJSON) mux.HandleFunc("/vnc/", webVNC) mux.HandleFunc("/ws/", vncWsHandler) if web.Server == nil { web.Server = &http.Server{ Addr: fmt.Sprintf(":%d", port), Handler: mux, } err := web.Server.ListenAndServe() if err != nil { log.Error("web: %v", err) web.Server = nil } else { web.Port = port web.Running = true } } else { log.Info("web: changing web root to: %s", root) if port != web.Port && port != defaultWebPort { log.Error("web: changing web's port is not supported") } // just update the mux web.Server.Handler = mux } }
// updateClient updates the client fields and pushes the VM tags state func (s *Server) updateClient(cin *Client) { s.clientLock.Lock() defer s.clientLock.Unlock() c, ok := s.clients[cin.UUID] if !ok { // the client probably disconnected between sending the heartbeat and // us processing it. We'll still process any command responses. log.Info("unknown client %v", cin.UUID) return } c.Client = cin c.Checkin = time.Now() vm, ok := s.vms[cin.UUID] if !ok { // see above log.Info("unregistered client %v", cin.UUID) return } for k, v := range cin.Tags { vm.SetTag(k, v) } }
func (m *loggingMutex) Unlock() { _, file, line, _ := runtime.Caller(1) log.Info("unlocking: %v:%v", file, line) m.Mutex.Unlock() log.Info("unlocked: %v:%v", file, line) }
func (vms VMs) kill(target string) []error { killedVms := map[int]bool{} errs := expandVmTargets(target, false, func(vm VM, _ bool) (bool, error) { if vm.GetState()&VM_KILLABLE == 0 { return false, nil } if err := vm.Kill(); err != nil { log.Error("unleash the zombie VM: %v", err) } else { killedVms[vm.GetID()] = true } return true, nil }) outer: for len(killedVms) > 0 { select { case id := <-killAck: log.Info("VM %v killed", id) delete(killedVms, id) case <-time.After(COMMAND_TIMEOUT * time.Second): log.Error("vm kill timeout") break outer } } for id := range killedVms { log.Info("VM %d failed to acknowledge kill", id) } return errs }
func (vm *ContainerVM) Start() error { if vm.State&VM_RUNNING != 0 { return nil } if vm.State == VM_QUIT || vm.State == VM_ERROR { log.Info("restarting VM: %v", vm.ID) ack := make(chan int) vm.kill = make(chan bool) go vm.launch(ack) log.Debug("ack restarted VM %v", <-ack) } log.Info("starting VM: %v", vm.ID) freezer := filepath.Join(CGROUP_PATH, fmt.Sprintf("%v", vm.ID), "freezer.state") err := ioutil.WriteFile(freezer, []byte("THAWED"), 0644) if err != nil { return err } vm.setState(VM_RUNNING) return nil }
// recvFiles retrieves a list of files from the ron server by requesting each // one individually. func recvFiles(files []*ron.File) { start := time.Now() var size int64 for _, v := range files { log.Info("requesting file %v", v) dst := filepath.Join(*f_path, "files", v.Name) if _, err := os.Stat(dst); err == nil { // file exists (TODO: overwrite?) log.Info("skipping %v -- already exists") continue } m := &ron.Message{ Type: ron.MESSAGE_FILE, UUID: Client.UUID, Filename: v.Name, } if err := sendMessage(m); err != nil { log.Error("send failed: %v", err) return } resp := <-Client.fileChan if resp.Filename != v.Name { log.Error("filename mismatch: %v != %v", resp.Filename, v.Name) continue } if resp.Error != "" { log.Error("%v", resp.Error) continue } dir := filepath.Dir(dst) if err := os.MkdirAll(dir, os.FileMode(0770)); err != nil { log.Errorln(err) continue } if err := ioutil.WriteFile(dst, resp.File, v.Perm); err != nil { log.Errorln(err) continue } size += int64(len(resp.File)) } d := time.Since(start) rate := (float64(size) / 1024 / d.Seconds()) log.Debug("received %v bytes in %v (%v KBps)", size, d, rate) return }
// runCommand executes command (argv-style, command[0] is the binary), either
// synchronously or in the background. It returns the captured stdout and
// stderr; for background commands both are empty because the process is still
// running when we return.
func runCommand(command []string, background bool) (string, string) {
	var stdout bytes.Buffer
	var stderr bytes.Buffer

	// resolve the binary via PATH before building the Cmd by hand
	path, err := exec.LookPath(command[0])
	if err != nil {
		log.Errorln(err)
		return "", err.Error()
	}

	cmd := &exec.Cmd{
		Path:   path,
		Args:   command,
		Stdout: &stdout,
		Stderr: &stderr,
	}
	log.Info("executing: %v", command)

	if background {
		log.Debug("starting in background")
		if err := cmd.Start(); err != nil {
			log.Errorln(err)
			return "", stderr.String()
		}

		pid := cmd.Process.Pid

		// record the process so kill/killAll can find it later; lock is held
		// until we return, guaranteeing the entry exists before anyone can
		// look it up
		Client.Lock()
		defer Client.Unlock()
		Client.Processes[pid] = &Process{
			PID:     pid,
			Command: command,
			process: cmd.Process,
		}

		// reap the process and drop it from the table when it exits
		go func() {
			cmd.Wait()
			log.Info("command exited: %v", command)
			if stdout.Len() > 0 {
				log.Info(stdout.String())
			}
			if stderr.Len() > 0 {
				log.Info(stderr.String())
			}
			Client.Lock()
			defer Client.Unlock()
			delete(Client.Processes, pid)
		}()

		return "", ""
	}

	// foreground: run to completion and hand back whatever was captured
	if err := cmd.Run(); err != nil {
		log.Errorln(err)
	}
	return stdout.String(), stderr.String()
}
func cliShell(c *minicli.Command, resp *minicli.Response, background bool) error { var sOut bytes.Buffer var sErr bytes.Buffer p, err := exec.LookPath(c.ListArgs["command"][0]) if err != nil { return err } args := []string{p} if len(c.ListArgs["command"]) > 1 { args = append(args, c.ListArgs["command"][1:]...) } cmd := &exec.Cmd{ Path: p, Args: args, Env: nil, Dir: "", Stdout: &sOut, Stderr: &sErr, } log.Info("starting: %v", args) if err := cmd.Start(); err != nil { return err } if background { go func() { if err := cmd.Wait(); err != nil { log.Error(err.Error()) return } log.Info("command %v exited", args) if out := sOut.String(); out != "" { log.Info(out) } if err := sErr.String(); err != "" { log.Info(err) } }() return nil } if err = cmd.Wait(); err != nil { return err } resp.Response = sOut.String() resp.Error = sErr.String() return nil }
// runTests connects to a running minimega instance and replays each command
// script in *f_testDir, recording output to <name>.got and comparing it
// against the expected <name>.want file. Mismatches are logged, not fatal.
func runTests() {
	mm, err := miniclient.Dial(*f_base)
	if err != nil {
		log.Fatal("%v", err)
	}

	// optionally run a preamble script before any tests
	if *f_preamble != "" {
		out, err := runCommands(mm, *f_preamble)
		if err != nil {
			log.Fatal("%v", err)
		}

		log.Info(out)
	}

	// TODO: Should we quit minimega and restart it between each test?
	//quit := mustCompile(t, "quit 2")

	files, err := ioutil.ReadDir(*f_testDir)
	if err != nil {
		log.Fatal("%v", err)
	}

	for _, info := range files {
		// skip recorded/expected outputs; only run the command scripts
		if strings.HasSuffix(info.Name(), ".want") || strings.HasSuffix(info.Name(), ".got") {
			continue
		}

		log.Info("Running commands from %s", info.Name())

		// NOTE(review): path.Join is slash-only; filepath.Join would be the
		// OS-correct choice for a filesystem path -- confirm this tool never
		// runs on Windows.
		fpath := path.Join(*f_testDir, info.Name())

		got, err := runCommands(mm, fpath)
		if err != nil {
			log.Fatal("%v", err)
		}

		// Record the output for offline comparison
		if err := ioutil.WriteFile(fpath+".got", []byte(got), os.FileMode(0644)); err != nil {
			log.Error("unable to write `%s` -- %v", fpath+".got", err)
		}

		want, err := ioutil.ReadFile(fpath + ".want")
		if err != nil {
			log.Error("unable to read file `%s` -- %v", fpath+".want", err)
			continue
		}

		if got != string(want) {
			log.Error("got != want for %s", info.Name())
		}

		//mm.runCommand(quit)
	}
}
func httpImageHandler(w http.ResponseWriter, r *http.Request) { start := time.Now().UnixNano() w.Write(httpImage) stop := time.Now().UnixNano() elapsed := uint64(stop - start) if r.TLS != nil { log.Info("https %v %v %vns", r.RemoteAddr, r.URL, elapsed) hitTLSChan <- 1 } else { log.Info("http %v %v %vns", r.RemoteAddr, r.URL, elapsed) hitChan <- 1 } }
func webStart(port int, root string) { // Initialize templates var err error templates := filepath.Join(root, "templates", "*.html") log.Info("compiling templates from %s", templates) web.Templates, err = template.ParseGlob(templates) if err != nil { log.Error("failed to load templates from %s", templates) return } mux := http.NewServeMux() for _, v := range []string{"novnc", "d3", "include"} { path := fmt.Sprintf("/%s/", v) dir := http.Dir(filepath.Join(root, v)) mux.Handle(path, http.StripPrefix(path, http.FileServer(dir))) } mux.HandleFunc("/", webVMs) mux.HandleFunc("/map", webMapVMs) mux.HandleFunc("/screenshot/", webScreenshot) mux.HandleFunc("/hosts", webHosts) mux.HandleFunc("/tags", webVMTags) mux.HandleFunc("/tiles", webTileVMs) mux.HandleFunc("/vnc/", webVNC) mux.HandleFunc("/ws/", vncWsHandler) if web.Server == nil { web.Server = &http.Server{ Addr: fmt.Sprintf(":%d", port), Handler: mux, } err := web.Server.ListenAndServe() if err != nil { log.Error("web: %v", err) web.Server = nil } else { web.Port = port web.Running = true } } else { log.Info("web: changing web root to: %s", root) if port != web.Port && port != defaultWebPort { log.Error("web: changing web's port is not supported") } // just update the mux web.Server.Handler = mux } }
func ksmGetIntFromFile(filename string) int { buffer, err := ioutil.ReadFile(filename) if err != nil { log.Fatalln(err) } b := strings.TrimSpace(string(buffer)) log.Info("read: %v", b) run, err := strconv.Atoi(b) if err != nil { log.Errorln(err) teardown() } log.Info("got %v from %v", int(run), filename) return int(run) }
func killAll(needle string) { Client.Lock() defer Client.Unlock() log.Info("killing all processes matching `%v`", needle) for _, p := range Client.Processes { if strings.Contains(strings.Join(p.Command, " "), needle) { log.Info("killing matched process: %v", p.Command) if err := p.process.Kill(); err != nil { log.Errorln(err) } } } }
func vncWsHandler(ws *websocket.Conn) { // URL should be of the form `/ws/<vm_name>` path := strings.Trim(ws.Config().Location.Path, "/") fields := strings.Split(path, "/") if len(fields) != 2 { return } vmName := fields[1] vms := GlobalVMs() vm, err := vms.findKvmVM(vmName) if err != nil { log.Errorln(err) return } // Undocumented "feature" of websocket -- need to set to PayloadType in // order for a direct io.Copy to work. ws.PayloadType = websocket.BinaryFrame // connect to the remote host rhost := fmt.Sprintf("%v:%v", vm.Host, vm.VNCPort) remote, err := net.Dial("tcp", rhost) if err != nil { log.Errorln(err) return } defer remote.Close() go io.Copy(ws, remote) io.Copy(remote, ws) log.Info("ws client disconnected from %v", rhost) }
func (b *Bridge) destroyNetflow() error { log.Info("destroying netflow on %v", b.Name) if b.nf == nil { return fmt.Errorf("bridge has no netflow object") } b.nf.Stop() // disconnect openvswitch from netflow object args := []string{ "clear", "Bridge", b.Name, "netflow", } if _, err := ovsCmdWrapper(args); err != nil { return fmt.Errorf("destroy netflow failed: %v", err) } b.nf = nil return nil }
// cliPreprocess performs expansion on a single string and returns the update
// string or an error.
func cliPreprocess(v string) (string, error) {
	// url.Parse rarely fails, so most strings reach the switch; ones with no
	// recognized scheme fall through and are returned unchanged.
	if u, err := url.Parse(v); err == nil {
		switch u.Scheme {
		case "file":
			// file:<path> -- resolve through the iom subsystem
			log.Debug("file preprocessor")
			return iomHelper(u.Opaque)
		case "http", "https":
			log.Debug("http/s preprocessor")

			// Check if we've already downloaded the file
			v2, err := iomHelper(u.Path)
			if err == nil {
				return v2, err
			}

			// NOTE(review): matching on the error string is fragile --
			// confirm iomHelper has no sentinel error to compare against.
			if err.Error() == "file not found" {
				log.Info("attempting to download %v", u)

				// Try to download the file, save to files
				dst := filepath.Join(*f_iomBase, u.Path)
				if err := wget(v, dst); err != nil {
					return "", err
				}

				return dst, nil
			}

			return "", err
		}
	}

	return v, nil
}
// randomMac generates a random MAC address with a fixed first octet of 00 and
// returns it as a colon-separated string. (The old comment said "ipv4 mac
// address" -- MACs are layer 2 and unrelated to IPv4.)
func randomMac() string {
	// five random bytes fill the remaining octets
	// NOTE(review): Read's error is ignored -- harmless for math/rand, but
	// verify this isn't crypto/rand, which can fail
	b := make([]byte, 5)
	rand.Read(b)

	mac := fmt.Sprintf("00:%02x:%02x:%02x:%02x:%02x", b[0], b[1], b[2], b[3], b[4])
	log.Info("generated mac: %v", mac)

	return mac
}
func httpTLSClient(protocol string) { log.Debugln("httpTLSClient") t := NewEventTicker(*f_mean, *f_stddev, *f_min, *f_max) transport := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, Proxy: http.ProxyFromEnvironment, Dial: func(network, addr string) (net.Conn, error) { return net.Dial(protocol, addr) }, } client := &http.Client{ Transport: transport, } for { t.Tick() h, o := randomHost() log.Debug("https host %v from %v", h, o) elapsed := httpTLSClientRequest(h, client) if elapsed != 0 { log.Info("https %v %vns", client, elapsed) } httpTLSReportChan <- 1 } }
func (b *Bridge) destroyMirror() error { log.Info("destroying mirror on bridge: %v", b.Name) if b.mirror == "" { return fmt.Errorf("bridge does not have a mirror") } // delete the mirror for this bridge args := []string{ "clear", "bridge", b.Name, "mirrors", } if _, err := ovsCmdWrapper(args); err != nil { return fmt.Errorf("remove mirror failed: %v", err) } // delete the associated host tap if err := b.destroyTap(b.mirror); err != nil { return err } b.mirror = "" return nil }
// iomHelper supports grabbing files for internal minimega operations. It // returns the local path of the file or an error if the file doesn't exist or // could not transfer. iomHelper blocks until all file transfers are completed. func iomHelper(file string) (string, error) { err := iom.Get(file) if err != nil { return "", err } iomWait(file) dst := filepath.Join(*f_iomBase, file) info, err := diskInfo(dst) if err == nil && info.BackingFile != "" { // try to fetch backing image too file := filepath.Clean(info.BackingFile) if !strings.HasPrefix(file, *f_iomBase) { return "", fmt.Errorf("cannot fetch backing image from outside files directory: %v", file) } file, err = filepath.Rel(*f_iomBase, file) if err != nil { return "", err } log.Info("fetching backing image: %v", file) if _, err := iomHelper(file); err != nil { return "", fmt.Errorf("failed to fetch backing image %v: %v", file, err) } } return dst, nil }
func commandHandler() { for commands := range Client.commandChan { var ids []int for k, _ := range commands { ids = append(ids, k) } sort.Ints(ids) for _, id := range ids { log.Debug("ron commandHandler: %v", id) if id <= Client.CommandCounter { continue } if !Client.Matches(commands[id].Filter) { continue } log.Debug("ron commandHandler match: %v", id) processCommand(commands[id]) } } log.Info("command handler exit") }