Example #1
// broadcastListener listens for broadcast connection solicitations and connects to
// soliciting nodes.
func (n *Node) broadcastListener() {
	listenAddr := net.UDPAddr{
		IP:   net.IPv4(0, 0, 0, 0),
		Port: n.port,
	}
	ln, err := net.ListenUDP("udp4", &listenAddr)
	if err != nil {
		log.Fatal("broadcastListener: %v", err)
	}
	for {
		d := make([]byte, 1024)
		read, _, err := ln.ReadFromUDP(d)
		if err != nil {
			log.Error("broadcastListener ReadFromUDP: %v", err)
			continue
		}
		data := strings.Split(string(d[:read]), ":")
		if len(data) != 3 {
			log.Warn("got malformed udp data: %v", data)
			continue
		}
		if data[0] != "meshage" {
			log.Warn("got malformed udp data: %v", data)
			continue
		}
		namespace := data[1]
		host := data[2]
		if namespace != n.namespace {
			log.Debug("got solicitation from namespace %v, dropping", namespace)
			continue
		}
		if host == n.name {
			log.Debugln("got solicitation from myself, dropping")
			continue
		}
		log.Debug("got solicitation from %v", host)

		// to avoid spamming the node with connections, only 1/8 of the
		// nodes should try to connect. If there are < 16 nodes, then
		// always try.
		if len(n.clients) > SOLICIT_LIMIT {
			s := rand.NewSource(time.Now().UnixNano())
			r := rand.New(s)
			n := r.Intn(SOLICIT_RATIO)
			if n != 0 {
				log.Debugln("randomly skipping this solicitation")
				continue
			}
		}
		go n.dial(host, true)
	}
}
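
For orientation, here is a minimal sketch (not meshage's actual sender; the function name, broadcast address, and the assumption that net and fmt are imported are all illustrative) of the datagram this listener expects: a UDP broadcast of the form "meshage:<namespace>:<host>" sent to the node's port.

func solicitSketch(namespace, host string, port int) error {
	// 255.255.255.255 is the limited broadcast address; a real sender might
	// target a subnet-specific broadcast address instead.
	conn, err := net.Dial("udp4", fmt.Sprintf("255.255.255.255:%d", port))
	if err != nil {
		return err
	}
	defer conn.Close()

	// the payload format matches what broadcastListener splits on ":"
	_, err = conn.Write([]byte(fmt.Sprintf("meshage:%v:%v", namespace, host)))
	return err
}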
Example #2
// ovsAddPort adds a port to an openvswitch bridge. If the vlan is 0, it will
// not be vlan-tagged.
func ovsAddPort(bridge, tap string, vlan int, host bool) error {
	args := []string{
		"add-port",
		bridge,
		tap,
	}

	// see note in ovsAddBridge.
	if len(tap) > 15 {
		log.Warn("tap name is longer than 15 characters.. dragons ahead")
	}

	if vlan != 0 {
		args = append(args, fmt.Sprintf("tag=%v", vlan))
	}

	if host {
		args = append(args, "--")
		args = append(args, "set")
		args = append(args, "Interface")
		args = append(args, tap)
		args = append(args, "type=internal")
	}

	if _, err := ovsCmdWrapper(args); err == errAlreadyExists {
		return err
	} else if err != nil {
		return fmt.Errorf("add port failed: %v", err)
	}

	return nil
}
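
A hedged usage sketch (bridge and tap names made up): with these arguments, the wrapper is handed "add-port mega_bridge mega_tap0 tag=100".

func ovsAddPortSketch() {
	// tag the tap on VLAN 100; host=false means no internal interface is configured
	if err := ovsAddPort("mega_bridge", "mega_tap0", 100, false); err != nil {
		log.Error("ovsAddPort: %v", err)
	}
}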
Example #3
// hostVMs is HostVMs without locking cmdLock.
func hostVMs(host string) VMs {
	// Compile info command and set it not to record
	cmd := minicli.MustCompile("vm info")
	cmd.SetRecord(false)
	cmd.SetSource(GetNamespaceName())

	cmds := makeCommandHosts([]string{host}, cmd)

	var vms VMs

	// LOCK: see func description.
	for resps := range runCommands(cmds...) {
		for _, resp := range resps {
			if resp.Error != "" {
				log.Errorln(resp.Error)
				continue
			}

			if vms2, ok := resp.Data.(VMs); ok {
				if vms != nil {
					// odd... should only be one vms per host and we're
					// querying a single host
					log.Warn("so many vms")
				}
				vms = vms2
			}
		}
	}

	return vms
}
Example #4
// SetRange reserves a range of VLANs for a particular prefix. VLANs are
// allocated in the range [min, max).
func (v *AllocatedVLANs) SetRange(prefix string, min, max int) error {
	v.Lock()
	defer v.Unlock()

	log.Info("setting range for %v: [%v, %v)", prefix, min, max)

	// Test for conflicts with other ranges
	for prefix2, r := range v.ranges {
		if prefix == prefix2 || prefix2 == "" {
			continue
		}

		if min < r.Max && r.Min < max {
			return fmt.Errorf("range overlaps with another namespace: %v", prefix2)
		}
	}

	// Warn if we detect any holes in the range, i.e. VLANs that are already allocated
	for i := min; i < max; i++ {
		if _, ok := v.byVLAN[i]; ok {
			log.Warn("detected hole in VLAN range %v -> %v: %v", min, max, i)
		}
	}

	v.ranges[prefix] = &Range{
		Min:  min,
		Max:  max,
		Next: min,
	}

	return nil
}
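
A short worked example of the half-open overlap test above, with made-up bounds:

// new range [100, 200) vs existing [150, 250): 100 < 250 && 150 < 200 -> conflict
// new range [100, 200) vs existing [300, 400): 300 < 200 is false     -> no conflict
// new range [100, 200) vs existing [200, 300): 200 < 200 is false     -> adjacent, no conflict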
Example #5
// Process a prepopulated Command
func ProcessCommand(c *Command) <-chan Responses {
	if !c.noOp && c.Call == nil {
		log.Fatal("command %v has no callback!", c)
	}

	respChan := make(chan Responses)

	go func() {
		if !c.noOp {
			c.Call(c, respChan)
		}

		// Append the command to the history
		if c.Record {
			history = append(history, c.Original)

			if len(history) > HistoryLen && HistoryLen > 0 {
				if firstHistoryTruncate {
					log.Warn("history length exceeds limit, truncating to %v entries", HistoryLen)
					firstHistoryTruncate = false
				}

				history = history[len(history)-HistoryLen:]
			}
		}

		close(respChan)
	}()

	return respChan
}
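
A hedged usage sketch (same package, assuming fmt is imported): the call returns immediately, and the caller drains the channel until the callback closes it.

func processCommandSketch(c *Command) {
	for resps := range ProcessCommand(c) {
		for _, resp := range resps {
			fmt.Println(resp)
		}
	}
}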
Example #6
func main() {
	// flags
	flag.Parse()

	logSetup()

	if *f_u != "" {
		log.Debug("updating with file: %v", *f_u)

		err := update(filepath.Join(*f_path, "minirouter"), *f_u)
		if err != nil {
			log.Errorln(err)
		}

		return
	}

	// check for a running instance of minirouter
	_, err := os.Stat(filepath.Join(*f_path, "minirouter"))
	if err == nil {
		if !*f_force {
			log.Fatalln("minirouter appears to already be running, override with -force")
		}
		log.Warn("minirouter may already be running, proceed with caution")
		err = os.Remove(filepath.Join(*f_path, "minirouter"))
		if err != nil {
			log.Fatalln(err)
		}
	}

	log.Debug("using path: %v", *f_path)

	// attempt to set up the base path
	err = os.MkdirAll(*f_path, os.FileMode(0770))
	if err != nil {
		log.Fatal("mkdir base path: %v", err)
	}

	// start the domain socket service
	go commandSocketStart()

	// signal handling
	sig := make(chan os.Signal, 1024)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)

	<-sig

	// cleanup
	err = os.Remove(filepath.Join(*f_path, "minirouter"))
	if err != nil {
		log.Fatalln(err)
	}
}
Example #7
// ParseVLAN parses s and returns a VLAN. If s can be parsed as an integer, the
// resulting integer is returned. If s matches an existing alias, that VLAN is
// returned. Otherwise, returns ErrUnallocated.
func (v *AllocatedVLANs) ParseVLAN(namespace, s string) (int, error) {
	v.Lock()
	defer v.Unlock()

	log.Debug("parsing vlan: %v namespace: %v", s, namespace)

	vlan, err := strconv.Atoi(s)
	if err == nil {
		// Check to ensure that VLAN is sane
		if vlan < 0 || vlan >= 4096 {
			return 0, errors.New("invalid VLAN (0 <= vlan < 4096)")
		}

		if alias, ok := v.byVLAN[vlan]; ok && alias != BlacklistedVLAN {
			// Warn the user if they supplied an integer and it matches a VLAN
			// that has an alias.
			log.Warn("VLAN %d has alias %v", vlan, alias)
		} else if !ok {
			// Blacklist the VLAN if the user entered it manually and we don't
			// have an alias for it already.
			log.Warn("Blacklisting manually specified VLAN %v", vlan)
			v.blacklist(vlan)
		}

		return vlan, nil
	}

	// Prepend active namespace if it doesn't look like the user is trying to
	// supply a namespace already.
	if !strings.Contains(s, AliasSep) {
		s = namespace + AliasSep + s
	}

	if vlan, ok := v.byAlias[s]; ok {
		return vlan, nil
	}

	return 0, ErrUnallocated
}
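
A hedged usage sketch with made-up inputs (same package): an integer parses directly, while a bare alias is qualified with the namespace and AliasSep before lookup.

func parseVLANSketch(v *AllocatedVLANs) {
	// "100" parses as the integer VLAN 100 (and is blacklisted if unknown)
	if vlan, err := v.ParseVLAN("myns", "100"); err == nil {
		log.Debug("got vlan: %v", vlan)
	}

	// "dmz" is looked up as the alias "myns" + AliasSep + "dmz"
	if _, err := v.ParseVLAN("myns", "dmz"); err == ErrUnallocated {
		log.Debugln("alias not allocated yet")
	}
}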
Example #8
// RevertNamespace reverts the active namespace (which should match curr) back
// to the old namespace.
func RevertNamespace(old, curr *Namespace) {
	namespaceLock.Lock()
	defer namespaceLock.Unlock()

	// This is very odd and should *never* happen unless something has gone
	// horribly wrong.
	if namespace != curr.Name {
		log.Warn("unexpected namespace, `%v` != `%v`, when reverting to `%v`", namespace, curr, old)
	}

	if old == nil {
		namespace = ""
	} else {
		namespace = old.Name
	}
}
Example #9
// diskInjectCleanup handles unmounting, disconnecting nbd, and removing mount
// directory after diskInject.
func diskInjectCleanup(mntDir, nbdPath string) {
	log.Debug("cleaning up vm inject: %s %s", mntDir, nbdPath)

	out, err := processWrapper("umount", mntDir)
	if err != nil {
		log.Error("injectCleanup: %v, %v", out, err)
	}

	if err := nbd.DisconnectDevice(nbdPath); err != nil {
		log.Error("qemu nbd disconnect: %v", err)
		log.Warn("minimega was unable to disconnect %v", nbdPath)
	}

	err = os.Remove(mntDir)
	if err != nil {
		log.Error("rm mount dir: %v", err)
	}
}
Example #10
// meshageRecipients expands a hosts string into a list of hostnames. Supports
// expanding Wildcard to all hosts in the mesh or all hosts in the active
// namespace.
func meshageRecipients(hosts string) ([]string, error) {
	ns := GetNamespace()

	if hosts == Wildcard {
		if ns == nil {
			return meshageNode.BroadcastRecipients(), nil
		}

		recipients := []string{}

		// Wildcard expands to all hosts in the namespace, except the local
		// host, if included
		for host := range ns.Hosts {
			if host == hostname {
				log.Info("excluding localhost, %v, from `%v`", hostname, Wildcard)
				continue
			}

			recipients = append(recipients, host)
		}

		return recipients, nil
	}

	recipients, err := ranges.SplitList(hosts)
	if err != nil {
		return nil, err
	}

	// If a namespace is active, warn if the user is trying to mesh send hosts
	// outside the namespace
	if ns != nil {
		for _, host := range recipients {
			if !ns.Hosts[host] {
				log.Warn("%v is not part of namespace %v", host, ns.Name)
			}
		}
	}

	return recipients, nil
}
Example #11
// gets a new tap from tapChan and verifies that it doesn't already exist
func getNewTap() (string, error) {
	var t string
	for {
		t = <-tapChan
		taps, err := ioutil.ReadDir("/sys/class/net")
		if err != nil {
			return "", err
		}
		found := false
		for _, v := range taps {
			if v.Name() == t {
				found = true
				log.Warn("tap %v already exists, trying again", t)
			}
		}
		if !found {
			break
		}
	}
	return t, nil
}
Example #12
// ovsAddBridge creates a new openvswitch bridge. Returns whether the bridge
// was created or not, or any error that occurred.
func ovsAddBridge(name string) (bool, error) {
	args := []string{
		"add-br",
		name,
	}

	// Linux limits interfaces to IFNAMSIZ bytes which is currently 16,
	// including the null byte. We won't return an error as this limit may not
	// affect the user but we should at least warn them that openvswitch may
	// fail unexpectedly.
	if len(name) > 15 {
		log.Warn("bridge name is longer than 15 characters.. dragons ahead")
	}

	if _, err := ovsCmdWrapper(args); err == errAlreadyExists {
		return false, nil
	} else if err != nil {
		return false, fmt.Errorf("add bridge failed: %v", err)
	}

	return true, nil
}
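
A hedged usage sketch (bridge name made up) of the bool return, which distinguishes a freshly created bridge from one that already existed:

func ovsAddBridgeSketch() {
	created, err := ovsAddBridge("mega_bridge")
	if err != nil {
		log.Error("add bridge: %v", err)
		return
	}
	if !created {
		log.Debugln("bridge already existed, reusing it")
	}
}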
Example #13
func (vm *BaseVM) conflicts(vm2 *BaseVM) error {
	// Return error if two VMs have same name or UUID
	if vm.Namespace == vm2.Namespace {
		if vm.Name == vm2.Name {
			return fmt.Errorf("duplicate VM name: %s", vm.Name)
		}

		if vm.UUID == vm2.UUID {
			return fmt.Errorf("duplicate VM UUID: %s", vm.UUID)
		}
	}

	// Warn if we see two VMs that share a MAC on the same VLAN
	for _, n := range vm.Networks {
		for _, n2 := range vm2.Networks {
			if n.MAC == n2.MAC && n.VLAN == n2.VLAN {
				log.Warn("duplicate MAC/VLAN: %v/%v for %v and %v", vm.ID, vm2.ID)
			}
		}
	}

	return nil
}
Example #14
// Unmount, disconnect nbd, and remove mount directory
func vmInjectCleanup(mntDir, nbdPath string) {
	log.Debug("cleaning up vm inject: %s %s", mntDir, nbdPath)

	p := process("umount")
	cmd := exec.Command(p, mntDir)
	err := cmd.Run()
	if err != nil {
		log.Error("injectCleanup: %v", err)
	}

	err = nbd.DisconnectDevice(nbdPath)
	if err != nil {
		log.Error("qemu nbd disconnect: %v", err)
		log.Warn("minimega was unable to disconnect %v", nbdPath)
	}

	p = process("rm")
	cmd = exec.Command(p, "-r", mntDir)
	err = cmd.Run()
	if err != nil {
		log.Error("rm mount dir: %v", err)
	}
}
Example #15
func (n *Node) getRoutes(m *Message) (map[string][]string, error) {
	routeSlices := make(map[string][]string)
	n.meshLock.Lock()
	defer n.meshLock.Unlock()

	for _, v := range m.Recipients {
		if v == n.name {
			if len(m.Recipients) == 1 {
				return nil, fmt.Errorf("cannot mesh send yourself")
			}
			continue
		}

		var route string
		var ok bool
		if route, ok = n.routes[v]; !ok {
			log.Warn("no route to host: %v, skipping", v)
			continue
		}
		routeSlices[route] = append(routeSlices[route], v)
	}

	return routeSlices, nil
}
Example #16
// local command line interface, wrapping readline
func cliLocal() {
	goreadline.FilenameCompleter = iomCompleter

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	go func() {
		for range sig {
			goreadline.Signal()
		}
	}()
	defer signal.Stop(sig)

	for {
		namespace := GetNamespaceName()

		prompt := "minimega$ "
		if namespace != "" {
			prompt = fmt.Sprintf("minimega[%v]$ ", namespace)
		}

		line, err := goreadline.Readline(prompt, true)
		if err != nil {
			return
		}
		command := string(line)
		log.Debug("got from stdin: `%v`", command)

		cmd, err := minicli.Compile(command)
		if err != nil {
			log.Error("%v", err)
			//fmt.Printf("closest match: TODO\n")
			continue
		}

		// No command was returned, must have been a blank line or a comment
		// line. Either way, don't try to run a nil command.
		if cmd == nil {
			continue
		}

		// HAX: Don't record the read command
		if hasCommand(cmd, "read") {
			cmd.SetRecord(false)
		}

		// The namespace changed between when we prompted the user (and could
		// still change before we actually run the command).
		if namespace != GetNamespaceName() {
			// TODO: should we abort the command?
			log.Warn("namespace changed between prompt and execution")
		}

		for resp := range RunCommands(cmd) {
			// print the responses
			minipager.DefaultPager.Page(resp.String())

			errs := resp.Error()
			if errs != "" {
				fmt.Fprintln(os.Stderr, errs)
			}
		}
	}
}
Example #17
// launch is the low-level launch function for KVM VMs. The caller should hold
// the VM's lock.
func (vm *KvmVM) launch() error {
	log.Info("launching vm: %v", vm.ID)

	// If this is the first time launching the VM, do the final configuration
	// check and create a directory for it.
	if vm.State == VM_BUILDING {
		if err := os.MkdirAll(vm.instancePath, os.FileMode(0700)); err != nil {
			teardownf("unable to create VM dir: %v", err)
		}
	}

	// write the config for this vm
	config := vm.BaseConfig.String() + vm.KVMConfig.String()
	writeOrDie(vm.path("config"), config)
	writeOrDie(vm.path("name"), vm.Name)

	// create and add taps if we are associated with any networks
	for i := range vm.Networks {
		nic := &vm.Networks[i]
		log.Info("%#v", nic)

		br, err := getBridge(nic.Bridge)
		if err != nil {
			log.Error("get bridge: %v", err)
			vm.setError(err)
			return err
		}

		nic.Tap, err = br.CreateTap(nic.Tap, nic.MAC, nic.VLAN)
		if err != nil {
			log.Error("create tap: %v", err)
			vm.setError(err)
			return err
		}
	}

	if len(vm.Networks) > 0 {
		if err := vm.writeTaps(); err != nil {
			log.Errorln(err)
			vm.setError(err)
			return err
		}
	}

	var args []string
	var sOut bytes.Buffer
	var sErr bytes.Buffer

	vmConfig := VMConfig{BaseConfig: vm.BaseConfig, KVMConfig: vm.KVMConfig}
	args = vmConfig.qemuArgs(vm.ID, vm.instancePath)
	args = ParseQemuOverrides(args)
	log.Debug("final qemu args: %#v", args)

	cmd := &exec.Cmd{
		Path:   process("qemu"),
		Args:   args,
		Stdout: &sOut,
		Stderr: &sErr,
	}

	if err := cmd.Start(); err != nil {
		err = fmt.Errorf("start qemu: %v %v", err, sErr.String())
		log.Errorln(err)
		vm.setError(err)
		return err
	}

	vm.pid = cmd.Process.Pid
	log.Debug("vm %v has pid %v", vm.ID, vm.pid)

	vm.CheckAffinity()

	// Channel to signal when the process has exited
	var waitChan = make(chan bool)

	// Create goroutine to wait for process to exit
	go func() {
		defer close(waitChan)
		err := cmd.Wait()

		vm.lock.Lock()
		defer vm.lock.Unlock()

		// Check if the process quit for some reason other than being killed
		if err != nil && err.Error() != "signal: killed" {
			log.Error("kill qemu: %v %v", err, sErr.String())
			vm.setError(err)
		} else if vm.State != VM_ERROR {
			// Set to QUIT unless we've already been put into the error state
			vm.setState(VM_QUIT)
		}

		// Kill the VNC shim, if it exists
		if vm.vncShim != nil {
			vm.vncShim.Close()
		}
	}()

	if err := vm.connectQMP(); err != nil {
		// Failed to connect to qmp so clean up the process
		cmd.Process.Kill()

		log.Errorln(err)
		vm.setError(err)
		return err
	}

	go qmpLogger(vm.ID, vm.q)

	if err := vm.connectVNC(); err != nil {
		// Failed to connect to vnc so clean up the process
		cmd.Process.Kill()

		log.Errorln(err)
		vm.setError(err)
		return err
	}

	// connect cc
	ccPath := vm.path("cc")
	if err := ccNode.DialSerial(ccPath); err != nil {
		log.Warn("unable to connect to cc for vm %v: %v", vm.ID, err)
	}

	// Create goroutine to wait to kill the VM
	go func() {
		select {
		case <-waitChan:
			log.Info("VM %v exited", vm.ID)
		case <-vm.kill:
			log.Info("Killing VM %v", vm.ID)
			cmd.Process.Kill()
			<-waitChan
			killAck <- vm.ID
		}
	}()

	return nil
}
Example #18
// recursive read routine. Will be called recursively if a 'parents' key exists in the config file
func read(path string, c *Config) error {
	f, err := os.Open(path)
	if err != nil {
		if strings.Contains(err.Error(), "no such file") { // file doesn't exist, let's try some path magic
			if path == c.Path {
				return err
			}
			newpath := filepath.Join(filepath.Dir(c.Path), filepath.Base(path))
			f, err = os.Open(newpath)
			if err != nil {
				return err
			}
			log.Warn("could not find %v, but found a similar one at %v, using that instead", path, newpath)
		} else {
			return err
		}
	}
	defer f.Close()

	var s scanner.Scanner
	s.Init(f)
	tok := s.Scan()
	for tok != scanner.EOF {
		pos := s.Pos()
		if tok != scanner.Ident {
			err = fmt.Errorf("%s:%s malformed config: %s, expected identifier, got %s", path, pos, s.TokenText(), scanner.TokenString(tok))
			return err
		}
		k := s.TokenText()
		tok = s.Scan()
		if tok != '=' {
			err = fmt.Errorf("%s:%s malformed config: %s, expected '=', got %s", path, pos, s.TokenText(), scanner.TokenString(tok))
			return err
		}
		tok = s.Scan()
		if tok != scanner.String {
			err = fmt.Errorf("%s:%s malformed config %s, expected string, got %s", path, pos, s.TokenText(), scanner.TokenString(tok))
			return err
		}

		v := strings.Trim(s.TokenText(), "\"`")
		d := strings.Fields(v)
		switch k {
		case "parents":
			for _, i := range d {
				log.Infoln("reading config:", i)
				err = read(i, c)
				c.Parents = append(c.Parents, i)
				if err != nil {
					return err
				}
			}
		case "packages":
			c.Packages = append(c.Packages, d...)
		case "overlay":
			// trim any trailing "/"
			for i, j := range d {
				d[i] = strings.TrimRight(j, "/")
			}
			c.Overlays = append(c.Overlays, d...)
		case "postbuild":
			c.Postbuilds = append(c.Postbuilds, v)
		default:
			err = fmt.Errorf("invalid key %s", k, d)
			return err
		}
		tok = s.Scan()
	}
	return nil
}
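
A hypothetical config in the format read() accepts: each entry is an identifier, '=', and a quoted string whose whitespace-separated fields become the values (the file names and packages below are made up).

//	parents   = "base.conf"
//	packages  = "openssh-server tcpdump"
//	overlay   = "overlays/common/ overlays/router"
//	postbuild = "passwd -d root"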
Example #19
// processVMNet processes the input specifying the bridge, vlan, and mac for
// one interface to a VM and updates the vm config accordingly. This takes a
// bit of parsing, because the entry can be in a few forms:
// 	vlan
//
//	vlan,mac
//	bridge,vlan
//	vlan,driver
//
//	bridge,vlan,mac
//	vlan,mac,driver
//	bridge,vlan,driver
//
//	bridge,vlan,mac,driver
// If there are 2 or 3 fields, check the last field for the presence of a mac
func processVMNet(spec string) (res NetConfig, err error) {
	// example: my_bridge,100,00:00:00:00:00:00
	f := strings.Split(spec, ",")

	var b, v, m, d string
	switch len(f) {
	case 1:
		v = f[0]
	case 2:
		if isMac(f[1]) {
			// vlan, mac
			v, m = f[0], f[1]
		} else if isNetworkDriver(f[1]) {
			// vlan, driver
			v, d = f[0], f[1]
		} else {
			// bridge, vlan
			b, v = f[0], f[1]
		}
	case 3:
		if isMac(f[2]) {
			// bridge, vlan, mac
			b, v, m = f[0], f[1], f[2]
		} else if isMac(f[1]) {
			// vlan, mac, driver
			v, m, d = f[0], f[1], f[2]
		} else {
			// bridge, vlan, driver
			b, v, d = f[0], f[1], f[2]
		}
	case 4:
		b, v, m, d = f[0], f[1], f[2], f[3]
	default:
		return NetConfig{}, errors.New("malformed netspec")
	}

	if d != "" && !isNetworkDriver(d) {
		return NetConfig{}, errors.New("malformed netspec, invalid driver: " + d)
	}

	log.Debug("got bridge=%v, vlan=%v, mac=%v, driver=%v", b, v, m, d)

	vlan, err := lookupVLAN(v)
	if err != nil {
		return NetConfig{}, err
	}

	if m != "" && !isMac(m) {
		return NetConfig{}, errors.New("malformed netspec, invalid mac address: " + m)
	}

	// warn on valid but not allocated macs
	if m != "" && !allocatedMac(m) {
		log.Warn("unallocated mac address: %v", m)
	}

	if b == "" {
		b = DefaultBridge
	}
	if d == "" {
		d = VM_NET_DRIVER_DEFAULT
	}

	return NetConfig{
		VLAN:   vlan,
		Bridge: b,
		MAC:    strings.ToLower(m),
		Driver: d,
	}, nil
}
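
A hedged sketch (same package; the bridge, VLAN, MAC, and driver values are made up, with "e1000" assumed to satisfy isNetworkDriver) exercising a few of the accepted netspec forms:

func processVMNetSketch() {
	for _, spec := range []string{
		"100",                                   // vlan
		"my_bridge,100",                         // bridge, vlan
		"100,00:11:22:33:44:55",                 // vlan, mac
		"my_bridge,100,00:11:22:33:44:55,e1000", // bridge, vlan, mac, driver
	} {
		nc, err := processVMNet(spec)
		if err != nil {
			log.Error("%v: %v", spec, err)
			continue
		}
		log.Debug("parsed %v -> %+v", spec, nc)
	}
}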
Example #20
// dial another node, perform a handshake, and add the client to the client list if successful
func (n *Node) dial(host string, solicited bool) error {
	addr := fmt.Sprintf("%s:%d", host, n.port)
	log.Debug("dialing: %v", addr)

	conn, err := net.DialTimeout("tcp", addr, DEFAULT_TIMEOUT*time.Second)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		return fmt.Errorf("dial %v: %v", host, err)
	}

	c := &client{
		conn: conn,
		enc:  gob.NewEncoder(conn),
		dec:  gob.NewDecoder(conn),
		ack:  make(chan uint64, RECEIVE_BUFFER),
	}

	var remoteHost string
	err = c.dec.Decode(&remoteHost)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		conn.Close()
		return fmt.Errorf("dial %v: %v", host, err)
	}

	var remoteSolicited bool
	err = c.dec.Decode(&remoteSolicited)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		conn.Close()
		return fmt.Errorf("dial %v: %v", host, err)
	}

	var remoteVersion string
	err = c.dec.Decode(&remoteVersion)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		conn.Close()
		return fmt.Errorf("dial %v: %v", host, err)
	}
	if remoteVersion != n.version {
		log.Warn("remote node version mismatch on host %v: %v", host, remoteVersion)
	}

	// are we the remote host?
	if remoteHost == n.name {
		conn.Close()
		return errors.New("cannot mesh dial yourself")
	}

	// are we already connected to this node?
	if n.hasClient(remoteHost) {
		conn.Close()
		return fmt.Errorf("already connected to %v", remoteHost)
	}

	// we should hangup if the connection no longer wants solicited connections and we're solicited
	if solicited && !remoteSolicited {
		conn.Close()
		return nil
	}

	err = c.enc.Encode(n.name)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		conn.Close()
		return fmt.Errorf("dial %v: %v", host, err)
	}

	err = c.enc.Encode(n.version)
	if err != nil {
		if solicited {
			log.Error("dial %v: %v", host, err)
		}
		conn.Close()
		return fmt.Errorf("dial %v: %v", host, err)
	}

	c.name = remoteHost
	log.Debug("handshake from: %v", remoteHost)

	n.clientLock.Lock()
	n.clients[remoteHost] = c
	n.clientLock.Unlock()

	go n.clientHandler(remoteHost)
	return nil
}
Example #21
// processVMNet processes the input specifying the bridge, vlan, and mac for
// one interface to a VM and updates the vm config accordingly. This takes a
// bit of parsing, because the entry can be in a few forms:
// 	vlan
//
//	vlan,mac
//	bridge,vlan
//	vlan,driver
//
//	bridge,vlan,mac
//	vlan,mac,driver
//	bridge,vlan,driver
//
//	bridge,vlan,mac,driver
// If there are 2 or 3 fields, check the last field for the presence of a mac
func processVMNet(spec string) (res NetConfig, err error) {
	// example: my_bridge,100,00:00:00:00:00:00
	f := strings.Split(spec, ",")

	var b, v, m, d string
	switch len(f) {
	case 1:
		v = f[0]
	case 2:
		if isMac(f[1]) {
			// vlan, mac
			v, m = f[0], f[1]
		} else if _, err := strconv.Atoi(f[0]); err == nil {
			// vlan, driver
			v, d = f[0], f[1]
		} else {
			// bridge, vlan
			b, v = f[0], f[1]
		}
	case 3:
		if isMac(f[2]) {
			// bridge, vlan, mac
			b, v, m = f[0], f[1], f[2]
		} else if isMac(f[1]) {
			// vlan, mac, driver
			v, m, d = f[0], f[1], f[2]
		} else {
			// bridge, vlan, driver
			b, v, d = f[0], f[1], f[2]
		}
	case 4:
		b, v, m, d = f[0], f[1], f[2], f[3]
	default:
		err = errors.New("malformed netspec")
		return
	}

	log.Debug("vm_net got b=%v, v=%v, m=%v, d=%v", b, v, m, d)

	// VLAN ID, with optional bridge
	vlan, err := strconv.Atoi(v) // the vlan id
	if err != nil {
		err = errors.New("malformed netspec, vlan must be an integer")
		return
	}

	if m != "" && !isMac(m) {
		err = errors.New("malformed netspec, invalid mac address: " + m)
		return
	}

	// warn on valid but not allocated macs
	if m != "" && !allocatedMac(m) {
		log.Warn("unallocated mac address: %v", m)
	}

	if b == "" {
		b = DEFAULT_BRIDGE
	}
	if d == "" {
		d = VM_NET_DRIVER_DEFAULT
	}

	res = NetConfig{
		VLAN:   vlan,
		Bridge: b,
		MAC:    strings.ToLower(m),
		Driver: d,
	}

	return
}
Example #22
// newConnection processes a new incoming connection from another node, processes the connection
// handshake, adds the connection to the client list, and starts the client message handler.
func (n *Node) newConnection(conn net.Conn) {
	log.Debug("newConnection: %v", conn.RemoteAddr().String())

	// are we soliciting connections?
	var solicited bool
	if n.numClients() < n.degree {
		solicited = true
	} else {
		solicited = false
	}
	log.Debug("solicited: %v", solicited)

	c := &client{
		conn: conn,
		enc:  gob.NewEncoder(conn),
		dec:  gob.NewDecoder(conn),
		ack:  make(chan uint64, RECEIVE_BUFFER),
	}

	// the handshake involves the following:
	// 1.  We send our name, our solicitation status, and our version
	// 2a. If the connection is solicited but we're all full, the remote node simply hangs up
	// 2b. If the connection is unsolicited or solicited and we are still soliciting connections, the remote node responds with its name
	// 3.  The connection is valid, add it to our client list and broadcast a MSA announcing the new connection.
	// 4.  The remote node does the same as 3.
	err := c.enc.Encode(n.name)
	if err != nil {
		log.Error("newConnection encode name: %v: %v", n.name, err)
		c.conn.Close()
		return
	}

	err = c.enc.Encode(solicited)
	if err != nil {
		log.Error("newConnection encode solicited: %v: %v", n.name, err)
		c.conn.Close()
		return
	}

	err = c.enc.Encode(n.version)
	if err != nil {
		log.Error("newConnection encode version: %v: %v", n.name, err)
		c.conn.Close()
		return
	}

	var remoteHost string
	err = c.dec.Decode(&remoteHost)
	if err != nil {
		if err != io.EOF {
			log.Error("newConnection decode name: %v: %v", n.name, err)
		}
		c.conn.Close()
		return
	}

	var remoteVersion string
	err = c.dec.Decode(&remoteVersion)
	if err != nil {
		if err != io.EOF {
			log.Error("newConnection decode version: %v: %v", n.name, err)
		}
		c.conn.Close()
		return
	}
	if remoteVersion != n.version {
		log.Warn("remote node version mismatch on host %v: %v", remoteHost, remoteVersion)
	}

	c.name = remoteHost
	log.Debug("handshake from: %v", c.name)

	n.clientLock.Lock()
	n.clients[remoteHost] = c
	n.clientLock.Unlock()

	go n.clientHandler(remoteHost)
}
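
For orientation, the ordering of gob-encoded values implied by dial (Example #20) and newConnection above, summarized as a sketch:

//	dialer (dial)                      accepter (newConnection)
//	-------------                      ------------------------
//	Decode(&remoteHost)      <-------  Encode(n.name)
//	Decode(&remoteSolicited) <-------  Encode(solicited)
//	Decode(&remoteVersion)   <-------  Encode(n.version)
//	Encode(n.name)           ------->  Decode(&remoteHost)
//	Encode(n.version)        ------->  Decode(&remoteVersion)
//
// The dialer hangs up before sending its half if the remote is itself,
// already a client, or no longer soliciting; version mismatches only warn.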
Example #23
// meshageSend sends a command to a list of hosts, returning a channel that the
// responses will be sent to. This is non-blocking -- the channel is created
// and then returned after a couple of sanity checks. Should not be invoked
// as a goroutine as it checks the active namespace when expanding hosts.
func meshageSend(c *minicli.Command, hosts string) (<-chan minicli.Responses, error) {
	// HAX: Ensure we aren't sending read or mesh send commands over meshage
	if hasCommand(c, "read") || hasCommand(c, "mesh send") {
		return nil, fmt.Errorf("cannot run `%s` over mesh", c.Original)
	}

	// expand the hosts to a list of recipients, must be done synchronously
	recipients, err := meshageRecipients(hosts)
	if err != nil {
		return nil, err
	}

	meshageCommandLock.Lock()
	out := make(chan minicli.Responses)

	// Build a mesh command from the command, assigning a random ID
	meshageID := rand.Int31()
	meshageCmd := meshageCommand{Command: *c, TID: meshageID}

	go func() {
		defer meshageCommandLock.Unlock()
		defer close(out)

		recipients, err = meshageNode.Set(recipients, meshageCmd)
		if err != nil {
			out <- errResp(err)
			return
		}

		log.Debug("meshage sent, waiting on %d responses", len(recipients))

		// host -> response
		resps := map[string]*minicli.Response{}

		// wait on a response from each recipient
	recvLoop:
		for len(resps) < len(recipients) {
			select {
			case resp := <-meshageResponseChan:
				body := resp.Body.(meshageResponse)
				if body.TID != meshageID {
					log.Warn("invalid TID from response channel: %d", body.TID)
				} else {
					resps[body.Host] = &body.Response
				}
			case <-time.After(meshageTimeout):
				// Didn't hear back from any node within the timeout
				break recvLoop
			}
		}

		// Fill in the responses for recipients that timed out
		resp := minicli.Responses{}
		for _, host := range recipients {
			if v, ok := resps[host]; ok {
				resp = append(resp, v)
			} else if host != hostname {
				resp = append(resp, &minicli.Response{
					Host:  host,
					Error: "timed out",
				})
			}
		}

		out <- resp
	}()

	return out, nil
}
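
A hedged usage sketch (the command is made up): the caller drains the returned channel until it is closed.

func meshageSendSketch() {
	cmd := minicli.MustCompile("vm info")

	out, err := meshageSend(cmd, Wildcard)
	if err != nil {
		log.Errorln(err)
		return
	}

	for resps := range out {
		for _, resp := range resps {
			log.Debug("response from %v: %v", resp.Host, resp)
		}
	}
}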
Example #24
// launch is the low-level launch function for Container VMs. The caller should
// hold the VM's lock.
func (vm *ContainerVM) launch() error {
	log.Info("launching vm: %v", vm.ID)

	err := containerInit()
	if err != nil {
		log.Errorln(err)
		vm.setError(err)
		return err
	}
	if !containerInitSuccess {
		err = fmt.Errorf("cgroups are not initialized, cannot continue")
		log.Errorln(err)
		vm.setError(err)
		return err
	}

	// If this is the first time launching the VM, do the final configuration
	// check, create a directory for it, and setup the FS.
	if vm.State == VM_BUILDING {
		if err := os.MkdirAll(vm.instancePath, os.FileMode(0700)); err != nil {
			teardownf("unable to create VM dir: %v", err)
		}

		if vm.Snapshot {
			if err := vm.overlayMount(); err != nil {
				log.Error("overlayMount: %v", err)
				vm.setError(err)
				return err
			}
		} else {
			vm.effectivePath = vm.FSPath
		}
	}

	// write the config for this vm
	config := vm.BaseConfig.String() + vm.ContainerConfig.String()
	writeOrDie(vm.path("config"), config)
	writeOrDie(vm.path("name"), vm.Name)

	// the child process will communicate with a fake console using pipes
	// to mimic stdio, and a fourth pipe for logging before the child execs
	// into the init program
	// two additional pipes are needed to synchronize freezing the child
	// before it enters the container
	parentLog, childLog, err := os.Pipe()
	if err != nil {
		log.Error("pipe: %v", err)
		vm.setError(err)
		return err
	}
	parentSync1, childSync1, err := os.Pipe()
	if err != nil {
		log.Error("pipe: %v", err)
		vm.setError(err)
		return err
	}
	childSync2, parentSync2, err := os.Pipe()
	if err != nil {
		log.Error("pipe: %v", err)
		vm.setError(err)
		return err
	}

	// create the uuid path that will bind mount into sysfs in the
	// container
	uuidPath := vm.path("uuid")
	ioutil.WriteFile(uuidPath, []byte(vm.UUID+"\n"), 0400)

	// create fifos
	for i := 0; i < vm.Fifos; i++ {
		p := vm.path(fmt.Sprintf("fifo%v", i))
		if err = syscall.Mkfifo(p, 0660); err != nil {
			log.Error("fifo: %v", err)
			vm.setError(err)
			return err
		}
	}

	// The child is re-exec'd with the following argument layout:
	//	0 :  minimega binary
	//	1 :  -base flag
	//	2 :  base path
	//	3 :  CONTAINER magic
	//	4 :  instance path
	//	5 :  vm id
	//	6 :  hostname ("CONTAINER_NONE" if none)
	//	7 :  filesystem path
	//	8 :  memory in megabytes
	//	9 :  path to uuid file
	//	10:  number of fifos
	//	11:  preinit program ("CONTAINER_NONE" if none)
	//	12+: init program and args (relative to filesystem path)
	hn := vm.Hostname
	if hn == "" {
		hn = CONTAINER_NONE
	}
	preinit := vm.Preinit
	if preinit == "" {
		preinit = CONTAINER_NONE
	}
	args := []string{
		os.Args[0],
		"-base",
		*f_base,
		CONTAINER_MAGIC,
		vm.instancePath,
		fmt.Sprintf("%v", vm.ID),
		hn,
		vm.effectivePath,
		vm.Memory,
		uuidPath,
		fmt.Sprintf("%v", vm.Fifos),
		preinit,
	}
	args = append(args, vm.Init...)

	// launch the container
	cmd := &exec.Cmd{
		Path: "/proc/self/exe",
		Args: args,
		ExtraFiles: []*os.File{
			childLog,
			childSync1,
			childSync2,
		},
		SysProcAttr: &syscall.SysProcAttr{
			Cloneflags: uintptr(CONTAINER_FLAGS),
		},
	}

	// Start the child and give it a pty
	pseudotty, err := pty.Start(cmd)
	if err != nil {
		vm.overlayUnmount()
		log.Error("start container: %v", err)
		vm.setError(err)
		return err
	}

	vm.pid = cmd.Process.Pid
	log.Debug("vm %v has pid %v", vm.ID, vm.pid)

	// log the child
	childLog.Close()
	log.LogAll(parentLog, log.DEBUG, "containerShim")

	go vm.console(pseudotty)

	// TODO: add affinity funcs for containers
	// vm.CheckAffinity()

	// network creation for containers happens /after/ the container is
	// started, as we need the PID in order to attach a veth to the container
	// side of the network namespace. That means that unlike kvm vms, we MUST
	// create/destroy taps on launch/kill boundaries (kvm destroys taps on
	// flush).
	if err = vm.launchNetwork(); err != nil {
		log.Errorln(err)
	}

	childSync1.Close()
	if err == nil {
		// wait for the freezer notification
		var buf = make([]byte, 1)
		parentSync1.Read(buf)

		err = vm.freeze()

		parentSync2.Close()
	} else {
		parentSync1.Close()
		parentSync2.Close()
	}

	ccPath := filepath.Join(vm.effectivePath, "cc")

	if err == nil {
		// connect cc. Note that we have a local err here because we don't want
		// to prevent the VM from continuing to launch, even if we can't
		// connect to cc.
		if err := ccNode.ListenUnix(ccPath); err != nil {
			log.Warn("unable to connect to cc for vm %v: %v", vm.ID, err)
		}
	}

	if err != nil {
		// Some error occurred.. clean up the process
		cmd.Process.Kill()

		vm.setError(err)
		return err
	}

	// Channel to signal when the process has exited
	errChan := make(chan error)

	// Create goroutine to wait for process to exit
	go func() {
		defer close(errChan)

		errChan <- cmd.Wait()
	}()

	go func() {
		cgroupFreezerPath := filepath.Join(*f_cgroup, "freezer", "minimega", fmt.Sprintf("%v", vm.ID))
		cgroupMemoryPath := filepath.Join(*f_cgroup, "memory", "minimega", fmt.Sprintf("%v", vm.ID))
		cgroupDevicesPath := filepath.Join(*f_cgroup, "devices", "minimega", fmt.Sprintf("%v", vm.ID))
		sendKillAck := false

		select {
		case err := <-errChan:
			log.Info("VM %v exited", vm.ID)

			vm.lock.Lock()
			defer vm.lock.Unlock()

			// we don't need to check the error for a clean kill,
			// as there's no way to get here if we killed it.
			if err != nil {
				log.Error("kill container: %v", err)
				vm.setError(err)
			}
		case <-vm.kill:
			log.Info("Killing VM %v", vm.ID)

			vm.lock.Lock()
			defer vm.lock.Unlock()

			cmd.Process.Kill()

			// containers cannot exit unless thawed, so thaw it if necessary
			if err := vm.thaw(); err != nil {
				log.Errorln(err)
				vm.setError(err)
			}

			// wait for the taskset to actually exit (from uninterruptible
			// sleep state).
			for {
				t, err := ioutil.ReadFile(filepath.Join(cgroupFreezerPath, "tasks"))
				if err != nil {
					log.Errorln(err)
					vm.setError(err)
					break
				}
				if len(t) == 0 {
					break
				}

				count := strings.Count(string(t), "\n")
				log.Info("waiting on %d tasks for VM %v", count, vm.ID)
				time.Sleep(100 * time.Millisecond)
			}

			// drain errChan
			for err := range errChan {
				log.Debug("kill container: %v", err)
			}

			sendKillAck = true // wait to ack until we've cleaned up
		}

		if vm.ptyUnixListener != nil {
			vm.ptyUnixListener.Close()
		}
		if vm.ptyTCPListener != nil {
			vm.ptyTCPListener.Close()
		}

		// cleanup cc domain socket
		ccNode.CloseUnix(ccPath)

		vm.unlinkNetns()

		for _, net := range vm.Networks {
			br, err := getBridge(net.Bridge)
			if err != nil {
				log.Error("get bridge: %v", err)
			} else {
				br.DestroyTap(net.Tap)
			}
		}

		// clean up the cgroup directory
		if err := os.Remove(cgroupFreezerPath); err != nil {
			log.Errorln(err)
		}
		if err := os.Remove(cgroupMemoryPath); err != nil {
			log.Errorln(err)
		}
		if err := os.Remove(cgroupDevicesPath); err != nil {
			log.Errorln(err)
		}

		if vm.State != VM_ERROR {
			// Set to QUIT unless we've already been put into the error state
			vm.setState(VM_QUIT)
		}

		if sendKillAck {
			killAck <- vm.ID
		}
	}()

	return nil
}
Example #25
func main() {
	var err error

	flag.Usage = usage
	flag.Parse()
	if !strings.HasSuffix(*f_base, "/") {
		*f_base += "/"
	}

	if *f_cli {
		doc, err := minicli.Doc()
		if err != nil {
			log.Fatalln("failed to generate docs: %v", err)
		}
		fmt.Println(doc)
		os.Exit(0)
	}

	// rebase f_iomBase if f_base changed but iomBase did not
	if *f_base != BASE_PATH && *f_iomBase == IOM_PATH {
		*f_iomBase = *f_base + "files"
	}

	if !strings.HasSuffix(*f_iomBase, "/") {
		*f_iomBase += "/"
	}

	if *f_version {
		fmt.Println("minimega", version.Revision, version.Date)
		fmt.Println(version.Copyright)
		os.Exit(0)
	}

	logSetup()

	hostname, err = os.Hostname()
	if err != nil {
		log.Fatalln(err)
	}

	if isReserved(hostname) {
		log.Warn("hostname `%s` is a reserved word -- abandon all hope, ye who enter here", hostname)
	}

	vms = make(map[int]*vmInfo)

	// special case, catch -e and execute a command on an already running
	// minimega instance
	if *f_e {
		localCommand()
		return
	}
	if *f_attach {
		cliAttach()
		return
	}

	// warn if we're not root
	user, err := user.Current()
	if err != nil {
		log.Fatalln(err)
	}
	if user.Uid != "0" {
		log.Warnln("not running as root")
	}

	// check for a running instance of minimega
	_, err = os.Stat(*f_base + "minimega")
	if err == nil {
		if !*f_force {
			log.Fatalln("minimega appears to already be running, override with -force")
		}
		log.Warn("minimega may already be running, proceed with caution")
		err = os.Remove(*f_base + "minimega")
		if err != nil {
			log.Fatalln(err)
		}
	}

	// set up signal handling
	sig := make(chan os.Signal, 1024)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		first := true
		for {
			<-sig
			if *f_panic {
				panic("teardown")
			}
			if first {
				log.Info("caught signal, tearing down, ctrl-c again will force quit")
				go teardown()
				first = false
			} else {
				os.Exit(1)
			}
		}
	}()

	err = checkExternal()
	if err != nil {
		log.Warnln(err.Error())
	}

	// attempt to set up the base path
	err = os.MkdirAll(*f_base, os.FileMode(0770))
	if err != nil {
		log.Fatal("mkdir base path: %v", err)
	}
	pid := os.Getpid()
	err = ioutil.WriteFile(*f_base+"minimega.pid", []byte(fmt.Sprintf("%v", pid)), 0664)
	if err != nil {
		log.Error("write minimega pid: %v", err)
		teardown()
	}
	go commandSocketStart()

	// create a node for meshage
	host, err := os.Hostname()
	if err != nil {
		log.Fatalln(err)
	}
	meshageInit(host, *f_namespace, uint(*f_degree), *f_port)

	fmt.Println(banner)

	// fan out to the number of cpus on the system if GOMAXPROCS env variable is
	// not set.
	if os.Getenv("GOMAXPROCS") == "" {
		cpus := runtime.NumCPU()
		runtime.GOMAXPROCS(cpus)
	}

	if !*f_nostdin {
		cliLocal()
	} else {
		<-sig
		if *f_panic {
			panic("teardown")
		}
	}
	teardown()
}
Example #26
// client and transport handler for connections.
func (s *Server) clientHandler(conn net.Conn) {
	defer conn.Close()

	c := &client{
		conn: conn,
		enc:  gob.NewEncoder(conn),
		dec:  gob.NewDecoder(conn),
	}

	// get the first client struct as a handshake
	var handshake Message
	if err := c.dec.Decode(&handshake); err != nil {
		// client disconnected before it sent the full handshake
		if err != io.EOF {
			log.Errorln(err)
		}
		return
	}
	c.Client = handshake.Client
	log.Debug("new client: %v", handshake.Client)

	if c.Version != version.Revision {
		log.Warn("mismatched miniccc version: %v", c.Version)
	}

	// Set up minitunnel, dialing the server that should be running on the
	// client's side. Data is Trunk'd via Messages.
	local, remote := net.Pipe()
	defer local.Close()
	defer remote.Close()

	go func() {
		go Trunk(remote, c.UUID, c.sendMessage)

		tunnel, err := minitunnel.Dial(local)
		if err != nil {
			log.Error("dial: %v", err)
			return
		}

		s.clientLock.Lock()
		defer s.clientLock.Unlock()

		log.Debug("minitunnel created for %v", c.UUID)
		c.tunnel = tunnel
	}()

	c.Checkin = time.Now()

	if err := s.addClient(c); err != nil {
		log.Errorln(err)
		return
	}
	defer s.removeClient(c.UUID)

	var err error

	for err == nil {
		var m Message
		if err = c.dec.Decode(&m); err == nil {
			log.Debug("new message: %v", m.Type)

			switch m.Type {
			case MESSAGE_TUNNEL:
				_, err = remote.Write(m.Tunnel)
			case MESSAGE_FILE:
				m2 := s.readFile(m.Filename)
				m2.UUID = m.UUID
				err = c.sendMessage(m2)
			case MESSAGE_CLIENT:
				s.responses <- m.Client
			case MESSAGE_COMMAND:
				// this shouldn't be sent via the client...
			default:
				err = fmt.Errorf("unknown message type: %v", m.Type)
			}
		}
	}

	if err != io.EOF && !strings.Contains(err.Error(), "connection reset by peer") {
		log.Errorln(err)
	}
}
Example #27
func main() {
	var err error

	flag.Usage = usage
	flag.Parse()

	logSetup()

	// see containerShim()
	if flag.NArg() > 1 && flag.Arg(0) == CONTAINER_MAGIC {
		containerShim()
	}

	cliSetup()

	if *f_cli {
		if err := minicli.Validate(); err != nil {
			log.Fatalln(err)
		}

		doc, err := minicli.Doc()
		if err != nil {
			log.Fatal("failed to generate docs: %v", err)
		}
		fmt.Println(doc)
		os.Exit(0)
	}

	// rebase f_iomBase if f_base changed but iomBase did not
	if *f_base != BASE_PATH && *f_iomBase == IOM_PATH {
		*f_iomBase = filepath.Join(*f_base, "files")
	}

	if *f_version {
		fmt.Println("minimega", version.Revision, version.Date)
		fmt.Println(version.Copyright)
		os.Exit(0)
	}

	hostname, err = os.Hostname()
	if err != nil {
		log.Fatalln(err)
	}

	if isReserved(hostname) {
		log.Warn("hostname `%s` is a reserved word -- abandon all hope, ye who enter here", hostname)
	}

	// special case, catch -e and execute a command on an already running
	// minimega instance
	if *f_e || *f_attach {
		// try to connect to the local minimega
		mm, err := miniclient.Dial(*f_base)
		if err != nil {
			log.Fatalln(err)
		}
		mm.Pager = minipager.DefaultPager

		if *f_e {
			a := flag.Args()
			log.Debugln("got args:", a)

			// TODO: Need to escape?
			cmd := minicli.MustCompile(strings.Join(a, " "))
			log.Infoln("got command:", cmd)

			mm.RunAndPrint(cmd, false)
		} else {
			mm.Attach()
		}

		return
	}

	// warn if we're not root
	user, err := user.Current()
	if err != nil {
		log.Fatalln(err)
	}
	if user.Uid != "0" {
		log.Warnln("not running as root")
	}

	// check for a running instance of minimega
	_, err = os.Stat(filepath.Join(*f_base, "minimega"))
	if err == nil {
		if !*f_force {
			log.Fatalln("minimega appears to already be running, override with -force")
		}
		log.Warn("minimega may already be running, proceed with caution")
		err = os.Remove(filepath.Join(*f_base, "minimega"))
		if err != nil {
			log.Fatalln(err)
		}
	}

	// set up signal handling
	sig := make(chan os.Signal, 1024)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		first := true
		for s := range sig {
			if s == os.Interrupt && first {
				// do nothing
				continue
			}

			if *f_panic {
				panic("teardown")
			}
			if first {
				log.Info("caught signal, tearing down, ctrl-c again will force quit")
				go teardown()
				first = false
			} else {
				os.Exit(1)
			}
		}
	}()

	err = checkExternal()
	if err != nil {
		log.Warnln(err.Error())
	}

	// attempt to set up the base path
	err = os.MkdirAll(*f_base, os.FileMode(0770))
	if err != nil {
		log.Fatal("mkdir base path: %v", err)
	}

	pid := os.Getpid()
	writeOrDie(filepath.Join(*f_base, "minimega.pid"), strconv.Itoa(pid))

	go commandSocketStart()

	// create a node for meshage
	host, err := os.Hostname()
	if err != nil {
		log.Fatalln(err)
	}
	meshageInit(host, *f_context, *f_degree, *f_msaTimeout, *f_port)

	// start the cc service
	ccStart()

	// start tap reaper
	go periodicReapTaps()

	fmt.Println(banner)

	// fan out to the number of cpus on the system if GOMAXPROCS env variable is
	// not set.
	if os.Getenv("GOMAXPROCS") == "" {
		cpus := runtime.NumCPU()
		runtime.GOMAXPROCS(cpus)
	}

	if !*f_nostdin {
		cliLocal()
	} else {
		<-sig
		if *f_panic {
			panic("teardown")
		}
	}
	teardown()
}
Example #28
	"qemu-append": vmConfigSlice(func(vm interface{}) *[]string {
		return &mustKVMConfig(vm).QemuAppend
	}, "qemu-append", nil),
	"disk": vmConfigSlice(func(vm interface{}) *[]string {
		return &mustKVMConfig(vm).DiskPaths
	}, "disk", nil),
	"migrate": {
		Update: func(vm interface{}, c *minicli.Command) error {
			fname := c.StringArgs["path"]
			// Ensure that relative paths are always relative to /files/
			if !filepath.IsAbs(fname) {
				fname = filepath.Join(*f_iomBase, fname)
			}

			if _, err := os.Stat(fname); os.IsNotExist(err) {
				log.Warn("migration file does not exist: %v", fname)
			}

			mustKVMConfig(vm).MigratePath = fname
			return nil
		},
		Clear: func(vm interface{}) { mustKVMConfig(vm).MigratePath = "" },
		Print: func(vm interface{}) string { return mustKVMConfig(vm).MigratePath },
	},
	"append": {
		Update: func(vm interface{}, c *minicli.Command) error {
			mustKVMConfig(vm).Append = strings.Join(c.ListArgs["arg"], " ")
			return nil
		},
		Clear: func(vm interface{}) { mustKVMConfig(vm).Append = "" },
		Print: func(vm interface{}) string { return mustKVMConfig(vm).Append },
Example #29
func meshageSend(c *minicli.Command, hosts string, respChan chan minicli.Responses) {
	var (
		err        error
		recipients []string
	)

	meshageCommandLock.Lock()
	defer meshageCommandLock.Unlock()

	orig := c.Original

	// HAX: Ensure we aren't sending read or mesh send commands over meshage
	if hasCommand(c, "read") || hasCommand(c, "mesh send") {
		resp := &minicli.Response{
			Host:  hostname,
			Error: fmt.Sprintf("cannot run `%s` over mesh", orig),
		}
		respChan <- minicli.Responses{resp}
		return
	}

	meshageID := rand.Int31()
	// Build a mesh command from the subcommand, assigning a random ID
	meshageCmd := meshageCommand{Command: *c, TID: meshageID}

	if hosts == Wildcard {
		// Broadcast command to all VMs
		recipients = meshageNode.BroadcastRecipients()
	} else {
		// Send to specified list of recipients
		recipients = getRecipients(hosts)
	}

	recipients, err = meshageNode.Set(recipients, meshageCmd)
	if err != nil {
		resp := &minicli.Response{
			Host:  hostname,
			Error: err.Error(),
		}
		respChan <- minicli.Responses{resp}
		return
	}

	log.Debug("meshage sent, waiting on %d responses", len(recipients))
	meshResps := map[string]*minicli.Response{}

	// wait on a response from each recipient
loop:
	for len(meshResps) < len(recipients) {
		select {
		case resp := <-meshageResponseChan:
			body := resp.Body.(meshageResponse)
			if body.TID != meshageID {
				log.Warn("invalid TID from response channel: %d", body.TID)
			} else {
				meshResps[body.Host] = &body.Response
			}
		case <-time.After(meshageTimeout):
			// Didn't hear back from any node within the timeout
			log.Info("meshage send timed out")
			break loop
		}
	}

	// Fill in the responses for recipients that timed out
	resp := minicli.Responses{}
	for _, host := range recipients {
		if v, ok := meshResps[host]; ok {
			resp = append(resp, v)
		} else if host != hostname {
			resp = append(resp, &minicli.Response{
				Host:  host,
				Error: "timed out",
			})
		}
	}

	respChan <- resp
	return
}
Example #30
// client and transport handler for connections.
func (s *Server) clientHandler(conn io.ReadWriteCloser) {
	log.Debugln("ron clientHandler")

	enc := gob.NewEncoder(conn)
	dec := gob.NewDecoder(conn)

	// get the first client struct as a handshake
	var handshake Message
	err := dec.Decode(&handshake)
	if err != nil {
		if err != io.EOF {
			log.Errorln(err)
		}
		conn.Close()
		return
	}
	c := handshake.Client

	if c.Version != version.Revision {
		log.Warn("mismatched miniccc version: %v", c.Version)
	}

	c.conn = conn
	c.Checkin = time.Now()

	err = s.addClient(c)
	if err != nil {
		log.Errorln(err)
		conn.Close()
		return
	}

	tunnelQuit := make(chan bool)
	defer func() { tunnelQuit <- true }()

	// create a tunnel connection
	go c.handleTunnel(true, tunnelQuit)

	// handle client i/o
	go func() {
		for {
			m := <-c.out
			if m == nil {
				return
			}
			err := enc.Encode(m)
			if err != nil {
				if err != io.EOF {
					log.Errorln(err)
				}
				s.removeClient(c.UUID)
				return
			}
		}
	}()

	for {
		var m Message
		err := dec.Decode(&m)
		if err != nil {
			if err != io.EOF {
				log.Errorln(err)
			}
			s.removeClient(c.UUID)
			return
		}
		s.in <- &m
	}
}