Example #1
func getRecipients(r string) []string {
	f := strings.Split(r, ",")

	// fix splits on things like kn[1-5,200,150]
	var hosts []string
	appendState := false
	for _, v := range f {
		if strings.Contains(v, "[") {
			appendState = true
			hosts = append(hosts, v)
			if !strings.Contains(v, "]") {
				hosts[len(hosts)-1] += ","
			}
			continue
		}
		if appendState {
			hosts[len(hosts)-1] += v
			if strings.Contains(v, "]") {
				appendState = false
			} else {
				hosts[len(hosts)-1] += ","
			}
			continue
		}
		hosts = append(hosts, v)
	}
	log.Debugln("getRecipients first pass: "******"expanded range: %v", ret)
		hostsExpanded = append(hostsExpanded, ret...)
	}
	log.Debugln("getRecipients expanded pass: ", hostsExpanded)
	return hostsExpanded
}
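The first pass above exists only to re-join fields that a naive strings.Split on "," broke apart inside brackets. A minimal standalone sketch of equivalent re-join logic, using only the standard library (the rejoin helper is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// rejoin merges comma-split fields back together so that bracketed
// ranges such as kn[1-5,200,150] survive a naive split on ",".
func rejoin(fields []string) []string {
	var hosts []string
	inBracket := false
	for _, v := range fields {
		if inBracket {
			hosts[len(hosts)-1] += "," + v
			if strings.Contains(v, "]") {
				inBracket = false
			}
			continue
		}
		hosts = append(hosts, v)
		if strings.Contains(v, "[") && !strings.Contains(v, "]") {
			inBracket = true
		}
	}
	return hosts
}

func main() {
	fields := strings.Split("kn[1-5,200,150],foo,kn7", ",")
	fmt.Println(rejoin(fields)) // [kn[1-5,200,150] foo kn7]
}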
Example #2
func main() {
	flag.Parse()

	r, _ := ranges.NewRange(*f_prefix, 1, 520)

	data, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	input := strings.Fields(string(data))

	res, err := r.UnsplitRange(input)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(res)
}
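SplitRange and UnsplitRange behave as inverses in these examples: the former expands bracket notation into individual hostnames, the latter compresses a hostname list back into a range spec. A minimal round-trip sketch; the import path and the exact output format are assumptions, not taken from this page:

package main

import (
	"fmt"

	"ranges" // import path depends on your checkout of minimega
)

func main() {
	// Hosts kn1 through kn520, matching the 1..520 bounds used above.
	r, err := ranges.NewRange("kn", 1, 520)
	if err != nil {
		panic(err)
	}

	// Expand a spec into hostnames...
	hosts, _ := r.SplitRange("kn[1-3,5]")
	fmt.Println(hosts) // expected: [kn1 kn2 kn3 kn5]

	// ...and compress them back.
	spec, _ := r.UnsplitRange(hosts)
	fmt.Println(spec) // expected: kn[1-3,5]
}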
Example #3
func compressHosts(hosts []string) string {
	var res []string

	// Add all the hosts to a trie
	trie := newTrie()
	for _, v := range hosts {
		trie.Add(v)
	}
	prefixes := trie.AlphaPrefixes()

	// Find the longest prefix match for each host
	groups := map[string][]string{}
	for _, h := range hosts {
		longest := ""
		for _, p := range prefixes {
			if strings.HasPrefix(h, p) && len(p) > len(longest) {
				longest = p
			}
		}

		groups[longest] = append(groups[longest], h)
	}

	// Compress each group of hosts that share the same prefix
	for p, group := range groups {
		r, _ := ranges.NewRange(p, 0, int(math.MaxInt32))

		s, err := r.UnsplitRange(group)
		if err != nil {
			// Fallback, append all the hosts
			res = append(res, group...)
			continue
		}

		res = append(res, s)
	}

	sort.Strings(res)

	return strings.Join(res, ",")
}
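newTrie and AlphaPrefixes are defined elsewhere in this codebase; as a rough stand-in for the grouping they provide, the sketch below derives each host's leading alphabetic prefix directly (alphaPrefix is a hypothetical simplification, not the trie-based lookup):

package main

import (
	"fmt"
	"unicode"
)

// alphaPrefix returns the leading non-digit portion of a hostname,
// e.g. "kn200" -> "kn".
func alphaPrefix(host string) string {
	for i, r := range host {
		if unicode.IsDigit(r) {
			return host[:i]
		}
	}
	return host
}

func main() {
	groups := map[string][]string{}
	for _, h := range []string{"kn1", "kn2", "ccc4", "ccc10"} {
		p := alphaPrefix(h)
		groups[p] = append(groups[p], h)
	}
	fmt.Println(groups) // map[ccc:[ccc4 ccc10] kn:[kn1 kn2]]
}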
Example #4
// TODO: Rewrite this to use Header/Tabular.
func optimizeStatus() (string, error) {
	var o bytes.Buffer
	w := new(tabwriter.Writer)
	w.Init(&o, 5, 0, 1, ' ', 0)
	fmt.Fprintf(w, "Subsystem\tEnabled\n")
	fmt.Fprintf(w, "KSM\t%v\n", ksmEnabled)

	hugepagesEnabled := "false"
	if hugepagesMountPath != "" {
		hugepagesEnabled = fmt.Sprintf("true [%v]", hugepagesMountPath)
	}
	fmt.Fprintf(w, "hugepages\t%v\n", hugepagesEnabled)

	r, err := ranges.NewRange("", 0, runtime.NumCPU()-1)
	if err != nil {
		return "", fmt.Errorf("cpu affinity ranges: %v", err)
	}

	var cpus []string
	for k := range affinityCPUSets {
		cpus = append(cpus, k)
	}
	cpuRange, err := r.UnsplitRange(cpus)
	if err != nil {
		return "", fmt.Errorf("cannot compress CPU range: %v", err)
	}

	if affinityEnabled {
		fmt.Fprintf(w, "CPU affinity\ttrue with cpus %v\n", cpuRange)
	} else {
		fmt.Fprintf(w, "CPU affinity\tfalse\n")
	}

	w.Flush()
	return o.String(), nil
}
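The arguments to Init above are minwidth, tabwidth, padding, padchar, and flags, in that order. A self-contained sketch of the same two-column layout, written straight to stdout:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// minwidth=5, tabwidth=0, padding=1, padchar=' ', flags=0:
	// each column is at least 5 cells wide plus one space of padding.
	w := tabwriter.NewWriter(os.Stdout, 5, 0, 1, ' ', 0)
	fmt.Fprintf(w, "Subsystem\tEnabled\n")
	fmt.Fprintf(w, "KSM\ttrue\n")
	fmt.Fprintf(w, "hugepages\tfalse\n")
	w.Flush()
}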
Example #5
// Takes a range specification like ccc[1-4,6,8-10], converts
// it to a list of node names (ccc1 ccc2 and so on), then makes
// a collection of Devices which contain only those corresponding
// outlets in their outlet list.
// This is handy because you'll generally be calling On(),
// Off(), etc. one device at a time.
func findOutletsAndDevs(s string) (map[string]Device, error) {
	ret := make(map[string]Device)
	var nodes []string
	var err error

	ranger, _ := ranges.NewRange(config.prefix, 0, 1000000)
	nodes, err = ranger.SplitRange(s)
	if err != nil {
		return ret, err
	}

	// This is really gross but you won't have a ton of devices anyway
	// so it should be pretty fast.
	// For each of the specified nodes...
	for _, n := range nodes {
		// Check in each device...
		for _, d := range config.devices {
			// If that node is connected to this device...
			if o, ok := d.outlets[n]; ok {
				if _, ok := ret[d.name]; ok {
					// either add the outlet to an
					// existing return device...
					ret[d.name].outlets[n] = o
				} else {
					// or create a new device to
					// return, and add the outlet
					tmp := Device{
						name:     d.name,
						host:     d.host,
						port:     d.port,
						pdutype:  d.pdutype,
						username: d.username,
						password: d.password,
					}
					tmp.outlets = make(map[string]string)
					tmp.outlets[n] = o
					ret[tmp.name] = tmp
				}
			}
		}
	}
	return ret, nil
}
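One subtlety in the loop above: ret[d.name].outlets[n] = o mutates a Device that is stored by value in a map. This compiles because outlets is itself a map, so the copy held in ret shares the same underlying map, even though a plain struct field of a map element cannot be assigned. A minimal sketch with a trimmed-down, hypothetical Device:

package main

import "fmt"

type Device struct {
	name    string
	outlets map[string]string
}

func main() {
	ret := map[string]Device{
		"pdu1": {name: "pdu1", outlets: map[string]string{}},
	}

	// Legal: indexing the outlets map does not require the Device
	// element to be addressable.
	ret["pdu1"].outlets["ccc1"] = "3"

	// ret["pdu1"].name = "x" // would NOT compile: cannot assign to
	// a struct field of a map element.

	fmt.Println(ret["pdu1"].outlets) // map[ccc1:3]
}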
Example #6
func cliVmLaunch(c *minicli.Command) *minicli.Response {
	resp := &minicli.Response{Host: hostname}

	arg := c.StringArgs["name"]
	vmNames := []string{}

	count, err := strconv.ParseInt(arg, 10, 32)
	if err == nil {
		if count <= 0 {
			resp.Error = "invalid number of vms (must be >= 1)"
			return resp
		}

		for i := int64(0); i < count; i++ {
			vmNames = append(vmNames, "")
		}
	} else {
		index := strings.IndexRune(arg, '[')
		if index == -1 {
			vmNames = append(vmNames, arg)
		} else {
			r, err := ranges.NewRange(arg[:index], 0, int(math.MaxInt32))
			if err != nil {
				log.Fatalln(err)
			}

			names, err := r.SplitRange(arg)
			if err != nil {
				resp.Error = err.Error()
				return resp
			}
			vmNames = append(vmNames, names...)
		}
	}

	if len(vmNames) == 0 {
		resp.Error = "no VMs to launch"
		return resp
	}

	for _, name := range vmNames {
		if isReserved(name) {
			resp.Error = fmt.Sprintf("`%s` is a reserved word -- cannot use for vm name", name)
			return resp
		}
	}

	log.Info("launching %v vms", len(vmNames))

	ack := make(chan int)
	waitForAcks := func(count int) {
		// get acknowledgements from each vm
		for i := 0; i < count; i++ {
			log.Debug("launch ack from VM %v", <-ack)
		}
	}

	for i, vmName := range vmNames {
		if err := vms.launch(vmName, ack); err != nil {
			resp.Error = err.Error()
			go waitForAcks(i)
			return resp
		}
	}

	if c.BoolArgs["noblock"] {
		go waitForAcks(len(vmNames))
	} else {
		waitForAcks(len(vmNames))
	}

	return resp
}
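The name argument above is interpreted three ways: a bare integer launches that many unnamed VMs, a spec containing '[' is expanded as a range, and anything else is a single VM name. A minimal sketch of that dispatch (classify is a hypothetical helper):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func classify(arg string) string {
	if n, err := strconv.ParseInt(arg, 10, 32); err == nil {
		return fmt.Sprintf("count: %d", n)
	}
	if i := strings.IndexRune(arg, '['); i != -1 {
		return fmt.Sprintf("range with prefix %q", arg[:i])
	}
	return fmt.Sprintf("single name %q", arg)
}

func main() {
	fmt.Println(classify("5"))       // count: 5
	fmt.Println(classify("vm[1-3]")) // range with prefix "vm"
	fmt.Println(classify("frodo"))   // single name "frodo"
}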
Example #7
func cliOptimize(c *minicli.Command) *minicli.Response {
	resp := &minicli.Response{Host: hostname}

	if c.BoolArgs["ksm"] {
		if len(c.BoolArgs) == 1 {
			// Must want to print ksm status
			resp.Response = fmt.Sprintf("%v", ksmEnabled)
		} else if c.BoolArgs["true"] {
			// Must want to update ksm status to true
			ksmEnable()
		} else {
			// Must want to update ksm status to false
			ksmDisable()
		}
	} else if c.BoolArgs["hugepages"] {
		if len(c.BoolArgs) == 1 {
			// Must want to print hugepage path
			resp.Response = fmt.Sprintf("%v", hugepagesMountPath)
		} else {
			hugepagesMountPath = c.StringArgs["path"]
		}
	} else if c.BoolArgs["affinity"] {
		if len(c.BoolArgs) == 1 {
			// Must want to print affinity status
			resp.Header = []string{"CPU", "VMs"}
			resp.Tabular = [][]string{}

			var cpus []string
			for k := range affinityCPUSets {
				cpus = append(cpus, k)
			}

			sort.Strings(cpus)

			for _, cpu := range cpus {
				var ids []int
				for _, vm := range affinityCPUSets[cpu] {
					ids = append(ids, vm.GetID())
				}
				resp.Tabular = append(resp.Tabular, []string{
					cpu,
					fmt.Sprintf("%v", ids)})
			}
		} else if c.BoolArgs["filter"] {
			r, err := ranges.NewRange("", 0, runtime.NumCPU()-1)
			if err != nil {
				resp.Error = fmt.Sprintf("cpu affinity ranges: %v", err)
				return resp
			}

			cpus, err := r.SplitRange(c.StringArgs["filter"])
			if err != nil {
				resp.Error = fmt.Sprintf("cannot expand CPU range: %v", err)
				return resp
			}

			affinityCPUSets = make(map[string][]*KvmVM)
			for _, v := range cpus {
				affinityCPUSets[v] = []*KvmVM{}
			}

			if affinityEnabled {
				affinityEnable()
			}
		} else if c.BoolArgs["true"] && !affinityEnabled {
			// Enabling affinity
			affinityEnable()
		} else if c.BoolArgs["false"] && affinityEnabled {
			// Disabling affinity
			affinityDisable()
		}
	} else {
		// Summary of optimizations
		var err error
		resp.Response, err = optimizeStatus()
		if err != nil {
			resp.Error = err.Error()
		}
	}

	return resp
}
Example #8
func runSub(cmd *Command, args []string) {
	var nodes []string
	var IPs []net.IP
	var pxefiles []string

	// Open and lock the reservation file
	path := filepath.Join(igorConfig.TFTPRoot, "/igor/reservations.json")
	resdb, err := os.OpenFile(path, os.O_RDWR, 0664)
	if err != nil {
		fatalf("failed to open reservations file: %v", err)
	}
	defer resdb.Close()
	err = syscall.Flock(int(resdb.Fd()), syscall.LOCK_EX)
	if err != nil {
		fatalf("failed to lock reservations file: %v", err)
	}
	defer syscall.Flock(int(resdb.Fd()), syscall.LOCK_UN) // this will unlock it later

	reservations := getReservations(resdb)

	// validate arguments
	if subR == "" || subK == "" || subI == "" || (subN == 0 && subW == "") {
		errorf("Missing required argument!")
		help([]string{"sub"})
		exit()
	}

	// figure out which nodes to reserve
	if subW != "" {
		rnge, _ := ranges.NewRange(igorConfig.Prefix, igorConfig.Start, igorConfig.End)
		nodes, _ = rnge.SplitRange(subW)
	}

	// Convert list of node names to PXE filenames
	// 1. lookup nodename -> IP
	for _, hostname := range nodes {
		ip, err := net.LookupIP(hostname)
		if err != nil {
			fatalf("failure looking up %v: %v", hostname, err)
		}
		IPs = append(IPs, ip...)
	}

	// 2. IP -> hex
	for _, ip := range IPs {
		pxefiles = append(pxefiles, toPXE(ip))
	}

	// Make sure none of those nodes are reserved
	// Check every reservation...
	for _, res := range reservations {
		// For every node in a reservation...
		for _, node := range res.PXENames {
			// make sure no node in *our* potential reservation conflicts
			for _, pxe := range pxefiles {
				if node == pxe {
					fatalf("Conflict with reservation %v, specific PXE file %v\n", res.ResName, pxe)
				}
			}
		}
	}

	// Ok, build our reservation
	reservation := Reservation{ResName: subR, Hosts: nodes, PXENames: pxefiles}
	user, err := user.Current()
	if err != nil {
		fatalf("failed to look up current user: %v", err)
	}
	reservation.Owner = user.Username
	reservation.Expiration = (time.Now().Add(time.Duration(subT) * time.Hour)).Unix()

	// Add it to the list of reservations
	reservations = append(reservations, reservation)

	// copy kernel and initrd
	// 1. Validate and open source files
	ksource, err := os.Open(subK)
	if err != nil {
		fatalf("couldn't open kernel: %v", err)
	}
	isource, err := os.Open(subI)
	if err != nil {
		fatalf("couldn't open initrd: %v", err)
	}

	// make kernel copy
	fname := filepath.Join(igorConfig.TFTPRoot, "igor", subR+"-kernel")
	kdest, err := os.Create(fname)
	if err != nil {
		fatalf("failed to create %v -- %v", fname, err)
	}
	io.Copy(kdest, ksource)
	kdest.Close()
	ksource.Close()

	// make initrd copy
	fname = filepath.Join(igorConfig.TFTPRoot, "igor", subR+"-initrd")
	idest, err := os.Create(fname)
	if err != nil {
		fatalf("failed to create %v -- %v", fname, err)
	}
	io.Copy(idest, isource)
	idest.Close()
	isource.Close()

	// create appropriate pxe config file in igorConfig.TFTPRoot+/pxelinux.cfg/igor/
	fname = filepath.Join(igorConfig.TFTPRoot, "pxelinux.cfg", "igor", subR)
	masterfile, err := os.Create(fname)
	if err != nil {
		fatalf("failed to create %v -- %v", fname, err)
	}
	defer masterfile.Close()
	masterfile.WriteString(fmt.Sprintf("default %s\n\n", subR))
	masterfile.WriteString(fmt.Sprintf("label %s\n", subR))
	masterfile.WriteString(fmt.Sprintf("kernel /igor/%s-kernel\n", subR))
	masterfile.WriteString(fmt.Sprintf("append initrd=/igor/%s-initrd %s\n", subR, subC))

	// create individual PXE boot configs i.e. igorConfig.TFTPRoot+/pxelinux.cfg/AC10001B by copying config created above
	for _, pxename := range pxefiles {
		masterfile.Seek(0, 0)
		fname := filepath.Join(igorConfig.TFTPRoot, "pxelinux.cfg", pxename)
		f, err := os.Create(fname)
		if err != nil {
			fatalf("failed to create %v -- %v", fname, err)
		}
		io.Copy(f, masterfile)
		f.Close()
	}

	// Truncate the existing reservation file
	resdb.Truncate(0)
	resdb.Seek(0, 0)
	// Write out the new reservations
	enc := json.NewEncoder(resdb)
	enc.Encode(reservations)
	resdb.Sync()
}
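The function above guards reservations.json with an exclusive flock, then rewrites it in place with truncate-seek-encode. A self-contained sketch of that lock-modify-rewrite pattern (rewriteLocked and the path are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"syscall"
)

// rewriteLocked takes an exclusive advisory lock on path, rewrites the
// file from offset 0 with the JSON encoding of v, and releases the
// lock via the deferred Flock when it returns.
func rewriteLocked(path string, v interface{}) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0664)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
		return err
	}
	defer syscall.Flock(int(f.Fd()), syscall.LOCK_UN)

	if err := f.Truncate(0); err != nil {
		return err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return err
	}
	if err := json.NewEncoder(f).Encode(v); err != nil {
		return err
	}
	return f.Sync()
}

func main() {
	fmt.Println(rewriteLocked("/tmp/reservations.json", []string{"res1"}))
}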
Example #9
// Ping every node (concurrently), then show which nodes are up
// and which nodes are in which reservation
func runShow(cmd *Command, args []string) {
	path := filepath.Join(igorConfig.TFTPRoot, "/igor/reservations.json")
	resdb, err := os.OpenFile(path, os.O_RDWR, 0664)
	if err != nil {
		fatalf("failed to open reservations file: %v", err)
	}
	defer resdb.Close()
	// We lock to make sure it doesn't change from under us
	// NOTE: not locking for now, haven't decided how important it is
	//err = syscall.Flock(int(resdb.Fd()), syscall.LOCK_EX)
	//defer syscall.Flock(int(resdb.Fd()), syscall.LOCK_UN)	// this will unlock it later
	reservations := getReservations(resdb)

	// Find out what nodes are down. The buffered fifo channel caps the
	// number of concurrent probes at 200; the mutex guards nodesAlive,
	// which would otherwise be written concurrently (a data race).
	nodesAlive := make(map[int]bool) // if node is alive, set bool to true
	var mu sync.Mutex
	done := make(chan bool)
	fifo := make(chan int, 200)
	for i := igorConfig.Start; i <= igorConfig.End; i++ {
		fifo <- 1
		go func(i int) {
			hostname := fmt.Sprintf("%s%d", igorConfig.Prefix, i)
			up := isAlive(hostname)
			mu.Lock()
			nodesAlive[i] = up
			mu.Unlock()
			<-fifo
			done <- true
		}(i)
	}
	for i := igorConfig.Start; i <= igorConfig.End; i++ {
		<-done
	}
	var downNodes []string
	for number, isup := range nodesAlive {
		if !isup {
			hostname := fmt.Sprintf("%s%d", igorConfig.Prefix, number)
			downNodes = append(downNodes, hostname)
		}
	}

	rnge, _ := ranges.NewRange(igorConfig.Prefix, igorConfig.Start, igorConfig.End)

	printShelves(reservations, nodesAlive)

	w := new(tabwriter.Writer)
	w.Init(os.Stdout, 10, 8, 0, '\t', 0)

	//	fmt.Fprintf(w, "Reservations for cluster nodes %s[%d-%d]\n", igorConfig.Prefix, igorConfig.Start, igorConfig.End)
	fmt.Fprintln(w, "NAME", "\t", "OWNER", "\t", "TIME LEFT", "\t", "NODES")
	fmt.Fprintf(w, "--------------------------------------------------------------------------------\n")
	w.Flush()
	downrange, _ := rnge.UnsplitRange(downNodes)
	fmt.Print(BgRed + "DOWN" + Reset)
	fmt.Fprintln(w, "\t", "N/A", "\t", "N/A", "\t", downrange)
	w.Flush()
	for idx, r := range reservations {
		unsplit, _ := rnge.UnsplitRange(r.Hosts)
		timeleft := fmt.Sprintf("%.1f", time.Unix(r.Expiration, 0).Sub(time.Now()).Hours())
		//		fmt.Fprintln(w, colorize(idx, r.ResName), "\t", r.Owner, "\t", timeleft, "\t", unsplit)
		fmt.Print(colorize(idx, r.ResName))
		fmt.Fprintln(w, "\t", r.Owner, "\t", timeleft, "\t", unsplit)
		w.Flush()
	}
	w.Flush()
}
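The ping fan-out above uses a buffered channel as a concurrency limiter and a second channel as a hand-rolled WaitGroup. A minimal sketch of the same bounded fan-out with sync primitives and a mutex-guarded result map (probeAll is a hypothetical helper):

package main

import (
	"fmt"
	"sync"
)

// probeAll runs alive(i) for every i in [start, end], at most 200 at
// a time, and collects the results into a map guarded by a mutex.
func probeAll(start, end int, alive func(int) bool) map[int]bool {
	results := make(map[int]bool)
	var mu sync.Mutex
	var wg sync.WaitGroup
	sem := make(chan struct{}, 200)

	for i := start; i <= end; i++ {
		wg.Add(1)
		sem <- struct{}{}
		go func(i int) {
			defer wg.Done()
			up := alive(i)
			mu.Lock()
			results[i] = up
			mu.Unlock()
			<-sem
		}(i)
	}
	wg.Wait()
	return results
}

func main() {
	res := probeAll(1, 5, func(i int) bool { return i%2 == 1 })
	fmt.Println(res) // map[1:true 2:false 3:true 4:false 5:true]
}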