func cliDeploy(c *minicli.Command) *minicli.Response { log.Debugln("deploy") resp := &minicli.Response{Host: hostname} hosts := c.StringArgs["hosts"] user := c.StringArgs["user"] sudo := c.BoolArgs["sudo"] flagsList := c.ListArgs["minimega"] if c.BoolArgs["flags"] { if flagsList == nil { resp.Response = deployGetFlags() } else { deployFlags = flagsList } return resp } hostsExpanded, err := ranges.SplitList(hosts) if err != nil { resp.Error = err.Error() return resp } log.Debug("got expanded hosts: %v", hostsExpanded) suffix := rand.New(rand.NewSource(time.Now().UnixNano())).Int31() remotePath := filepath.Join(os.TempDir(), fmt.Sprintf("minimega_deploy_%v", suffix)) log.Debug("remotePath: %v", remotePath) // copy minimega errs := deployCopy(hostsExpanded, user, remotePath) if errs != nil { // just report the errors and keep trying for _, e := range errs { resp.Error += fmt.Sprintf("%v\n", e.Error()) } } // launch minimega on each remote node errs = deployRun(hostsExpanded, user, remotePath, sudo) if errs != nil { for _, e := range errs { resp.Error += fmt.Sprintf("%v\n", e.Error()) } } return resp }
// expandLaunchNames is ExpandLaunchNames without locking vmLock. func expandLaunchNames(arg string, vms VMs) ([]string, error) { names := []string{} count, err := strconv.ParseInt(arg, 10, 32) if err != nil { names, err = ranges.SplitList(arg) } else if count <= 0 { err = errors.New("invalid number of vms (must be > 0)") } else { names = make([]string, count) } if err != nil { return nil, err } if len(names) == 0 { return nil, errors.New("no VMs to launch") } for _, name := range names { if isReserved(name) { return nil, fmt.Errorf("invalid vm name, `%s` is a reserved word", name) } if _, err := strconv.Atoi(name); err == nil { return nil, fmt.Errorf("invalid vm name, `%s` is an integer", name) } for _, vm := range vms { if !inNamespace(vm) { continue } if vm.GetName() == name { return nil, fmt.Errorf("vm already exists with name `%s`", name) } } } return names, nil }
// meshageRecipients expands a hosts into a list of hostnames. Supports // expanding Wildcard to all hosts in the mesh or all hosts in the active // namespace. func meshageRecipients(hosts string) ([]string, error) { ns := GetNamespace() if hosts == Wildcard { if ns == nil { return meshageNode.BroadcastRecipients(), nil } recipients := []string{} // Wildcard expands to all hosts in the namespace, except the local // host, if included for host := range ns.Hosts { if host == hostname { log.Info("excluding localhost, %v, from `%v`", hostname, Wildcard) continue } recipients = append(recipients, host) } return recipients, nil } recipients, err := ranges.SplitList(hosts) if err != nil { return nil, err } // If a namespace is active, warn if the user is trying to mesh send hosts // outside the namespace if ns != nil { for _, host := range recipients { if !ns.Hosts[host] { log.Warn("%v is not part of namespace %v", host, ns.Name) } } } return recipients, nil }
func cliDeploy(c *minicli.Command, resp *minicli.Response) error { hosts := c.StringArgs["hosts"] user := c.StringArgs["user"] sudo := c.BoolArgs["sudo"] flagsList := c.ListArgs["minimega"] if c.BoolArgs["flags"] { if flagsList == nil { resp.Response = deployGetFlags() return nil } deployFlags = flagsList return nil } hostsExpanded, err := ranges.SplitList(hosts) if err != nil { return err } log.Debug("got expanded hosts: %v", hostsExpanded) // Append timestamp to filename so that each deploy produces a new binary // on the remote system. Using the timestamp allows us to quickly identify // the latest binary after multiple deployments. fname := fmt.Sprintf("minimega_deploy_%v", time.Now().Unix()) remotePath := filepath.Join(os.TempDir(), fname) log.Debug("remotePath: %v", remotePath) // copy minimega errs := deployCopy(hostsExpanded, user, remotePath) // launch minimega on each remote node errs2 := deployRun(hostsExpanded, user, remotePath, sudo) return makeErrSlice(append(errs, errs2...)) }
func cliNamespaceMod(c *minicli.Command, resp *minicli.Response) error { ns := GetNamespace() if ns == nil { return errors.New("cannot run nsmod without active namespace") } // Empty string should parse fine... hosts, err := ranges.SplitList(c.StringArgs["hosts"]) if err != nil { return fmt.Errorf("invalid hosts -- %v", err) } if c.BoolArgs["add-host"] { peers := map[string]bool{} for _, peer := range meshageNode.BroadcastRecipients() { peers[peer] = true } // Test that the host is actually in the mesh. If it's not, we could // try to mesh dial it... Returning an error is simpler, for now. for i := range hosts { // Add all the peers if we see a wildcard if hosts[i] == Wildcard { for peer := range peers { ns.Hosts[peer] = true } return nil } // Resolve `localhost` to actual hostname if hosts[i] == Localhost { hosts[i] = hostname } // Otherwise, ensure that the peer is in the mesh if hosts[i] != hostname && !peers[hosts[i]] { return fmt.Errorf("unknown host: `%v`", hosts[i]) } } // After all have been checked, updated the namespace for _, host := range hosts { ns.Hosts[host] = true } return nil } else if c.BoolArgs["del-host"] { for _, host := range hosts { if host == Wildcard { ns.Hosts = map[string]bool{} break } delete(ns.Hosts, host) } return nil } // boo, should be unreachable return errors.New("unreachable") }
func cliVmLaunch(c *minicli.Command) *minicli.Response { resp := &minicli.Response{Host: hostname} arg := c.StringArgs["name"] names := []string{} count, err := strconv.ParseInt(arg, 10, 32) if err != nil { names, err = ranges.SplitList(arg) } else if count <= 0 { err = errors.New("invalid number of vms (must be > 0)") } else { for i := int64(0); i < count; i++ { names = append(names, "") } } if len(names) == 0 && err == nil { err = errors.New("no VMs to launch") } if err != nil { resp.Error = err.Error() return resp } for _, name := range names { if isReserved(name) { resp.Error = fmt.Sprintf("`%s` is a reserved word -- cannot use for vm name", name) return resp } if _, err := strconv.Atoi(name); err == nil { resp.Error = fmt.Sprintf("`%s` is an integer -- cannot use for vm name", name) return resp } for _, vm := range vms { if vm.Name() == name { resp.Error = fmt.Sprintf("`%s` is already the name of a VM", name) return resp } } } noblock := c.BoolArgs["noblock"] delete(c.BoolArgs, "noblock") // Parse the VM type, at this point there should only be one key left in // BoolArgs and it should be the VM type. var vmType VMType for k := range c.BoolArgs { var err error vmType, err = ParseVMType(k) if err != nil { log.Fatalln("expected VM type, not `%v`", k) } } log.Info("launching %v %v vms", len(names), vmType) ack := make(chan int) waitForAcks := func(count int) { // get acknowledgements from each vm for i := 0; i < count; i++ { log.Debug("launch ack from VM %v", <-ack) } } for i, name := range names { if err := vms.launch(name, vmType, ack); err != nil { resp.Error = err.Error() go waitForAcks(i) return resp } } if noblock { go waitForAcks(len(names)) } else { waitForAcks(len(names)) } return resp }
// expandVmTargets is the fan out/in method to apply a function to a set of VMs // specified by target. Specifically, it: // // 1. Expands target to a list of VM names and IDs (or wild) // 2. Invokes fn on all the matching VMs // 3. Collects all the errors from the invoked fns // 4. Records in the log a list of VMs that were not found // // The fn that is passed in takes two arguments: the VM struct and a boolean // specifying whether the invocation was wild or not. The fn returns a boolean // that indicates whether the target was applicable (e.g. calling start on an // already running VM would not be applicable) and an error. // // The concurrent boolean controls whether fn is run concurrently on multiple // VMs or not. If the fns alter state they can set this flag to false rather // than dealing with locking. func expandVmTargets(target string, concurrent bool, fn func(VM, bool) (bool, error)) []error { names := map[string]bool{} // Names of VMs for which to apply fn ids := map[int]bool{} // IDs of VMs for which to apply fn vals, err := ranges.SplitList(target) if err != nil { return []error{err} } for _, v := range vals { id, err := strconv.Atoi(v) if err == nil { ids[id] = true } else { names[v] = true } } wild := hasWildcard(names) delete(names, Wildcard) // wg determine when it's okay to close errChan var wg sync.WaitGroup errChan := make(chan error) // lock prevents concurrent writes to results var lock sync.Mutex results := map[string]bool{} // Wrap function with magic magicFn := func(vm VM) { defer wg.Done() ok, err := fn(vm, wild) if err != nil { errChan <- err } lock.Lock() defer lock.Unlock() results[vm.Name()] = ok } for _, vm := range vms { if wild || names[vm.Name()] || ids[vm.ID()] { delete(names, vm.Name()) delete(ids, vm.ID()) wg.Add(1) // Use concurrency only if requested if concurrent { go magicFn(vm) } else { magicFn(vm) } } } go func() { wg.Wait() close(errChan) }() var errs []error for err := range errChan { errs = append(errs, err) } // 
Special cases: specified one VM and // 1. it wasn't found // 2. it wasn't a valid target (e.g. start already running VM) if len(vals) == 1 && !wild { if (len(names) + len(ids)) == 1 { errs = append(errs, fmt.Errorf("VM not found: %v", vals[0])) } else if !results[vals[0]] { errs = append(errs, fmt.Errorf("VM state error: %v", vals[0])) } } // Log the names/ids of the vms that weren't found if (len(names) + len(ids)) > 0 { vals := []string{} for v := range names { vals = append(vals, v) } for v := range ids { vals = append(vals, strconv.Itoa(v)) } log.Info("VMs not found: %v", vals) } return errs }
// meshageSend sends a command to a set of remote hosts over the mesh and
// forwards their responses (plus "timed out" placeholders for hosts that
// never answered) to respChan as a single minicli.Responses slice.
func meshageSend(c *minicli.Command, hosts string, respChan chan minicli.Responses) {
	var (
		err        error
		recipients []string
	)

	// Serialize mesh commands so that responses from concurrent sends cannot
	// be interleaved on meshageResponseChan.
	meshageCommandLock.Lock()
	defer meshageCommandLock.Unlock()

	orig := c.Original

	// HAX: Ensure we aren't sending read or mesh send commands over meshage
	if hasCommand(c, "read") || hasCommand(c, "mesh send") {
		resp := &minicli.Response{
			Host:  hostname,
			Error: fmt.Sprintf("cannot run `%s` over mesh", orig),
		}
		respChan <- minicli.Responses{resp}
		return
	}

	// Random transaction ID used to match responses back to this command.
	meshageID := rand.Int31()
	// Build a mesh command from the subcommand, assigning a random ID
	meshageCmd := meshageCommand{Command: *c, TID: meshageID}

	if hosts == Wildcard {
		// Broadcast command to all VMs
		recipients = meshageNode.BroadcastRecipients()
	} else {
		// Send to specified list of recipients
		recipients, err = ranges.SplitList(hosts)
	}

	// Only attempt the send if the recipient list parsed cleanly; Set
	// returns the recipients that were actually reached.
	if err == nil {
		recipients, err = meshageNode.Set(recipients, meshageCmd)
	}

	// Either SplitList or Set failed -- report the error and bail.
	if err != nil {
		resp := &minicli.Response{
			Host:  hostname,
			Error: err.Error(),
		}
		respChan <- minicli.Responses{resp}
		return
	}

	log.Debug("meshage sent, waiting on %d responses", len(recipients))
	meshResps := map[string]*minicli.Response{}

	// wait on a response from each recipient
loop:
	for len(meshResps) < len(recipients) {
		select {
		case resp := <-meshageResponseChan:
			body := resp.Body.(meshageResponse)
			// Discard responses from stale/other transactions.
			if body.TID != meshageID {
				log.Warn("invalid TID from response channel: %d", body.TID)
			} else {
				meshResps[body.Host] = &body.Response
			}
		case <-time.After(meshageTimeout):
			// Didn't hear back from any node within the timeout
			log.Info("meshage send timed out")
			break loop
		}
	}

	// Fill in the responses for recipients that timed out. The local host is
	// allowed to be missing (it never responds over the mesh); every other
	// silent recipient is reported as timed out.
	resp := minicli.Responses{}
	for _, host := range recipients {
		if v, ok := meshResps[host]; ok {
			resp = append(resp, v)
		} else if host != hostname {
			resp = append(resp, &minicli.Response{
				Host:  host,
				Error: "timed out",
			})
		}
	}

	respChan <- resp

	return
}