func (r *run) Run(in io.Reader) (resStr string) { defer func() { if e := recover(); e != nil { // return error in json r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false endCounters() r.Results.Statistics = stats err, _ := json.Marshal(r.Results) resStr = string(err) return } }() // Restrict go runtime processor utilization here, this might be moved // into a more generic agent module function at some point. runtime.GOMAXPROCS(1) startCounters() // Read module parameters from stdin err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } e := &elements{} e.Packages = make([]scribelib.PackageInfo, 0) pkglist := scribelib.QueryPackages() for _, x := range r.Parameters.PkgMatch.Matches { re, err := regexp.Compile(x) if err != nil { panic(err) } for _, y := range pkglist { if !re.MatchString(y.Name) { continue } e.Packages = append(e.Packages, y) } } buf, err := buildResults(*e, &r.Results) if err != nil { panic(err) } resStr = string(buf) return }
// Run *must* be implemented by a module. Its the function that executes the module. // It must return a string of marshalled json that contains the results from the module. // The code below provides a base module skeleton that can be reused in all modules. func (r *run) Run(in io.Reader) (out string) { // a good way to handle execution failures is to catch panics and store // the panicked error into modules.Results.Errors, marshal that, and output // the JSON string back to the caller defer func() { if e := recover(); e != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false buf, _ := json.Marshal(r.Results) out = string(buf[:]) } }() // read module parameters from stdin err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } // verify that the parameters we received are valid err = r.ValidateParameters() if err != nil { panic(err) } // start a goroutine that does some work and another one that looks // for an early stop signal moduleDone := make(chan bool) stop := make(chan bool) go r.doModuleStuff(&out, &moduleDone) go modules.WatchForStop(in, &stop) select { case <-moduleDone: return out case <-stop: panic("stop message received, terminating early") } }
func (r *run) Run(in io.Reader) (out string) { var ts statistics stats = ts // in debug mode, we just panic if !debug { defer func() { if e := recover(); e != nil { // return error in json res := newResults() res.Statistics = stats res.Errors = append(res.Errors, fmt.Sprintf("%v", e)) res.Success = false err, _ := json.Marshal(res) out = string(err[:]) return } }() } t0 := time.Now() err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } // create the checks based on the search parameters for label, search := range r.Parameters.Searches { if debug { fmt.Println("making checks for label", label) } err := search.makeChecks() if err != nil { panic(err) } r.Parameters.Searches[label] = search } // evaluate each process one by one pids, err, serr := process.GetAllPids() if err != nil { panic(err) } if debug { fmt.Println("found", len(pids), "processes to evaluate") } for _, err = range serr { stats.Failures = append(stats.Failures, err.Error()) } for _, pid := range pids { // activate all searches for label, search := range r.Parameters.Searches { search.activate() r.Parameters.Searches[label] = search } proc, err, serr := process.OpenFromPid(pid) if err != nil { // if we encounter a hard failure, skip this process stats.Failures = append(stats.Failures, err.Error()) continue } for _, err = range serr { // soft failures are just logged but we continue inspection stats.Failures = append(stats.Failures, err.Error()) } err = r.evaluateProcess(proc) if err != nil { stats.Failures = append(stats.Failures, err.Error()) } stats.ProcessCount++ } out, err = r.buildResults(t0) if err != nil { panic(err) } if debug { fmt.Println("---- results ----") var tmpres modules.Result err = json.Unmarshal([]byte(out), &tmpres) printedResults, err := r.PrintResults(tmpres, false) if err != nil { panic(err) } for _, res := range printedResults { fmt.Println(res) } } return }
func (r *run) Run(in io.Reader) (resStr string) { defer func() { if e := recover(); e != nil { // return error in json r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false endCounters() r.Results.Statistics = stats err, _ := json.Marshal(r.Results) resStr = string(err) return } }() // Restrict go runtime processor utilization here, this might be moved // into a more generic agent module function at some point. runtime.GOMAXPROCS(1) // Initialize scribe scribelib.Bootstrap() // Install the file locator; this allows us to use the file module's // search functionality overriding the scribe built-in file system // walk function. scribelib.InstallFileLocator(fileModuleLocator) startCounters() // Read module parameters from stdin err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } document := r.Parameters.ScribeDoc e := &ScribeElements{} e.HumanOutput = r.Parameters.HumanOutput e.JSONOutput = r.Parameters.JSONOutput e.Results = make([]scribelib.TestResult, 0) // Proceed with analysis here. ValidateParameters() will have already // validated the document. err = scribelib.AnalyzeDocument(document) if err != nil { panic(err) } for _, x := range document.GetTestIdentifiers() { tr, err := scribelib.GetResults(&document, x) if err != nil { panic(err) } if !tr.MasterResult && r.Parameters.OnlyTrue { continue } e.Results = append(e.Results, tr) } buf, err := buildResults(*e, &r.Results) if err != nil { panic(err) } resStr = string(buf) return }
func (r *run) Run(in io.Reader) (out string) { var ( stats statistics el elements drift time.Duration ) defer func() { if e := recover(); e != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false buf, _ := json.Marshal(r.Results) out = string(buf[:]) } }() el.LocalTime = time.Now().Format(time.RFC3339Nano) t1 := time.Now() err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } // if drift is not set, skip the ntp test if r.Parameters.Drift == "" { r.Results.FoundAnything = true goto done } drift, err = time.ParseDuration(r.Parameters.Drift) if err != nil { panic(err) } // assume host has synched time and set to false if not true el.IsWithinDrift = true // attempt to get network time from each of the NTP servers, and exit // as soon as we get a valid result from one of them for i := 0; i < len(NtpPool); i++ { // pick a server between 0 and len of ntppool, somewhat randomly ntpsrv := NtpPool[time.Now().Nanosecond()%len(NtpPool)] t, lat, err := GetNetworkTime(ntpsrv) if err != nil { // failed to get network time, log a failure and try another one stats.NtpStats = append(stats.NtpStats, ntpstats{ Host: ntpsrv, Reachable: false, }) continue } // compare network time to local time localtime := time.Now() if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) continue } if localtime.Before(t.Add(-drift)) { el.IsWithinDrift = false el.Drifts = append(el.Drifts, fmt.Sprintf("Local time is behind ntp host %s by %s", ntpsrv, t.Sub(localtime).String())) } else if localtime.After(t.Add(drift)) { el.IsWithinDrift = false el.Drifts = append(el.Drifts, fmt.Sprintf("Local time is ahead of ntp host %s by %s", ntpsrv, localtime.Sub(t).String())) } stats.NtpStats = append(stats.NtpStats, ntpstats{ Host: ntpsrv, Time: t, Latency: lat, Drift: localtime.Sub(t).String(), Reachable: true, }) el.HasCheckedDrift = true // comparison 
succeeded, exit the loop break } if !el.IsWithinDrift { r.Results.FoundAnything = true } done: stats.ExecTime = time.Now().Sub(t1).String() out = r.buildResults(el, stats) return }
// Run executes the agentdestroy module: it kills the mig-agent process whose
// PID was supplied in the parameters and, when safe, removes its executable
// from disk. Results are returned as a marshalled JSON string; any failure
// panics and is converted to a JSON error by the deferred recover.
func (r *run) Run(in io.Reader) (out string) {
	defer func() {
		if e := recover(); e != nil {
			r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e))
			r.Results.Success = false
			buf, _ := json.Marshal(r.Results)
			out = string(buf[:])
		}
	}()
	// read module parameters from stdin
	err := modules.ReadInputParameters(in, &r.Parameters)
	if err != nil {
		panic(err)
	}
	// verify that the parameters we received are valid
	err = r.ValidateParameters()
	if err != nil {
		panic(err)
	}
	// Refuse to suicide: the module runs as a child of the agent, so the
	// parent PID is the agent executing this very module
	if r.Parameters.PID == os.Getppid() {
		panic(fmt.Sprintf("PID '%d' is mine. Refusing to suicide.", r.Parameters.PID))
	}
	// get the path of the agent's executable
	var targetExecutable string
	switch runtime.GOOS {
	case "linux", "darwin", "freebsd", "openbsd", "netbsd":
		// NOTE(review): /proc/<pid>/exe is a Linux convention; darwin has no
		// /proc and some BSDs need procfs mounted, so on those systems this
		// readlink fails and the module panics — confirm this is intended
		targetExecutable, err = os.Readlink(fmt.Sprintf("/proc/%d/exe", r.Parameters.PID))
		if err != nil {
			panic(fmt.Sprintf("Executable path of PID '%d' not found: '%v'", r.Parameters.PID, err))
		}
	case "windows":
		// windows agents install under a fixed, version-suffixed path
		targetExecutable = fmt.Sprintf("C:\\Program Files\\mig\\mig-agent-%s.exe", r.Parameters.Version)
	default:
		panic(fmt.Sprintf("'%s' isn't a supported OS", runtime.GOOS))
	}
	// verify that the executable we're removing isn't in use by the current agent
	// this can happen when two agents are running of the same executable
	// in which case, do not remove the executable, and only kill the process
	myExecutable, err := osext.Executable()
	if err != nil {
		panic(fmt.Sprintf("Failed to retrieve my executable location: '%v'", err))
	}
	removeExecutable := true
	if myExecutable == targetExecutable {
		r.Results.Errors = append(r.Results.Errors, "Executable not removed because current agent uses it as well")
		removeExecutable = false
	}
	removeStatus := ""
	if removeExecutable {
		// check that the binary we're removing has the right version; this
		// guards against deleting an unrelated or newer agent binary
		version, err := getAgentVersion(targetExecutable)
		if err != nil {
			panic(fmt.Sprintf("Failed to check agent version: '%v'", err))
		}
		if version != r.Parameters.Version {
			panic(fmt.Sprintf("Version mismatch. Expected '%s', found '%s'", r.Parameters.Version, version))
		}
		err = os.Remove(targetExecutable)
		if err != nil {
			panic(fmt.Sprintf("Failed to remove executable '%s': '%v'", targetExecutable, err))
		}
		removeStatus = fmt.Sprintf(" and its executable removed from %s", targetExecutable)
	}
	// Then kill the PID; the executable is removed first so the process
	// cannot be respawned from it in between
	process, err := os.FindProcess(r.Parameters.PID)
	if err != nil {
		panic(fmt.Sprintf("PID '%d' not found. Returned error '%v'", r.Parameters.PID, err))
	}
	err = process.Kill()
	if err != nil {
		panic(fmt.Sprintf("PID '%d' not killed. Returned error '%v'", r.Parameters.PID, err))
	}
	r.Results.Elements = fmt.Sprintf("PID %d from agent %s has been killed%s", r.Parameters.PID, r.Parameters.Version, removeStatus)
	r.Results.Success = true
	return r.buildResults()
}
func (r run) Run(in io.Reader) (out string) { defer func() { if e := recover(); e != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false buf, _ := json.Marshal(r.Results) out = string(buf[:]) } }() err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } // Extract the parameters that apply to this OS and Arch key := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) el, ok := r.Parameters[key] if !ok { panic("no upgrade instruction found for " + key) } // Verify that the version we're told to upgrade to isn't the current one cversion, err := getCurrentVersion() if err != nil { panic(err) } if cversion == el["to_version"] { panic("Agent is already running version " + cversion) } // Download new agent binary from provided location binfd, err := downloadBinary(el["location"]) if err != nil { panic(err) } // Verify checksum of the binary err = verifyChecksum(binfd, el["checksum"]) if err != nil { panic(err) } // grab the path before closing the file descriptor binPath := binfd.Name() err = binfd.Close() if err != nil { panic(err) } // Dry run of the binary to verify that the version is correct // but also that it can run without error err = verifyVersion(binPath, el["to_version"]) if err != nil { panic(err) } // Move the binary of the new agent from tmp, to the correct destination agentBinPath, err := moveBinary(binPath, el["to_version"]) if err != nil { panic(err) } // Launch the new agent and exit the module cmd := exec.Command(agentBinPath, "-u") err = cmd.Start() if err != nil { panic(err) } out = r.buildResults() return }
func (r *run) Run(in io.Reader) (resStr string) { defer func() { if e := recover(); e != nil { // return error in json r.Results.Success = false r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) resJson, _ := json.Marshal(r.Results) resStr = string(resJson[:]) return } }() t0 := time.Now() // read module parameters from stdin err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } if r.Parameters.SearchNamespaces { namespaceMode = true } els := *newElements() for _, val := range r.Parameters.LocalMAC { found, el, err := HasLocalMAC(val) if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) } els.LocalMAC[val] = el if found { r.Results.FoundAnything = true } } for _, val := range r.Parameters.NeighborMAC { found, el, err := HasSeenMac(val) if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) } els.NeighborMAC[val] = el if found { r.Results.FoundAnything = true } } for _, val := range r.Parameters.LocalIP { found, el, err := HasLocalIP(val) if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) } els.LocalIP[val] = el if found { r.Results.FoundAnything = true } } for _, val := range r.Parameters.ConnectedIP { found, el, err := HasIPConnected(val) if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) } els.ConnectedIP[val] = el if found { r.Results.FoundAnything = true } } for _, port := range r.Parameters.ListeningPort { found, el, err := HasListeningPort(port) if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) } els.ListeningPort[port] = el if found { r.Results.FoundAnything = true } } r.Results.Elements = els // calculate execution time t1 := time.Now() stats.Exectime = t1.Sub(t0).String() r.Results.Statistics = stats r.Results.Success = true jsonOutput, err := json.Marshal(r.Results) if err != nil { panic(err) } 
resStr = string(jsonOutput[:]) return }
func (r *run) Run(in io.Reader) (out string) { var ( err error el elements ) defer func() { if e := recover(); e != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false buf, _ := json.Marshal(r.Results) out = string(buf[:]) } }() err = modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } el.ResolvedHost = r.Parameters.Destination if r.Parameters.Protocol == "udp" || r.Parameters.Protocol == "tcp" { el.ResolvedHost += fmt.Sprintf(":%.0f", r.Parameters.DestinationPort) } el.ResolvedHost += " (" + r.Parameters.ipDest + ")" el.Protocol = r.Parameters.Protocol for i := 0; i < int(r.Parameters.Count); i += 1 { var err error // startTime for calculating the latency/RTT startTime := time.Now() switch r.Parameters.Protocol { case "icmp": err = r.pingIcmp() case "tcp": err = r.pingTcp() case "udp": err = r.pingUdp() } // store the time elapsed before processing potential errors latency := time.Since(startTime).Seconds() * 1000 // evaluate potential ping failures if err != nil { switch err.Error() { case E_Timeout: latency = 9999999 case E_ConnRefused: latency = -1 default: el.Failures = append(el.Failures, fmt.Sprintf("ping #%d failed with error: %v", i+1, err)) latency = 0 } } switch latency { case -1, 0: // do nothing case 9999999: // For udp, a timeout indicates that the port *maybe* open. if r.Parameters.Protocol == "udp" { r.Results.FoundAnything = true } default: r.Results.FoundAnything = true } el.Latencies = append(el.Latencies, latency) // sleep 100 milliseconds between pings to prevent floods time.Sleep(100 * time.Millisecond) } return r.buildResults(el) }
func (r *run) Run(in io.Reader) (out string) { var ( stats statistics el elements drift time.Duration ntpFile *os.File ntpScan *bufio.Scanner ntpPool []string ) defer func() { if e := recover(); e != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", e)) r.Results.Success = false buf, _ := json.Marshal(r.Results) out = string(buf[:]) } }() el.LocalTime = time.Now().Format(time.RFC3339Nano) t1 := time.Now() err := modules.ReadInputParameters(in, &r.Parameters) if err != nil { panic(err) } err = r.ValidateParameters() if err != nil { panic(err) } // if drift is not set, skip the ntp test if r.Parameters.Drift == "" { r.Results.FoundAnything = true goto done } drift, err = time.ParseDuration(r.Parameters.Drift) if err != nil { panic(err) } // assume host has synched time and set to false if not true el.IsWithinDrift = true //Load ntp servers from /etc/ntp.conf ntpFile, err = os.Open("/etc/ntp.conf") if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("Using backup NTP hosts. Failed to read /etc/ntp.conf with error '%v'", err)) } else { defer ntpFile.Close() ntpScan = bufio.NewScanner(ntpFile) for ntpScan.Scan() { ntpFields := strings.Fields(ntpScan.Text()) if len(ntpFields) < 2 { continue } if ntpFields[0] == "server" { ntpPool = append(ntpPool, ntpFields[1]) } } } //Add our hardcoded online servers to the end of our ntpPool as fallbacks ntpPool = append(ntpPool, NtpBackupPool...) 
// attempt to get network time from each of the NTP servers, and exit // as soon as we get a valid result from one of them for _, ntpsrv := range ntpPool { t, lat, err := GetNetworkTime(ntpsrv) if err != nil { // failed to get network time, log a failure and try another one stats.NtpStats = append(stats.NtpStats, ntpstats{ Host: ntpsrv, Reachable: false, }) continue } // compare network time to local time localtime := time.Now() if err != nil { r.Results.Errors = append(r.Results.Errors, fmt.Sprintf("%v", err)) continue } if localtime.Before(t.Add(-drift)) { el.IsWithinDrift = false el.Drifts = append(el.Drifts, fmt.Sprintf("Local time is behind ntp host %s by %s", ntpsrv, t.Sub(localtime).String())) } else if localtime.After(t.Add(drift)) { el.IsWithinDrift = false el.Drifts = append(el.Drifts, fmt.Sprintf("Local time is ahead of ntp host %s by %s", ntpsrv, localtime.Sub(t).String())) } stats.NtpStats = append(stats.NtpStats, ntpstats{ Host: ntpsrv, Time: t, Latency: lat, Drift: localtime.Sub(t).String(), Reachable: true, }) el.HasCheckedDrift = true // comparison succeeded, exit the loop break } if !el.IsWithinDrift { r.Results.FoundAnything = true } done: stats.ExecTime = time.Now().Sub(t1).String() out = r.buildResults(el, stats) return }