// Pause kills the hashcat process and marks the job as paused.
func (t *Tasker) Pause() error {
	log.WithField("task", t.job.UUID).Debug("Attempting to pause hashcat task")

	// Call Status to update the job internals before pausing
	t.Status()

	if t.job.Status == common.STATUS_RUNNING {
		t.mux.Lock()

		if runtime.GOOS == "windows" {
			t.exec.Process.Kill()
		} else {
			io.WriteString(t.stdinPipe, "c")
			time.Sleep(1 * time.Second)
			io.WriteString(t.stdinPipe, "q")
		}

		t.mux.Unlock()

		// Wait for the program to actually exit
		t.doneWG.Wait()
	}

	// Change the status to paused
	t.mux.Lock()
	t.job.Status = common.STATUS_PAUSED
	t.mux.Unlock()

	log.WithField("task", t.job.UUID).Debug("Task paused successfully")

	return nil
}
// updateQueue is an internal function used to update the status of all Jobs.
// A LOCK SHOULD ALREADY BE HELD TO CALL THIS FUNCTION.
func (q *Queue) updateQueue() {
	// Loop through the jobs and get the status of running ones
	for i := range q.stack {
		if q.stack[i].Status != common.STATUS_RUNNING {
			continue
		}

		// Build the status update call
		jobStatus := common.RPCCall{Job: q.stack[i]}

		err := q.pool[q.stack[i].ResAssigned].Client.Call("Queue.TaskStatus", jobStatus, &q.stack[i])
		// We care about errors, but only from a logging perspective
		if err != nil {
			log.WithField("rpc error", err.Error()).Error("Error during RPC call.")
		}

		// Check if this job is now no longer running
		if q.stack[i].Status != common.STATUS_RUNNING {
			// Release the resources from this change
			log.WithField("JobID", q.stack[i].UUID).Debug("Job has finished.")

			var hw string
			for _, v := range q.pool[q.stack[i].ResAssigned].Tools {
				if v.UUID == q.stack[i].ToolUUID {
					hw = v.Requirements
				}
			}
			q.pool[q.stack[i].ResAssigned].Hardware[hw] = true
		}
	}
}
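// A minimal sketch (not from the source) of the locking contract updateQueue
// documents above: the caller takes the Queue lock, since updateQueue never
// locks itself. The wrapper name here is hypothetical.
func (q *Queue) refreshJobs() {
	q.Lock()
	defer q.Unlock()
	q.updateQueue()
}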
// TaskRun starts or resumes the task specified in the RPC call.
func (q *Queue) TaskRun(rpc common.RPCCall, j *common.Job) error {
	log.WithField("task", rpc.Job.UUID).Debug("Attempting to run task")

	// Add a deferred catch for any panic from within the tools
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("Recovered from Panic in Resource.TaskRun: %v", err)
		}
	}()

	q.Lock()
	defer q.Unlock()

	log.WithField("Stack", q.stack).Debug("Stack")

	// Grab the task specified by the UUID and check for a bad UUID
	_, ok := q.stack[rpc.Job.UUID]
	if !ok {
		log.WithField("task", rpc.Job.UUID).Debug("Task with UUID provided does not exist.")
		return errors.New("Task with UUID provided does not exist.")
	}

	// Start or resume the task
	err := q.stack[rpc.Job.UUID].Run()
	if err != nil {
		return err
	}

	*j = q.stack[rpc.Job.UUID].Status()

	log.WithField("task", j.UUID).Debug("Task ran successfully")

	return nil
}
// SizeList lists all sizes.
func SizeList(c *cli.Context) {
	client := NewClient(c, DefaultConfig)
	opts := LoadOpts(c)

	f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) {
		list, resp, err := client.Sizes.List(opt)
		if err != nil {
			return nil, nil, err
		}

		si := make([]interface{}, len(list))
		for i := range list {
			si[i] = list[i]
		}

		return si, resp, err
	}

	si, err := PaginateResp(f, opts)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not list sizes")
	}

	list := make([]godo.Size, len(si))
	for i := range si {
		list[i] = si[i].(godo.Size)
	}

	err = displayOutput(c, list)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not write output")
	}
}
func getHandler(w http.ResponseWriter, r *http.Request) *toadError {
	if r.Method == "GET" {
		log.Warn("Receiving GET file request")

		// Take the filename and ask the chain for its hash
		params, err := parseURL(fmt.Sprintf("%s", r.URL))
		if err != nil {
			return &toadError{err, "error parsing URL", 400}
		}

		fileName := params["fileName"]
		log.WithField("=>", fileName).Warn("Looking for filename:")

		hash, err := tscore.GetInfos(fileName)
		if err != nil {
			return &toadError{err, "error getting namereg info", 400}
		}
		log.WithField("=>", hash).Warn("Found corresponding hash:")

		log.Warn("Getting it from IPFS...")
		contents, err := tscore.GetFile(fileName, hash)
		if err != nil {
			return &toadError{err, "error getting file", 400}
		}

		// Write out the file, then remove the local copy
		w.Write(contents)
		if err := os.Remove(fileName); err != nil {
			return &toadError{err, "error removing file", 400}
		}

		log.Warn("Congratulations, you have successfully retrieved your file from the toadserver")
	}
	return nil
}
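// A hedged sketch of how a handler returning *toadError might be mounted on
// net/http. The toadError field names (Err, Message, Code) are assumptions
// inferred from the positional literals above, and the route is hypothetical.
type toadHandler func(http.ResponseWriter, *http.Request) *toadError

func (fn toadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if e := fn(w, r); e != nil {
		log.WithField("=>", e.Err).Error(e.Message)
		http.Error(w, e.Message, e.Code)
	}
}

// Usage (hypothetical route): http.Handle("/files", toadHandler(getHandler))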
func processMetrics() {
	var (
		g    = metrics.NewGauge()
		fg   = metrics.NewGauge()
		memg = metrics.NewGauge()
	)

	metrics.DefaultRegistry.Register("goroutines", g)
	metrics.DefaultRegistry.Register("fds", fg)
	metrics.DefaultRegistry.Register("memory-used", memg)

	collect := func() {
		// Update the number of goroutines
		g.Update(int64(runtime.NumGoroutine()))

		// Collect the number of open fds
		fds, err := osutils.GetOpenFds(os.Getpid())
		if err != nil {
			logrus.WithField("error", err).Error("containerd: get open fd count")
		}
		fg.Update(int64(fds))

		// Get the memory used
		m := sigar.ProcMem{}
		if err := m.Get(os.Getpid()); err != nil {
			logrus.WithField("error", err).Error("containerd: get pid memory information")
		}
		memg.Update(int64(m.Size))
	}

	go func() {
		collect()
		for range time.Tick(30 * time.Second) {
			collect()
		}
	}()
}
/*
Quit stops the job running under this tool; don't forget to clean up any
file system resources, etc.
*/
func (v *johndictTasker) Quit() common.Job {
	log.WithField("Task", v.job.UUID).Debug("Attempting to quit johndict task.")

	// Update the job's status
	log.Debug("Getting status before quit")
	v.Status()

	v.mux.Lock()

	// Kill the process after a SIGHUP
	log.Debug("Sending SIGHUP before process kill")
	v.cmd.Process.Signal(syscall.SIGHUP)

	log.Debug("Sending kill signal to process")
	v.cmd.Process.Kill()

	v.mux.Unlock()

	// Wait for the program to actually exit
	log.Debug("Waiting on the process to finish")
	<-v.doneWaitChan

	// Change the status to quit
	log.Debug("Change status")
	v.mux.Lock()
	v.job.Status = common.STATUS_QUIT
	v.mux.Unlock()

	log.WithField("Task", v.job.UUID).Debug("Task has been quit successfully.")

	return v.job
}
func (t *TownClient) getSValue() (sValue string) {
	log.WithField("tag", TAG).Info("getting sValue for town login")

	log.WithField("tag", TAG).Infof("GET %v", ROOT)
	doc, err := goquery.NewDocument(ROOT)
	if err != nil {
		log.WithField("tag", TAG).Errorf("%s", err.Error())
		return
	}

	// Look for a hidden input named "s" and grab its value
	doc.Find("input").Each(func(i int, s *goquery.Selection) {
		if name, exists := s.Attr("name"); exists && name == "s" {
			if value, exists := s.Attr("value"); exists {
				sValue = value
			}
		}
	})

	log.WithField("tag", TAG).Infof("sValue: %v", sValue)
	return sValue
}
func (c couchPotato) TestConnection() bool {
	query := c.FullURL + "/app.available"

	resp, err := get(query)
	if err != nil {
		log.WithField("couchpotato.test", c).Error(err)
		return false
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.WithField("couchpotato.test", c).Error(resp.Status)
		return false
	}

	var r couchPotato
	if err = json.NewDecoder(resp.Body).Decode(&r); err != nil {
		log.WithFields(log.Fields{
			"couchpotato.test": c,
			"reason":           "possibly bad api key",
		}).Error(err)
		return false
	}

	return r.Success
}
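// Hypothetical usage sketch: only the FullURL field is known from the source
// (the method above appends "/app.available" to it); the URL itself is made up.
func exampleTestConnection() {
	cp := couchPotato{FullURL: "http://localhost:5050/api/<apikey>"}
	if !cp.TestConnection() {
		log.Error("couchpotato is unreachable or rejected the request")
	}
}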
// GetDailyUrl returns the daily URL, or "" if something went wrong.
func (t *TownClient) GetDailyUrl() (string, error) {
	log.WithField("tag", TAG).Info("getting Daily Url for town")

	req, err := http.NewRequest("GET", DAILY, nil)
	if err != nil {
		log.WithField("tag", TAG).Error(err.Error())
		return "", err
	}

	t.addHeader(req)
	if t.cookies != nil {
		for _, cookie := range t.cookies {
			req.AddCookie(cookie)
		}
	}

	// clientRed presumably does not follow redirects, so the daily URL can
	// be read from the Location header of the raw response.
	resp, err := clientRed.Do(req)
	if resp == nil {
		return "", err
	}
	resp.Close = true
	defer resp.Body.Close()

	lv := resp.Header.Get("Location")
	if lv == "" {
		return "", errors.New("no Location header|most likely town announcement")
	}
	return lv, nil
}
// ThankPost executes the AJAX "thank" request for a post.
func (t *TownClient) ThankPost(postid string, token string) (err error) {
	log.WithField("tag", TAG).Infof("thanking post %s", postid)

	param := url.Values{}
	param.Set("do", "thanks")
	param.Add("postid", postid)
	param.Add("securitytoken", token)
	param.Add("s", "")

	req, err := http.NewRequest("POST", THANKS, strings.NewReader(param.Encode()))
	if err != nil {
		return
	}
	log.WithField("tag", TAG).Infof("POST url: %v", THANKS)

	t.addHeader(req)
	if t.cookies != nil {
		for _, cookie := range t.cookies {
			req.AddCookie(cookie)
		}
	}

	resp, err := client.Do(req)
	if err != nil {
		return
	}
	resp.Close = true
	resp.Body.Close()
	return
}
// Get performs an HTTP GET on the given sUrl.
func (t *TownClient) Get(sUrl string) (*http.Response, error) {
	log.WithField("tag", TAG).Infof("GET %v", sUrl)

	req, err := http.NewRequest("GET", sUrl, nil)
	if err != nil {
		log.WithField("tag", TAG).Errorf("couldn't create Request to: %v", sUrl)
		return nil, err
	}

	t.addHeader(req)
	if t.cookies != nil {
		for _, cookie := range t.cookies {
			req.AddCookie(cookie)
		}
	}

	// Connect to sUrl
	resp, err := client.Do(req)
	if err != nil {
		log.WithField("tag", TAG).Errorf("couldn't connect to: %v", sUrl)
		return nil, err
	}
	return resp, nil
}
func main() {
	logger := logrus.WithFields(logrus.Fields{
		"gitcommit": GITCOMMIT,
	})
	logger.Info("Starting rancher-compose-executor")

	eventHandlers := map[string]events.EventHandler{
		"environment.create": handlers.CreateEnvironment,
		"ping": func(event *events.Event, apiClient *client.RancherClient) error {
			return nil
		},
	}

	router, err := events.NewEventRouter("rancher-compose-executor", 2000,
		os.Getenv("CATTLE_URL"),
		os.Getenv("CATTLE_ACCESS_KEY"),
		os.Getenv("CATTLE_SECRET_KEY"),
		nil, eventHandlers, "environment", 10)
	if err != nil {
		logrus.WithField("error", err).Fatal("Unable to create event router")
	}

	if err := router.Start(nil); err != nil {
		logrus.WithField("error", err).Fatal("Unable to start event router")
	}

	logger.Info("Exiting rancher-compose-executor")
}
func runRecv(iterations int, profile bool) {
	if profile {
		f, err := os.Create("recv.profile")
		if err != nil {
			panic(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()

		f, err = os.Create("recv.mem")
		if err != nil {
			panic(err)
		}
		defer func() {
			pprof.WriteHeapProfile(f)
			f.Close()
		}()
	}

	listener := newListener("", iterations)
	_, err := hyenad.NewHyenaClient(2, listener)
	if err != nil {
		panic(err)
	}

	log.WithField("Waiting for messages", iterations).Info("Receiving Client started")
	listener.wait.Wait()

	duration := time.Since(listener.startTime)
	msgPerS := float64(iterations) / duration.Seconds()
	log.WithField("Messages", iterations).
		WithField("Time", duration.String()).
		WithField("Msg/S", msgPerS).
		Info("Done")
}
// ImagesListApplication lists application images.
func ImagesListApplication(c *cli.Context) {
	client := NewClient(c, DefaultConfig)
	opts := LoadOpts(c)

	f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) {
		list, resp, err := client.Images.ListApplication(opt)
		if err != nil {
			return nil, nil, err
		}

		si := make([]interface{}, len(list))
		for i := range list {
			si[i] = list[i]
		}

		return si, resp, err
	}

	si, err := PaginateResp(f, opts)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not list application images")
	}

	list := make([]godo.Image, len(si))
	for i := range si {
		list[i] = si[i].(godo.Image)
	}

	err = writeJSON(list, c.App.Writer)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not write JSON")
	}
}
// Pause the hashcat run.
func (v *hascatTasker) Pause() error {
	log.WithField("task", v.job.UUID).Debug("Attempting to pause hashcat task")

	// Call Status to update the job internals before pausing
	v.Status()

	v.mux.Lock()

	// Because this run is queue managed, we should just need to kill the
	// process. It will be resumed automatically.
	if runtime.GOOS == "windows" {
		v.cmd.Process.Kill()
	} else {
		v.cmd.Process.Signal(syscall.SIGINT)
	}

	v.mux.Unlock()

	// Wait for the program to actually exit
	<-v.waitChan

	// Change the status to paused
	v.mux.Lock()
	v.job.Status = common.STATUS_PAUSED
	v.mux.Unlock()

	log.WithField("task", v.job.UUID).Debug("Task paused successfully")

	return nil
}
func (ex *defaultExecuter) completeTask(id string, task func(string) error, onFailure func(string, string)) {
	// Recover from any panic inside the task and report it as a failure.
	defer func() {
		if r := recover(); r != nil {
			log.WithField("task", id).
				Errorln("Task failed", r)
			debug.PrintStack()
			go onFailure(id, "The error message is below. Please check logs for more details."+"\n\n"+"panic occurred")
			ex.cMap.setStatus(id, FAILURE)
		}
	}()

	// Run the task.
	if err := task(id); err != nil {
		log.WithFields(log.Fields{
			"task":  id,
			"error": errors.ErrorStack(err),
		}).Error("Task failed")
		go onFailure(id, "The error message is below. Please check logs for more details."+"\n\n"+errors.ErrorStack(err))
		ex.cMap.setStatus(id, FAILURE)
		return
	}

	log.WithField("task", id).
		Info("Task succeeded")
	ex.cMap.setStatus(id, SUCCESS)
}
func (v *hascatTasker) Quit() common.Job {
	log.WithField("task", v.job.UUID).Debug("Attempting to quit hashcat task")

	// Call Status to update the job internals before quitting
	v.Status()

	v.mux.Lock()

	if runtime.GOOS == "windows" {
		v.cmd.Process.Kill()
	} else {
		v.cmd.Process.Signal(syscall.SIGINT)
	}

	v.mux.Unlock()

	// Wait for the program to actually exit
	<-v.waitChan

	v.mux.Lock()
	v.job.Status = common.STATUS_QUIT
	v.mux.Unlock()

	log.WithField("task", v.job.UUID).Debug("Task quit successfully")

	return v.job
}
/*
Pause the job if possible, save the state, etc.
*/
func (v *johndictTasker) Pause() error {
	log.WithField("Task", v.job.UUID).Debug("Attempt to pause johndict job")

	// Update the internal status
	v.Status()

	v.mux.Lock()

	// Kill the process after a SIGHUP
	v.cmd.Process.Signal(syscall.SIGHUP)
	v.cmd.Process.Kill()

	v.mux.Unlock()

	// Wait for the program to actually exit
	<-v.doneWaitChan

	// Change the status to paused
	v.mux.Lock()
	v.job.Status = common.STATUS_PAUSED
	v.mux.Unlock()

	log.WithField("Task", v.job.UUID).Debug("Task has been paused successfully.")

	return nil
}
// print logs a value twice: once in Go syntax (%#v) and, if it can be
// marshalled, once as JSON.
func print(i interface{}) {
	log.WithField(reflect.TypeOf(i).String(), fmt.Sprintf("%#v", i)).Print("")

	if data, err := json.Marshal(i); err == nil {
		log.WithField(reflect.TypeOf(i).String(), string(data)).Print("")
	}
}
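// Example invocation of print; any value works since it takes interface{}.
func examplePrint() {
	print(struct{ Name string }{Name: "demo"})
	// Logs the value once as Go syntax ({Name:"demo"}) and once as JSON
	// ({"Name":"demo"}).
}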
func setupLogging() {
	switch viper.GetString("log-level") {
	case "debug":
		log.SetLevel(log.DebugLevel)
	case "info":
		log.SetLevel(log.InfoLevel)
	case "warn":
		log.SetLevel(log.WarnLevel)
	case "error":
		log.SetLevel(log.ErrorLevel)
	case "fatal":
		log.SetLevel(log.FatalLevel)
	default:
		log.WithField("log-level", viper.GetString("log-level")).Warning("invalid log level. defaulting to info.")
		log.SetLevel(log.InfoLevel)
	}

	switch viper.GetString("log-format") {
	case "text":
		log.SetFormatter(new(log.TextFormatter))
	case "json":
		log.SetFormatter(new(log.JSONFormatter))
	default:
		log.WithField("log-format", viper.GetString("log-format")).Warning("invalid log format. defaulting to text.")
		log.SetFormatter(new(log.TextFormatter))
	}
}
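// A small sketch of exercising setupLogging directly, e.g. from a test;
// viper.Set overrides whatever flag or env binding happens elsewhere.
func exampleSetupLogging() {
	viper.Set("log-level", "debug")
	viper.Set("log-format", "json")
	setupLogging() // debug-level JSON output from here on
}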
func (repo *LocalfsImagerep) fillLocalfsImage(image *models.Image, fileinfo os.FileInfo) bool {
	// ubuntu-14.04_amd64_raw.img -> name: ubuntu-14.04, arch: amd64, type: raw.img
	imginfo := strings.SplitN(fileinfo.Name(), "_", 3)
	if len(imginfo) != 3 {
		log.WithField("image", fileinfo.Name()).Info("skipping image with invalid name")
		return false
	}

	image.Name = imginfo[0]
	image.Size = fileinfo.Size()
	image.Date = fileinfo.ModTime()
	image.Filename = fileinfo.Name()
	image.FullPath = filepath.Join(repo.Root, fileinfo.Name())

	switch imginfo[1] {
	case "amd64":
		image.Arch = models.IMAGE_ARCH_X86_64
	case "i386":
		image.Arch = models.IMAGE_ARCH_X86
	default:
		log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image architecture")
		return false
	}

	switch imginfo[2] {
	case "raw.img":
		image.Type = models.IMAGE_FMT_RAW
	case "qcow2.img":
		image.Type = models.IMAGE_FMT_QCOW2
	default:
		log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image type")
		return false
	}

	return true
}
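// A quick check of the split convention used above, independent of any
// os.FileInfo: SplitN with a limit of 3 leaves the extension attached to
// the third part.
func exampleImageNameSplit() {
	parts := strings.SplitN("ubuntu-14.04_amd64_raw.img", "_", 3)
	// parts == []string{"ubuntu-14.04", "amd64", "raw.img"}
	_ = parts
}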
func init() {
	// root and persistent flags
	rootCmd.PersistentFlags().String("log-level", "info", "one of debug, info, warn, error, or fatal")
	rootCmd.PersistentFlags().String("log-format", "text", "specify output (text or json)")

	// build flags
	buildCmd.Flags().String("shell", "bash", "shell to use for executing build scripts")
	buildCmd.Flags().Int("concurrent-jobs", runtime.NumCPU(), "number of packages to build at once")
	buildCmd.Flags().String("stream-logs-for", "", "stream logs from a single package")

	cwd, err := os.Getwd()
	if err != nil {
		logrus.WithField("error", err).Warning("could not get working directory")
	}
	rootCmd.PersistentFlags().String("search", cwd, "where to look for package definitions")
	buildCmd.Flags().String("output", path.Join(cwd, "out"), "where to place output packages")
	buildCmd.Flags().String("logs", path.Join(cwd, "logs"), "where to place build logs")
	buildCmd.Flags().String("cache", path.Join(cwd, ".hammer-cache"), "where to cache downloads")
	buildCmd.Flags().Bool("skip-cleanup", false, "skip cleanup step")

	for _, flags := range []*pflag.FlagSet{rootCmd.PersistentFlags(), buildCmd.Flags()} {
		err := viper.BindPFlags(flags)
		if err != nil {
			logrus.WithField("error", err).Fatal("could not bind flags")
		}
	}
}
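// Sketch: once viper.BindPFlags has run, flag values (and their defaults)
// are read back through viper rather than through the flag sets.
func exampleReadBoundFlags() {
	shell := viper.GetString("shell")       // "bash" unless overridden
	jobs := viper.GetInt("concurrent-jobs") // defaults to runtime.NumCPU()
	logrus.WithField("shell", shell).WithField("jobs", jobs).Debug("build settings")
}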
func main() {
	log.Info("GlusterD starting")

	context.Init()

	for _, c := range commands.Commands {
		context.Rest.SetRoutes(c.Routes())
	}

	// Use a buffered channel so a signal delivered before the receiver is
	// ready is not dropped (signal.Notify does not block on sends).
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh)

	go func() {
		for s := range sigCh {
			log.WithField("signal", s).Debug("Signal received")
			switch s {
			case os.Interrupt:
				log.WithField("signal", s).Info("Received SIGINT. Stopping GlusterD.")
				context.Rest.Stop()
				log.Info("Terminating GlusterD.")
				os.Exit(0)
			default:
				continue
			}
		}
	}()

	err := context.Rest.Listen()
	if err != nil {
		log.Fatal("Could not start GlusterD Rest Server. Aborting.")
	}
}
// openUsbPortWithRetry retries opening the port until it succeeds.
func openUsbPortWithRetry(port string) *os.File {
	errorLogged := false
	for {
		f, err := os.OpenFile(port, os.O_RDWR, 0)
		if err == nil {
			log.WithField("port", port).Info("openUsbPortWithRetry connected")

			// Set raw mode on the Raspberry Pi; if we don't set raw mode,
			// xon/xoff characters in the frame buffer cause problems.
			if runtime.GOOS == "linux" {
				cmd := exec.Command("stty", "-F", port, "raw")
				if err := cmd.Run(); err != nil {
					log.WithField("error", err.Error()).Error("openUsbPortWithRetry failed to set stty raw mode")
				}
			}
			return f
		}

		// Log the failure once, then keep retrying quietly
		if !errorLogged {
			log.WithField("error", err.Error()).Warn("openUsbPortWithRetry failed to open port")
			errorLogged = true
		}

		// Try again in a second
		time.Sleep(1000 * time.Millisecond)
	}
}
// RunAnimationsHandler handles HTTP requests to run zero or more animations
// specified in the JSON payload.
func RunAnimationsHandler(w http.ResponseWriter, r *http.Request) {
	// JSON body of the form:
	// [{"segmentId": "s1", "action": "static", "params": "6f16d4"},
	//  {"segmentId": "s2", "action": "static", "params": "6f16d4"}]
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.WithField("err", err.Error()).Error("RunAnimationsHandler bad body")
		http.Error(w, err.Error(), 400)
		return
	}

	// Unmarshal the JSON into a typed slice
	var segments []animations.SegmentAction
	if err = json.Unmarshal(body, &segments); err != nil {
		log.WithField("err", err.Error()).Error("RunAnimationsHandler bad body JSON")
		http.Error(w, err.Error(), 400)
		return
	}
	log.WithField("Decoded JSON", segments).Info("RunAnimationsHandler called")

	// Perform the animations
	animations.RunAnimations(segments)

	// Return the controller status: true only if every Teensy is connected
	allConnected := true
	for _, v := range controller.TeensyConnections() {
		allConnected = allConnected && v
	}
	d, _ := json.Marshal(allConnected)
	w.Write(d)
}
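// A hedged client-side sketch of the request this handler expects. The
// endpoint path and host are hypothetical; only the body shape comes from
// the comment in the handler above.
func exampleRunAnimationsRequest() {
	payload := strings.NewReader(
		`[{"segmentId": "s1", "action": "static", "params": "6f16d4"}]`)
	resp, err := http.Post("http://localhost:8080/runAnimations", "application/json", payload)
	if err == nil {
		resp.Body.Close() // body holds the JSON-encoded controller status
	}
}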
func (q *Queue) TaskStatus(rpc common.RPCCall, j *common.Job) error {
	log.WithField("task", rpc.Job.UUID).Debug("Attempting to gather task status")

	// Add a deferred catch for any panic from within the tools
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("Recovered from Panic in Resource.TaskStatus: %v", err)
		}
	}()

	q.Lock()
	defer q.Unlock()

	// Grab the task specified by the UUID and check for a bad UUID
	_, ok := q.stack[rpc.Job.UUID]
	if !ok {
		log.WithField("task", rpc.Job.UUID).Error("Task with UUID provided does not exist.")
		return errors.New("Task with UUID provided does not exist.")
	}

	// Return the task's status
	*j = q.stack[rpc.Job.UUID].Status()

	return nil
}
func (decoder *JSONDecoderV2) Read(ctx context.Context, req *http.Request) error {
	dec := json.NewDecoder(req.Body)
	var d JSONDatapointV2
	if err := dec.Decode(&d); err != nil {
		return err
	}

	dps := make([]*datapoint.Datapoint, 0, len(d))
	for metricType, datapoints := range d {
		mt, ok := com_signalfx_metrics_protobuf.MetricType_value[strings.ToUpper(metricType)]
		if !ok {
			log.WithField("metricType", metricType).Warn("Unknown metric type")
			continue
		}
		for _, jsonDatapoint := range datapoints {
			v, err := ValueToValue(jsonDatapoint.Value)
			if err != nil {
				log.WithField("err", err).Warn("Unable to get value for datapoint")
			} else {
				dp := datapoint.New(jsonDatapoint.Metric, jsonDatapoint.Dimensions, v,
					fromMT(com_signalfx_metrics_protobuf.MetricType(mt)), fromTs(jsonDatapoint.Timestamp))
				dps = append(dps, dp)
			}
		}
	}
	return decoder.Sink.AddDatapoints(ctx, dps)
}
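// A hedged example of the body shape Read decodes: a map from metric type to
// a list of datapoints. The JSON keys here are assumptions inferred from the
// fields referenced above (Metric, Dimensions, Value, Timestamp).
const exampleV2Body = `{
  "gauge": [
    {"metric": "cpu.load", "dimensions": {"host": "a"}, "value": 0.5, "timestamp": 1000}
  ]
}`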
func (q *Queue) TaskQuit(rpc common.RPCCall, j *common.Job) error {
	log.WithField("task", rpc.Job.UUID).Debug("Attempting to quit task")

	// Add a deferred catch for any panic from within the tools
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("Recovered from Panic in Resource.TaskQuit: %v", err)
		}
	}()

	// Grab a lock and set the unlock on return
	q.Lock()
	defer q.Unlock()

	// Grab the task specified by the UUID and check for a bad UUID
	_, ok := q.stack[rpc.Job.UUID]
	if !ok {
		log.WithField("task", rpc.Job.UUID).Debug("Task with UUID provided does not exist.")
		return errors.New("Task with UUID provided does not exist.")
	}

	// Quit the task and return the final result
	*j = q.stack[rpc.Job.UUID].Quit()

	// Remove the quit job from the stack
	delete(q.stack, rpc.Job.UUID)

	log.WithField("task", j.UUID).Debug("Task quit successfully")

	return nil
}
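// A sketch of the matching client-side RPC, mirroring the Queue.TaskStatus
// call made in updateQueue above; the resource client wiring is assumed.
func quitRemoteTask(client *rpc.Client, job common.Job) (common.Job, error) {
	var finished common.Job
	err := client.Call("Queue.TaskQuit", common.RPCCall{Job: job}, &finished)
	return finished, err
}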
func Main() {
	logger := logrus.WithFields(logrus.Fields{
		"version": version.VERSION,
	})
	logger.Info("Starting rancher-compose-executor")

	eventHandlers := map[string]events.EventHandler{
		"stack.create":        handlers.WithTimeout(handlers.CreateStack),
		"stack.upgrade":       handlers.WithTimeout(handlers.UpgradeStack),
		"stack.finishupgrade": handlers.WithTimeout(handlers.FinishUpgradeStack),
		"stack.rollback":      handlers.WithTimeout(handlers.RollbackStack),
		"ping": func(event *events.Event, apiClient *client.RancherClient) error {
			return nil
		},
	}

	router, err := events.NewEventRouter("rancher-compose-executor", 2000,
		os.Getenv("CATTLE_URL"),
		os.Getenv("CATTLE_ACCESS_KEY"),
		os.Getenv("CATTLE_SECRET_KEY"),
		nil, eventHandlers, "stack", 10, events.DefaultPingConfig)
	if err != nil {
		logrus.WithField("error", err).Fatal("Unable to create event router")
	}

	if err := router.Start(nil); err != nil {
		logrus.WithField("error", err).Fatal("Unable to start event router")
	}

	logger.Info("Exiting rancher-compose-executor")
}