// Init() initializes a context from a configuration file into an // existing context struct func Init(path string) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving Init()"}.Debug() }() err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } ctx.Channels.Log = make(chan mig.Log, 37) ctx.Server.BaseURL = ctx.Server.Host + ctx.Server.BaseRoute ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-api") if err != nil { panic(err) } ctx, err = initDirectories(ctx) if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } return }
func main() { var ( err error conf Config ) flag.Usage = func() { fmt.Fprintf(os.Stderr, "%s - a worker verifying agents that fail to authenticate\n", os.Args[0]) flag.PrintDefaults() } var configPath = flag.String("c", "/etc/mig/agent-verif-worker.cfg", "Load configuration from file") flag.Parse() err = gcfg.ReadFileInto(&conf, *configPath) if err != nil { panic(err) } logctx, err := mig.InitLogger(conf.Logging, workerName) if err != nil { panic(err) } // set a binding to route events from mig.Ev_Q_Agt_Auth_Fail into the queue named after the worker // and return a channel that consumes the queue workerQueue := "migevent.worker." + workerName consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_Auth_Fail) if err != nil { panic(err) } fmt.Println("started worker", workerName, "consuming queue", workerQueue, "from key", mig.Ev_Q_Agt_Auth_Fail) for event := range consumerChan { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("unverified agent '%s'", event.Body)}) } return }
// Init() initializes a context from a configuration file into an // existing context struct func Init(path string) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving Init()"}.Debug() }() err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } // daemonize unless logging is set to stdout if ctx.Logging.Mode != "stdout" { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } ctx, err = initChannels(ctx) if err != nil { panic(err) } ctx.Logging, err = mig.InitLogger(ctx.Logging) if err != nil { panic(err) } ctx, err = initDirectories(ctx) if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } ctx, err = initBroker(ctx) if err != nil { panic(err) } return }
// Init() initializes a context from a configuration file into an // existing context struct func Init(path string) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } }() err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } ctx, err = initChannels(ctx) if err != nil { panic(err) } ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-scheduler") if err != nil { panic(err) } ctx, err = initDirectories(ctx) if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } ctx, err = initRelay(ctx) if err != nil { panic(err) } ctx, err = initSecring(ctx) if err != nil { panic(err) } return }
// Init() initializes a context from a configuration file into an // existing context struct func Init(path string, debug bool) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } }() ctx.Channels.Log = make(chan mig.Log, 37) err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } ctx.Server.BaseURL = ctx.Server.Host + ctx.Server.BaseRoute ctx.Authentication.duration, err = time.ParseDuration(ctx.Authentication.TokenDuration) if err != nil { panic(err) } if debug { ctx.Logging.Level = "debug" ctx.Logging.Mode = "stdout" } ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-api") if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } if ctx.MaxMind.Path != "" { ctx.MaxMind.r, err = geo.Open(ctx.MaxMind.Path) if err != nil { panic(err) } } return }
// Init() initializes a context from a configuration file into an // existing context struct func Init(path string) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving Init()"}.Debug() }() err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } ctx, err = initChannels(ctx) if err != nil { panic(err) } ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-scheduler") if err != nil { panic(err) } ctx, err = initDirectories(ctx) if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } ctx, err = initBroker(ctx) if err != nil { panic(err) } return }
func main() { var ( err error conf Config items []gozdef.ComplianceItem ) flag.Usage = func() { fmt.Fprintf(os.Stderr, "%s - a worker that transform commands results into compliance items and publishes them to mozdef\n", os.Args[0]) flag.PrintDefaults() } var configPath = flag.String("c", "/etc/mig/compliance-item-worker.cfg", "Load configuration from file") flag.Parse() err = gcfg.ReadFileInto(&conf, *configPath) if err != nil { panic(err) } logctx, err := mig.InitLogger(conf.Logging, workerName) if err != nil { panic(err) } // bind to the MIG even queue workerQueue := "migevent.worker." + workerName consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Cmd_Res) if err != nil { panic(err) } // bind to the mozdef relay exchange gp, err := gozdef.InitAmqp(conf.MozDef) if err != nil { panic(err) } mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Cmd_Res}) tFamRe := regexp.MustCompile("(?i)^compliance$") for event := range consumerChan { var cmd mig.Command err = json.Unmarshal(event.Body, &cmd) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid command: %v", err)}.Err()) } // discard actions that aren't threat.family=compliance if !tFamRe.MatchString(cmd.Action.Threat.Family) { continue } items, err = makeComplianceItem(cmd, conf) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make compliance items: %v", err)}.Err()) } for _, item := range items { // create a new event and set values in the fields ev, err := gozdef.NewEvent() if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make new mozdef event: %v", err)}.Err()) } ev.Category = "complianceitems" ev.Source = "mig" cverb := "fails" if item.Compliance { cverb = "passes" } ev.Summary = fmt.Sprintf("%s %s compliance with %s", item.Target, cverb, item.Check.Ref) ev.Tags = append(ev.Tags, "mig") ev.Tags = append(ev.Tags, "compliance") ev.Info() ev.Details = 
item err = gp.Send(ev) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err()) // if publication to mozdef fails, crash the worker. systemd/upstart will restart a new one panic(err) } } mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("published %d items from command %.0f to mozdef", len(items), cmd.ID)}.Info()) } return }
// Init prepare the AMQP connections to the broker and launches the
// goroutines that will process commands received by the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		// NOTE(review): this send happens even when the recover above
		// fired before initChannels() ran, in which case ctx.Channels.Log
		// is nil and the send blocks — confirm Init cannot panic that early
		ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
	}()
	// compile-time tags and logging configuration come from the built-in
	// agent configuration (TAGS, LOGGINGCONF)
	ctx.Agent.Tags = TAGS
	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}
	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}
	// Logging GoRoutine: drains the log channel for the lifetime of the agent
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()
	// defines whether the agent should respawn itself or not
	// this value is overriden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd
	ctx.Agent.Respawn = ISIMMORTAL
	// get the path of the executable
	ctx.Agent.BinPath, err = osext.Executable()
	if err != nil {
		panic(err)
	}
	// retrieve the hostname
	ctx, err = findHostname(ctx)
	if err != nil {
		panic(err)
	}
	// retrieve information about the operating system
	ctx.Agent.Env.OS = runtime.GOOS
	ctx.Agent.Env.Arch = runtime.GOARCH
	ctx, err = findOSInfo(ctx)
	if err != nil {
		panic(err)
	}
	ctx, err = findLocalIPs(ctx)
	if err != nil {
		panic(err)
	}
	// Attempt to discover the public IP
	if DISCOVERPUBLICIP {
		ctx, err = findPublicIP(ctx)
		if err != nil {
			panic(err)
		}
	}
	// find the run directory
	ctx.Agent.RunDir = getRunDir()
	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}
	// build the agent message queue location: <os>.<hostname>.<uid>
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s.%s", ctx.Agent.Env.OS, ctx.Agent.Hostname, ctx.Agent.UID)
	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}
	ctx.Sleeper = HEARTBEATFREQ
	// NOTE(review): err is always nil here — every earlier failure path
	// panics — so this check is dead code; candidate for removal
	if err != nil {
		panic(err)
	}
	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}
	connected := false
	// connect to the message broker: first directly, then via HTTP_PROXY,
	// then via each proxy listed in the built-in configuration
	ctx, err = initMQ(ctx, false, "")
	if err != nil {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Debug()
		// if the connection failed, look for a proxy
		// in the environment variables, and try again
		ctx, err = initMQ(ctx, true, "")
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Debug()
			// still failing, try connecting using the proxies in the configuration
			for _, proxy := range PROXIES {
				ctx, err = initMQ(ctx, true, proxy)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Debug()
					continue
				}
				connected = true
				goto mqdone
			}
		} else {
			connected = true
		}
	} else {
		connected = true
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}
	// catch interrupts and forward them to the Terminate channel
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()
	// try to connect the stat socket until it works
	// this may fail if one agent is already running
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				// retry every minute until the socket binds
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}
	return
}
// Init prepare the AMQP connections to the broker and launches the // goroutines that will process commands received by the MIG Scheduler func Init(foreground bool) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("initAgent() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug() }() // find out current working dir and build the bin path // it's important to do that before we daemonize, other the cwd will be / cdir, err := os.Getwd() if err != nil { panic(err) } ctx.Agent.BinPath = cdir + "/" + os.Args[0] // daemonize, and force logging to stdout if !foreground && LOGGINGCONF.Mode != "stdout" { LOGGINGCONF.Mode = "stdout" godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } // store heartbeat frequency ctx.Sleeper = HEARTBEATFREQ if err != nil { panic(err) } // create the go channels ctx, err = initChannels(ctx) if err != nil { panic(err) } // initiate logging configuration ctx.Logging, err = mig.InitLogger(LOGGINGCONF) if err != nil { panic(err) } // Goroutine that handles events, such as logs and panics, // and decides what to do with them go func() { for event := range ctx.Channels.Log { stop, err := mig.ProcessLog(ctx.Logging, event) if err != nil { fmt.Println("Unable to process logs") } // if ProcessLog says we should stop now, feed the Terminate chan if stop { ctx.Channels.Terminate <- fmt.Errorf(event.Desc) } } }() // retrieve information on agent environment ctx, err = initAgentEnv(ctx) if err != nil { panic(err) } // connect to the message broker ctx, err = initMQ(ctx) if err != nil { panic(err) } return }
func main() { var ( err error conf Config hint gozdef.HostAssetHint ) flag.Usage = func() { fmt.Fprintf(os.Stderr, "%s - a worker that listens to new endpoints and sends them as assets to mozdef\n", os.Args[0]) flag.PrintDefaults() } var configPath = flag.String("c", "/etc/mig/agent-intel-worker.cfg", "Load configuration from file") flag.Parse() err = gcfg.ReadFileInto(&conf, *configPath) if err != nil { panic(err) } logctx, err := mig.InitLogger(conf.Logging, workerName) if err != nil { panic(err) } // bind to the MIG even queue workerQueue := "migevent.worker." + workerName consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_New) if err != nil { panic(err) } // bind to the mozdef relay exchange gp, err := gozdef.InitAmqp(conf.MozDef) if err != nil { panic(err) } mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Agt_New}) for event := range consumerChan { var agt mig.Agent err = json.Unmarshal(event.Body, &agt) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid agent description: %v", err)}.Err()) continue } agt, err = populateTeam(agt, conf) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to populate agent team: %v", err)}.Err()) } hint, err = makeHintFromAgent(agt) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to build asset hint: %v", err)}.Err()) continue } err = publishHintToMozdef(hint, gp) if err != nil { mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err()) // if publication to mozdef fails, crash the worker. systemd/upstart will restart a new one panic(err) } mig.ProcessLog(logctx, mig.Log{Desc: "published asset hint for agent '" + hint.Name + "' to mozdef"}.Info()) } return }