func main() {
	var (
		err  error
		conf Config
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker verifying agents that fail to authenticate\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-verif-worker.cfg", "Load configuration from file")
	flag.Parse()
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}
	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}
	// set a binding to route events from mig.Ev_Q_Agt_Auth_Fail into the queue named
	// after the worker, and return a channel that consumes the queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_Auth_Fail)
	if err != nil {
		panic(err)
	}
	fmt.Println("started worker", workerName, "consuming queue", workerQueue, "from key", mig.Ev_Q_Agt_Auth_Fail)
	for event := range consumerChan {
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("unverified agent '%s'", event.Body)})
	}
	return
}
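// The Config loaded above is defined elsewhere in the worker's package. A
// hypothetical sketch of its shape, inferred from the calls it is passed to
// (workers.InitMqWithConsumer and mig.InitLogger): gcfg.ReadFileInto maps each
// ini-style section of the file to the struct field of the same name. The
// field types here are assumptions for illustration, not the project's actual
// declaration:
//
//	type Config struct {
//		Mq      workers.MqConf // relay connection settings, read from the [mq] section
//		Logging mig.Logging    // logger configuration, read from the [logging] section
//	}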
func main() {
	var err error
	var config = flag.String("c", "/etc/mig/runner.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()
	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}
	ctx, err = initContext(*config)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(9)
	}
	wg.Add(1)
	go func() {
		var stop bool
		for event := range ctx.Channels.Log {
			stop, err = mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("unable to process log")
			}
			if stop {
				break
			}
		}
		wg.Done()
	}()
	mlog("logging routine started")
	sigch := make(chan os.Signal, 1)
	// note: os.Kill cannot be trapped on POSIX systems, so in practice only
	// os.Interrupt is delivered on this channel
	signal.Notify(sigch, os.Interrupt, os.Kill)
	go func() {
		<-sigch
		mlog("signal, exiting")
		ctx.Channels.ExitNotify <- true
	}()
	err = loadPlugins()
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		doExit(9)
	}
	// Start up the results processor
	go processResults()
	err = runnerScan()
	if err != nil {
		mlog("runner error: %v", err)
		doExit(9)
	}
	doExit(0)
}
func initContext() (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initContext() -> %v", e)
		}
	}()
	ctx.Channels.Log = make(chan mig.Log, 37)
	ctx.Logging, err = getLoggingConf()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-loader")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	wg.Add(1)
	go func() {
		var stop bool
		for event := range ctx.Channels.Log {
			// Also write the message to stderr to ease debugging
			fmt.Fprintf(os.Stderr, "%v\n", event.Desc)
			stop, err = mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("unable to process log")
			}
			if stop {
				break
			}
		}
		wg.Done()
	}()
	logInfo("logging routine started")
	ctx.LoaderKey = LOADERKEY
	hints := agentcontext.AgentContextHints{
		DiscoverPublicIP: DISCOVERPUBLICIP,
		DiscoverAWSMeta:  DISCOVERAWSMETA,
		APIUrl:           APIURL,
		Proxies:          PROXIES[:],
	}
	actx, err := agentcontext.NewAgentContext(ctx.Channels.Log, hints)
	if err != nil {
		panic(err)
	}
	ctx.AgentIdentifier = actx.ToAgent()
	return
}
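// initContext converts any panic raised during initialization into a regular
// error return through its deferred recover. A self-contained sketch of that
// idiom, with hypothetical names, not project code:
func setupSketch() (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("setupSketch() -> %v", e)
		}
	}()
	// helpers called here may panic instead of returning errors;
	// the deferred function above turns the panic into err for the caller
	panic("simulated initialization failure")
}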
// Init prepares the AMQP connections to the broker and launches the
// goroutines that process commands received from the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		if ctx.Channels.Log != nil {
			ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
		}
	}()
	// Pick up a lock on the Context Agent field, as we will be updating or reading it
	// here and in various functions called from here, such as daemonize().
	ctx.Agent.Lock()
	defer ctx.Agent.Unlock()

	ctx.Agent.Tags = TAGS

	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}

	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}

	// logging goroutine
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()

	// Gather new agent context information to use as the context for this
	// agent invocation
	hints := agentcontext.AgentContextHints{
		DiscoverPublicIP: DISCOVERPUBLICIP,
		DiscoverAWSMeta:  DISCOVERAWSMETA,
		APIUrl:           APIURL,
		Proxies:          PROXIES[:],
	}
	actx, err := agentcontext.NewAgentContext(ctx.Channels.Log, hints)
	if err != nil {
		panic(err)
	}

	// Defines whether the agent should respawn itself or not.
	// This value is overridden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd.
	ctx.Agent.Respawn = ISIMMORTAL

	// Do initial assignment of values which could change over the lifetime
	// of the agent process
	ctx.updateVolatileFromAgentContext(actx)

	// Set some other values obtained from the agent context which will not
	// change while the process is running.
	ctx.Agent.RunDir = actx.RunDir
	ctx.Agent.BinPath = actx.BinPath

	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}

	// build the agent message queue location
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s", ctx.Agent.Env.OS, ctx.Agent.UID)

	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}

	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}

	connected := false
	// Connect to the message broker.
	//
	// If any proxies have been configured, we try to use those first. If they fail, or
	// no proxies have been set up, just attempt a direct connection. A standalone
	// sketch of this fallback order follows this function.
	for _, proxy := range PROXIES {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Trying proxy %v for relay connection", proxy)}.Debug()
		ctx, err = initMQ(ctx, true, proxy)
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Info()
			continue
		}
		connected = true
		goto mqdone
	}
	// Try any proxy that has been specified in the environment
	ctx.Channels.Log <- mig.Log{Desc: "Trying proxies from environment for relay connection"}.Debug()
	ctx, err = initMQ(ctx, true, "")
	if err == nil {
		connected = true
		goto mqdone
	} else {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Info()
	}
	// Fall back to a direct connection
	ctx.Channels.Log <- mig.Log{Desc: "Trying direct relay connection"}.Debug()
	ctx, err = initMQ(ctx, false, "")
	if err == nil {
		connected = true
	} else {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Info()
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}

	// catch interrupts
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()

	// Try to connect the stat socket until it works.
	// This may fail if one agent is already running.
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}
	return
}
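// The relay connection logic above tries configured proxies first, then any
// proxy from the environment, then a direct connection. A standalone sketch of
// that fallback order, with a hypothetical dial callback standing in for initMQ:
func connectWithFallback(proxies []string, dial func(useProxy bool, proxy string) error) error {
	for _, p := range proxies {
		if dial(true, p) == nil { // proxy from the configuration
			return nil
		}
	}
	if dial(true, "") == nil { // proxy taken from HTTP_PROXY in the environment
		return nil
	}
	return dial(false, "") // direct connection as a last resort
}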
func main() {
	var err error
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)

	// command line options
	var config = flag.String("c", "/etc/mig/api.cfg", "Load configuration from file")
	var debug = flag.Bool("d", false, "Debug mode: run in foreground, log to stdout.")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()

	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}

	// The context initialization takes care of parsing the configuration,
	// and creating connections to database, syslog, ...
	fmt.Fprintf(os.Stderr, "Initializing API context...")
	ctx, err = Init(*config, *debug) // ctx is a global variable
	if err != nil {
		fmt.Printf("\nFATAL: %v\n", err)
		os.Exit(9)
	}
	fmt.Fprintf(os.Stderr, "OK\n")
	ctx.Channels.Log <- mig.Log{Desc: "Context initialization done"}

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop
			if stop {
				panic("Logger routine asked to stop")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logger routine started"}

	// register routes
	r := mux.NewRouter()
	s := r.PathPrefix(ctx.Server.BaseRoute).Subrouter()

	// unauthenticated endpoints
	s.HandleFunc("/heartbeat", getHeartbeat).Methods("GET")
	s.HandleFunc("/ip", getIP).Methods("GET")

	// Loader manifest endpoints, use loader-specific authentication on
	// the request
	s.HandleFunc("/manifest/agent/", authenticateLoader(getAgentManifest)).Methods("POST")
	s.HandleFunc("/manifest/fetch/", authenticateLoader(getManifestFile)).Methods("POST")

	// Investigator resources that require authentication
	s.HandleFunc("/search", authenticate(search, false)).Methods("GET")
	s.HandleFunc("/action", authenticate(getAction, false)).Methods("GET")
	s.HandleFunc("/action/create/", authenticate(createAction, false)).Methods("POST")
	s.HandleFunc("/command", authenticate(getCommand, false)).Methods("GET")
	s.HandleFunc("/agent", authenticate(getAgent, false)).Methods("GET")
	s.HandleFunc("/dashboard", authenticate(getDashboard, false)).Methods("GET")

	// Administrator resources
	s.HandleFunc("/loader", authenticate(getLoader, true)).Methods("GET")
	s.HandleFunc("/loader/status/", authenticate(statusLoader, true)).Methods("POST")
	s.HandleFunc("/loader/key/", authenticate(keyLoader, true)).Methods("POST")
	s.HandleFunc("/loader/new/", authenticate(newLoader, true)).Methods("POST")
	s.HandleFunc("/manifest", authenticate(getManifest, true)).Methods("GET")
	s.HandleFunc("/manifest/sign/", authenticate(signManifest, true)).Methods("POST")
	s.HandleFunc("/manifest/status/", authenticate(statusManifest, true)).Methods("POST")
	s.HandleFunc("/manifest/new/", authenticate(newManifest, true)).Methods("POST")
	s.HandleFunc("/manifest/loaders/", authenticate(manifestLoaders, true)).Methods("GET")
	s.HandleFunc("/investigator", authenticate(getInvestigator, true)).Methods("GET")
	s.HandleFunc("/investigator/create/", authenticate(createInvestigator, true)).Methods("POST")
	s.HandleFunc("/investigator/update/", authenticate(updateInvestigator, true)).Methods("POST")

	ctx.Channels.Log <- mig.Log{Desc: "Starting HTTP handler"}

	// all set, start the http handler
	http.Handle("/", context.ClearHandler(r))
	listenAddr := fmt.Sprintf("%s:%d", ctx.Server.IP, ctx.Server.Port)
	err = http.ListenAndServe(listenAddr, nil)
	if err != nil {
		panic(err)
	}
}
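// Every protected route above is wrapped in authenticate(handler, adminRequired),
// which gates the handler on the caller's identity and privileges. A hypothetical
// sketch of that wrapper's shape, not the API's real implementation:
func authenticateSketch(pass http.HandlerFunc, adminRequired bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// verifyRequest is an invented stand-in for the actual signature
		// and privilege checks performed by the API
		ok, isAdmin := verifyRequest(r)
		if !ok || (adminRequired && !isAdmin) {
			http.Error(w, "authorization verification failed", http.StatusUnauthorized)
			return
		}
		// authentication succeeded, pass the request to the wrapped handler
		pass(w, r)
	}
}

// verifyRequest is defined here only so the sketch compiles; a real
// implementation would validate the request's credentials and look up
// the caller's privileges.
func verifyRequest(r *http.Request) (ok, isAdmin bool) {
	return false, false
}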
func main() {
	var (
		err  error
		conf Config
		hint gozdef.HostAssetHint
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that listens for new endpoints and sends them as assets to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-intel-worker.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()
	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}
	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}
	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_New)
	if err != nil {
		panic(err)
	}
	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}
	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Agt_New})
	for event := range consumerChan {
		var agt mig.Agent
		err = json.Unmarshal(event.Body, &agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid agent description: %v", err)}.Err())
			continue
		}
		agt, err = populateTeam(agt, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to populate agent team: %v", err)}.Err())
		}
		hint, err = makeHintFromAgent(agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to build asset hint: %v", err)}.Err())
			continue
		}
		err = publishHintToMozdef(hint, gp)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
			// if publication to mozdef fails, crash the worker; systemd/upstart will restart a new one
			panic(err)
		}
		mig.ProcessLog(logctx, mig.Log{Desc: "published asset hint for agent '" + hint.Name + "' to mozdef"}.Info())
	}
	return
}
func main() {
	var (
		err   error
		conf  Config
		items []gozdef.ComplianceItem
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that transforms command results into compliance items and publishes them to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/compliance-item-worker.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()
	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}
	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}
	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Cmd_Res)
	if err != nil {
		panic(err)
	}
	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}
	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Cmd_Res})
	tFamRe := regexp.MustCompile("(?i)^compliance$")
	for event := range consumerChan {
		var cmd mig.Command
		err = json.Unmarshal(event.Body, &cmd)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid command: %v", err)}.Err())
		}
		// discard actions that aren't threat.family=compliance
		if !tFamRe.MatchString(cmd.Action.Threat.Family) {
			continue
		}
		items, err = makeComplianceItem(cmd, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make compliance items: %v", err)}.Err())
		}
		for _, item := range items {
			// create a new event and set values in the fields
			ev, err := gozdef.NewEvent()
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make new mozdef event: %v", err)}.Err())
			}
			ev.Category = "complianceitems"
			ev.Source = "mig"
			cverb := "fails"
			if item.Compliance {
				cverb = "passes"
			}
			ev.Summary = fmt.Sprintf("%s %s compliance with %s", item.Target, cverb, item.Check.Ref)
			ev.Tags = append(ev.Tags, "mig", "compliance")
			ev.Info()
			ev.Details = item
			err = gp.Send(ev)
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
				// if publication to mozdef fails, crash the worker; systemd/upstart will restart a new one
				panic(err)
			}
		}
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("published %d items from command %.0f to mozdef", len(items), cmd.ID)}.Info())
	}
	return
}
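// The anchored, case-insensitive pattern "(?i)^compliance$" above accepts
// exactly the string "compliance" in any letter case. A regexp-free
// equivalent, sketched here with a hypothetical name:
func isComplianceFamily(family string) bool {
	return strings.EqualFold(family, "compliance")
}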
func startRoutines(ctx Context) {
	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop now, feed the Terminate chan
			if stop {
				ctx.Channels.Terminate <- errors.New(event.Desc)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "mig.ProcessLog() routine started"}

	// Goroutine that loads actions dropped into ctx.Directories.Action.New
	go func() {
		for actionPath := range ctx.Channels.NewAction {
			ctx.OpID = mig.GenID()
			err := processNewAction(actionPath, ctx)
			// if something fails in the action processing, move it to the invalid folder
			if err != nil {
				// move the action to the INVALID folder and log
				dest := fmt.Sprintf("%s/%d.json", ctx.Directories.Action.Invalid, time.Now().UTC().UnixNano())
				os.Rename(actionPath, dest)
				reason := fmt.Sprintf("%v. '%s' moved to '%s'", err, actionPath, dest)
				ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: reason}.Warning()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "processNewAction() routine started"}

	// Goroutine that loads and sends commands dropped in ready state.
	// It uses a select and a timeout to load a batch of commands instead of
	// sending them one by one (see the sketch after this function).
	go func() {
		ctx.OpID = mig.GenID()
		readyCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandReady:
				ctr++
				readyCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range readyCmd {
						cmds = append(cmds, cmd)
						delete(readyCmd, id)
					}
					err := sendCommands(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "sendCommands() routine started"}

	// Goroutine that loads commands from ctx.Directories.Command.Returned and marks
	// them as finished or cancelled
	go func() {
		ctx.OpID = mig.GenID()
		returnedCmd := make(map[uint64]string)
		var ctr uint64 = 0
		for {
			select {
			case cmdFile := <-ctx.Channels.CommandReturned:
				ctr++
				returnedCmd[ctr] = cmdFile
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmdFiles []string
					for id, cmdFile := range returnedCmd {
						cmdFiles = append(cmdFiles, cmdFile)
						delete(returnedCmd, id)
					}
					err := returnCommands(cmdFiles, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "terminateCommand() routine started"}

	// Goroutine that updates an action when a command is done
	go func() {
		ctx.OpID = mig.GenID()
		doneCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandDone:
				ctr++
				doneCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range doneCmd {
						cmds = append(cmds, cmd)
						delete(doneCmd, id)
					}
					err := updateAction(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "updateAction() routine started"}

	// start a listening channel to receive heartbeats from agents
	heartbeatsChan, err := startHeartbeatsListener(ctx)
	if err != nil {
		panic(err)
	}
	go func() {
		for msg := range heartbeatsChan {
			ctx.OpID = mig.GenID()
			err := getHeartbeats(msg, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("heartbeat routine failed with error '%v'", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "agents heartbeats listener routine started"}

	// start a listening channel to receive results from agents
	agtResultsChan, err := startResultsListener(ctx)
	if err != nil {
		panic(err)
	}
	go func() {
		for delivery := range agtResultsChan {
			ctx.OpID = mig.GenID()
			// Validate the size of the data received, and make sure its first and
			// last bytes are valid json enclosures. If not, discard the message.
			if len(delivery.Body) < 10 || delivery.Body[0] != '{' || delivery.Body[len(delivery.Body)-1] != '}' {
				ctx.Channels.Log <- mig.Log{
					OpID: ctx.OpID,
					Desc: "discarding invalid message received in results channel",
				}.Err()
				continue
			}
			// write to disk in the Returned directory; discard and continue on failure
			dest := fmt.Sprintf("%s/%.0f", ctx.Directories.Command.Returned, ctx.OpID)
			err = safeWrite(ctx, dest, delivery.Body)
			if err != nil {
				ctx.Channels.Log <- mig.Log{
					OpID: ctx.OpID,
					Desc: fmt.Sprintf("failed to write agent results to disk: %v", err),
				}.Err()
				continue
			}
			// publish an event in the command results queue
			err = sendEvent(mig.Ev_Q_Cmd_Res, delivery.Body, ctx)
			if err != nil {
				panic(err)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "agents results listener routine started"}

	// launch the routine that regularly walks through the local directories
	go func() {
		collectorSleeper, err := time.ParseDuration(ctx.Collector.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := collector(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("collector routine failed with error '%v'", err)}.Err()
			}
			time.Sleep(collectorSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "collector routine started"}

	// launch the routine that periodically runs jobs
	go func() {
		periodicSleeper, err := time.ParseDuration(ctx.Periodic.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := periodic(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("periodic routine failed with error '%v'", err)}.Err()
			}
			time.Sleep(periodicSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "periodic routine started"}

	// launch the routine that cleans up unused amqp queues
	go func() {
		sleeper, err := time.ParseDuration(ctx.Periodic.QueuesCleanupFreq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err = QueuesCleanup(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("queues cleanup routine failed with error '%v'", err)}.Err()
			}
			time.Sleep(sleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "queue cleanup routine started"}

	// launch the routine that handles multiple agents on the same queue
	if ctx.Agent.KillDupAgents {
		go func() {
			for queueLoc := range ctx.Channels.DetectDupAgents {
				ctx.OpID = mig.GenID()
				err = killDupAgents(queueLoc, ctx)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
				}
			}
		}()
		ctx.Channels.Log <- mig.Log{Desc: "killDupAgents() routine started"}
	}

	// block here until a terminate message is received
	exitReason := <-ctx.Channels.Terminate
	fmt.Fprintf(os.Stderr, "Scheduler is shutting down. Reason: %s", exitReason)
	Destroy(ctx)
	return
}
// Init prepares the AMQP connections to the broker and launches the
// goroutines that process commands received from the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
	}()

	ctx.Agent.Tags = TAGS

	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}

	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}

	// logging goroutine
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()

	// Defines whether the agent should respawn itself or not.
	// This value is overridden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd.
	ctx.Agent.Respawn = ISIMMORTAL

	// get the path of the executable
	ctx.Agent.BinPath, err = osext.Executable()
	if err != nil {
		panic(err)
	}

	// retrieve the hostname
	ctx, err = findHostname(ctx)
	if err != nil {
		panic(err)
	}

	// retrieve information about the operating system
	ctx.Agent.Env.OS = runtime.GOOS
	ctx.Agent.Env.Arch = runtime.GOARCH
	ctx, err = findOSInfo(ctx)
	if err != nil {
		panic(err)
	}
	ctx, err = findLocalIPs(ctx)
	if err != nil {
		panic(err)
	}

	// attempt to discover the public IP
	if DISCOVERPUBLICIP {
		ctx, err = findPublicIP(ctx)
		if err != nil {
			panic(err)
		}
	}

	// find the run directory
	ctx.Agent.RunDir = getRunDir()

	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}

	// build the agent message queue location
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s.%s", ctx.Agent.Env.OS, ctx.Agent.Hostname, ctx.Agent.UID)

	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}

	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}

	connected := false
	// connect to the message broker, trying a direct connection first
	ctx, err = initMQ(ctx, false, "")
	if err != nil {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Debug()
		// if the direct connection failed, look for a proxy
		// in the environment variables, and try again
		ctx, err = initMQ(ctx, true, "")
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Debug()
			// still failing, try connecting using the proxies in the configuration
			for _, proxy := range PROXIES {
				ctx, err = initMQ(ctx, true, proxy)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Debug()
					continue
				}
				connected = true
				goto mqdone
			}
		} else {
			connected = true
		}
	} else {
		connected = true
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}

	// catch interrupts
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()

	// Try to connect the stat socket until it works.
	// This may fail if one agent is already running.
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}
	return
}