Example #1
File: main.go Project: igofman/mig
func main() {
	var (
		err  error
		conf Config
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker verifying agents that fail to authenticate\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-verif-worker.cfg", "Load configuration from file")
	flag.Parse()
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}
	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}
	// set a binding to route events from mig.Ev_Q_Agt_Auth_Fail into the queue named after the worker
	// and return a channel that consumes the queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_Auth_Fail)
	if err != nil {
		panic(err)
	}
	fmt.Println("started worker", workerName, "consuming queue", workerQueue, "from key", mig.Ev_Q_Agt_Auth_Fail)
	for event := range consumerChan {
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("unverified agent '%s'", event.Body)})
	}
	return
}
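
The example above reads its settings with gcfg.ReadFileInto but never shows the Config struct. Below is a minimal, self-contained sketch of what such a struct could look like; the Mq and Logging field names mirror the conf.Mq and conf.Logging accesses in the example, but the inner fields, the sample values, and the gopkg.in/gcfg.v1 import path are assumptions, not MIG's actual definitions.

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1" // import path is an assumption; MIG may vendor a different gcfg
)

// gcfg maps INI-style sections onto struct fields, so a file like
//
//	[mq]
//	host = relay.example.net
//	port = 5671
//
//	[logging]
//	mode = stdout
//
// fills the Mq and Logging members below.
type Config struct {
	Mq struct {
		Host string
		Port int
		User string
		Pass string
	}
	Logging struct {
		Mode  string
		Level string
	}
}

func main() {
	var conf Config
	if err := gcfg.ReadFileInto(&conf, "/etc/mig/agent-verif-worker.cfg"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", conf)
}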
Example #2
File: api.go Project: netantho/mig
func main() {
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)

	// command line options
	var config = flag.String("c", "/etc/mig/api.cfg", "Load configuration from file")
	flag.Parse()

	// The context initialization takes care of parsing the configuration,
	// and creating connections to database, syslog, ...
	fmt.Fprintf(os.Stderr, "Initializing API context...")
	var err error
	ctx, err = Init(*config)
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "OK\n")

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop
			if stop {
				panic("Logger routine asked to stop")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "mig.ProcessLog() routine started"}

	// register routes
	r := mux.NewRouter()
	s := r.PathPrefix(ctx.Server.BaseRoute).Subrouter()
	s.HandleFunc("/", getHome).Methods("GET")
	s.HandleFunc("/search", search).Methods("GET")
	s.HandleFunc("/action", getAction).Methods("GET")
	s.HandleFunc("/action/create/", describeCreateAction).Methods("GET")
	s.HandleFunc("/action/create/", createAction).Methods("POST")
	s.HandleFunc("/action/cancel/", describeCancelAction).Methods("GET")
	s.HandleFunc("/action/cancel/", cancelAction).Methods("POST")
	s.HandleFunc("/command", getCommand).Methods("GET")
	s.HandleFunc("/command/cancel/", describeCancelCommand).Methods("GET")
	s.HandleFunc("/command/cancel/", cancelCommand).Methods("POST")
	s.HandleFunc("/agent", getAgent).Methods("GET")
	s.HandleFunc("/dashboard", getDashboard).Methods("GET")

	// all set, start the http handler
	http.Handle("/", r)
	listenAddr := fmt.Sprintf("%s:%d", ctx.Server.IP, ctx.Server.Port)
	err = http.ListenAndServe(listenAddr, nil)
	if err != nil {
		panic(err)
	}
}
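
A detail worth noting in the routing block: gorilla/mux lets the same path be registered twice, and .Methods() dispatches on the HTTP verb, which is how "/action/create/" serves both a GET description and a POST creation. A minimal, runnable sketch of that pattern, with placeholder handlers and an arbitrary listen address:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	s := r.PathPrefix("/api/v1").Subrouter()
	// same path, two handlers: the router picks one based on the verb
	s.HandleFunc("/action/create/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "describe how to create an action")
	}).Methods("GET")
	s.HandleFunc("/action/create/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "action created")
	}).Methods("POST")
	if err := http.ListenAndServe("127.0.0.1:8080", r); err != nil {
		panic(err)
	}
}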
Example #3
File: main.go Project: igofman/mig
func main() {
	var (
		err   error
		conf  Config
		items []gozdef.ComplianceItem
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that transform commands results into compliance items and publishes them to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/compliance-item-worker.cfg", "Load configuration from file")
	flag.Parse()
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}

	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}

	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Cmd_Res)
	if err != nil {
		panic(err)
	}

	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}

	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Cmd_Res})
	tFamRe := regexp.MustCompile("(?i)^compliance$")
	for event := range consumerChan {
		var cmd mig.Command
		err = json.Unmarshal(event.Body, &cmd)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid command: %v", err)}.Err())
			continue
		}
		// discard actions that aren't threat.family=compliance
		if !tFamRe.MatchString(cmd.Action.Threat.Family) {
			continue
		}
		items, err = makeComplianceItem(cmd, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make compliance items: %v", err)}.Err())
			continue
		}
		for _, item := range items {
			// create a new event and set values in the fields
			ev, err := gozdef.NewEvent()
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make new mozdef event: %v", err)}.Err())
				continue
			}
			ev.Category = "complianceitems"
			ev.Source = "mig"
			cverb := "fails"
			if item.Compliance {
				cverb = "passes"
			}
			ev.Summary = fmt.Sprintf("%s %s compliance with %s", item.Target, cverb, item.Check.Ref)
			ev.Tags = append(ev.Tags, "mig")
			ev.Tags = append(ev.Tags, "compliance")
			ev.Info()
			ev.Details = item
			err = gp.Send(ev)
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
				// if publication to mozdef fails, crash the worker; systemd/upstart will start a new one
				panic(err)
			}
		}
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("published %d items from command %.0f to mozdef", len(items), cmd.ID)}.Info())
	}
	return
}
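
The (?i)^compliance$ regexp in this worker is just a case-insensitive equality test on the threat family. A small standalone sketch showing that strings.EqualFold from the standard library expresses the same filter without compiling a regexp:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	re := regexp.MustCompile("(?i)^compliance$")
	for _, fam := range []string{"compliance", "Compliance", "malware"} {
		// both checks agree on every input
		fmt.Println(fam, re.MatchString(fam), strings.EqualFold(fam, "compliance"))
	}
}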
Example #4
// Init prepares the AMQP connections to the broker and launches the
// goroutines that will process commands received from the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
	}()
	ctx.Agent.Tags = TAGS

	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}
	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}
	// logging goroutine
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()

	// defines whether the agent should respawn itself or not
	// this value is overridden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd
	ctx.Agent.Respawn = ISIMMORTAL

	// get the path of the executable
	ctx.Agent.BinPath, err = osext.Executable()
	if err != nil {
		panic(err)
	}

	// retrieve the hostname
	ctx, err = findHostname(ctx)
	if err != nil {
		panic(err)
	}

	// retrieve information about the operating system
	ctx.Agent.Env.OS = runtime.GOOS
	ctx.Agent.Env.Arch = runtime.GOARCH
	ctx, err = findOSInfo(ctx)
	if err != nil {
		panic(err)
	}
	ctx, err = findLocalIPs(ctx)
	if err != nil {
		panic(err)
	}

	// Attempt to discover the public IP
	if DISCOVERPUBLICIP {
		ctx, err = findPublicIP(ctx)
		if err != nil {
			panic(err)
		}
	}

	// find the run directory
	ctx.Agent.RunDir = getRunDir()

	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}

	// build the agent message queue location
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s.%s", ctx.Agent.Env.OS, ctx.Agent.Hostname, ctx.Agent.UID)

	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}

	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}

	connected := false
	// connect to the message broker
	ctx, err = initMQ(ctx, false, "")
	if err != nil {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Debug()
		// if the connection failed, look for a proxy
		// in the environment variables, and try again
		ctx, err = initMQ(ctx, true, "")
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Debug()
			// still failing, try connecting using the proxies in the configuration
			for _, proxy := range PROXIES {
				ctx, err = initMQ(ctx, true, proxy)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Debug()
					continue
				}
				connected = true
				goto mqdone
			}
		} else {
			connected = true
		}
	} else {
		connected = true
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}

	// catch interrupts
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()

	// try to connect the stat socket until it works
	// this may fail if one agent is already running
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}

	return
}
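
The interrupt handling near the end of Init follows a common Go pattern: signals are converted into a message on a terminate channel that the main loop blocks on. A standalone sketch of just that mechanism, with the channel type chosen for illustration:

package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	terminate := make(chan string)
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		// forward the signal name to the terminate channel,
		// mirroring ctx.Channels.Terminate in the example above
		sig := <-c
		terminate <- sig.String()
	}()
	fmt.Println("waiting for ctrl-c...")
	fmt.Println("terminated by:", <-terminate)
}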
Example #5
File: api.go Project: Novemburr/mig
func main() {
	var err error
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)

	// command line options
	var config = flag.String("c", "/etc/mig/api.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()

	if *showversion {
		fmt.Println(version)
		os.Exit(0)
	}

	// The context initialization takes care of parsing the configuration,
	// and creating connections to database, syslog, ...
	fmt.Fprintf(os.Stderr, "Initializing API context...")
	ctx, err = Init(*config) //ctx is a global variable
	if err != nil {
		fmt.Printf("\nFATAL: %v\n", err)
		os.Exit(9)
	}
	fmt.Fprintf(os.Stderr, "OK\n")
	ctx.Channels.Log <- mig.Log{Desc: "Context initialization done"}

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop
			if stop {
				panic("Logger routine asked to stop")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logger routine started"}

	// register routes
	r := mux.NewRouter()
	s := r.PathPrefix(ctx.Server.BaseRoute).Subrouter()
	// unauthenticated endpoints
	s.HandleFunc("/heartbeat", getHeartbeat).Methods("GET")
	s.HandleFunc("/ip", getIP).Methods("GET")
	// all other resources require authentication
	s.HandleFunc("/", authenticate(getHome)).Methods("GET")
	s.HandleFunc("/search", authenticate(search)).Methods("GET")
	s.HandleFunc("/action", authenticate(getAction)).Methods("GET")
	s.HandleFunc("/action/create/", authenticate(describeCreateAction)).Methods("GET")
	s.HandleFunc("/action/create/", authenticate(createAction)).Methods("POST")
	s.HandleFunc("/command", authenticate(getCommand)).Methods("GET")
	s.HandleFunc("/agent", authenticate(getAgent)).Methods("GET")
	s.HandleFunc("/investigator", authenticate(getInvestigator)).Methods("GET")
	s.HandleFunc("/investigator/create/", authenticate(describeCreateInvestigator)).Methods("GET")
	s.HandleFunc("/investigator/create/", authenticate(createInvestigator)).Methods("POST")
	s.HandleFunc("/investigator/update/", authenticate(describeUpdateInvestigator)).Methods("GET")
	s.HandleFunc("/investigator/update/", authenticate(updateInvestigator)).Methods("POST")
	s.HandleFunc("/dashboard", authenticate(getDashboard)).Methods("GET")

	ctx.Channels.Log <- mig.Log{Desc: "Starting HTTP handler"}

	// all set, start the http handler
	http.Handle("/", context.ClearHandler(r))
	listenAddr := fmt.Sprintf("%s:%d", ctx.Server.IP, ctx.Server.Port)
	err = http.ListenAndServe(listenAddr, nil)
	if err != nil {
		panic(err)
	}
}
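
This version of the API wraps most handlers in authenticate(), whose implementation isn't shown. As a rough sketch, such a wrapper is typically a function that takes an http.HandlerFunc and returns another one that rejects the request unless a credential checks out; the X-Auth-Token header and the empty-token test below are hypothetical stand-ins, not MIG's actual scheme:

package main

import (
	"fmt"
	"net/http"
)

// authenticate returns a handler that only calls next when the
// (hypothetical) credential header is present.
func authenticate(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Auth-Token") == "" {
			http.Error(w, "authentication required", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/dashboard", authenticate(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "dashboard")
	}))
	if err := http.ListenAndServe("127.0.0.1:8080", nil); err != nil {
		panic(err)
	}
}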
Example #6
// main initializes the mongodb connection, the directory watchers and the
// AMQP ctx.MQ.Chan. It also launches the goroutines.
func main() {
	// command line options
	var config = flag.String("c", "/etc/mig/scheduler.cfg", "Load configuration from file")
	flag.Parse()

	// The context initialization takes care of parsing the configuration,
	// and creating connections to database, message broker, syslog, ...
	fmt.Fprintf(os.Stderr, "Initializing Scheduler context...")
	ctx, err := Init(*config)
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "OK\n")

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop now, feed the Terminate chan
			if stop {
				ctx.Channels.Terminate <- errors.New(event.Desc)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "mig.ProcessLog() routine started"}

	// Watch the data directories for new files
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}

	go watchDirectories(watcher, ctx)
	err = initWatchers(watcher, ctx)
	if err != nil {
		panic(err)
	}

	// Goroutine that loads actions dropped into ctx.Directories.Action.New
	go func() {
		for actionPath := range ctx.Channels.NewAction {
			ctx.OpID = mig.GenID()
			err := processNewAction(actionPath, ctx)
			// if something fails in the action processing, move it to the invalid folder
			if err != nil {
				// move action to INVALID folder and log
				dest := fmt.Sprintf("%s/%d.json", ctx.Directories.Action.Invalid, time.Now().UTC().UnixNano())
				os.Rename(actionPath, dest)
				reason := fmt.Sprintf("%v. %s moved to %s", err, actionPath, dest)
				ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: reason}.Warning()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "processNewAction() routine started"}

	// Goroutine that loads and sends commands dropped into ctx.Directories.Command.Ready
	go func() {
		for cmdPath := range ctx.Channels.CommandReady {
			ctx.OpID = mig.GenID()
			err := sendCommand(cmdPath, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "sendCommand() routine started"}

	// Goroutine that loads commands from the ctx.Directories.Command.Returned and marks
	// them as finished or cancelled
	go func() {
		for cmdPath := range ctx.Channels.CommandReturned {
			ctx.OpID = mig.GenID()
			err := terminateCommand(cmdPath, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "terminateCommand() routine started"}

	// Goroutine that updates an action when a command is done
	go func() {
		for cmdPath := range ctx.Channels.CommandDone {
			ctx.OpID = mig.GenID()
			err = updateAction(cmdPath, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "updateAction() routine started"}

	// Init is completed, restart queues of agents that have recent
	// keepalives in the database
	err = pickUpAliveAgents(ctx)
	if err != nil {
		panic(err)
	}

	// start a listening channel to receive keepalives from agents
	keepaliveChan, err := startKeepAliveChannel(ctx)
	if err != nil {
		panic(err)
	}

	// launch the routine that handles registrations
	go func() {
		for msg := range keepaliveChan {
			ctx.OpID = mig.GenID()
			err := getKeepAlives(msg, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Agent KeepAlive routine started"}

	// launch the routine that regularly walks through the local directories
	go func() {
		collectorSleeper, err := time.ParseDuration(ctx.Collector.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := spoolInspection(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
			time.Sleep(collectorSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "spoolInspection() routine started"}

	// won't exit until this chan receives something
	exitReason := <-ctx.Channels.Terminate
	fmt.Fprintf(os.Stderr, "Scheduler is shutting down. Reason: %s", exitReason)
	Destroy(ctx)
}
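
Several of the scheduler goroutines above share one idiom: a frequency string from the configuration (ctx.Collector.Freq) is parsed once with time.ParseDuration, then the work loop sleeps on it. A standalone sketch of that idiom; the "2s" value and the three-iteration bound are illustrative only:

package main

import (
	"fmt"
	"time"
)

func main() {
	// in the scheduler this string comes from the config file
	sleeper, err := time.ParseDuration("2s")
	if err != nil {
		panic(err)
	}
	for i := 0; i < 3; i++ {
		fmt.Println("walking spool directories at", time.Now().Format(time.Stamp))
		time.Sleep(sleeper)
	}
}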
Example #7
// Init prepares the AMQP connections to the broker and launches the
// goroutines that will process commands received from the MIG Scheduler
func Init(foreground bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
	}()

	// find out current working dir and build the bin path
	// it's important to do that before we daemonize, otherwise the cwd will be /
	cdir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	ctx.Agent.BinPath = cdir + "/" + os.Args[0]

	// daemonize, and force logging to stdout
	if !foreground && LOGGINGCONF.Mode != "stdout" {
		LOGGINGCONF.Mode = "stdout"
		godaemon.MakeDaemon(&godaemon.DaemonAttr{})
	}

	// store heartbeat frequency
	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}

	// initialize the logging configuration
	ctx.Logging, err = mig.InitLogger(LOGGINGCONF)
	if err != nil {
		panic(err)
	}

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
			// if ProcessLog says we should stop now, feed the Terminate chan
			if stop {
				ctx.Channels.Terminate <- errors.New(event.Desc)
			}
		}
	}()

	// retrieve information on agent environment
	ctx, err = initAgentEnv(ctx)
	if err != nil {
		panic(err)
	}

	// connect to the message broker
	ctx, err = initMQ(ctx)
	if err != nil {
		panic(err)
	}

	return
}
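
One fragile spot in this older Init: building BinPath as cwd + "/" + os.Args[0] breaks when the binary is invoked through $PATH or via a relative path. Example #4 uses osext.Executable instead; since Go 1.8 the standard library offers os.Executable, as in this sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// ask the OS for the running binary's path instead of guessing
	// from the working directory and os.Args[0]
	bin, err := os.Executable()
	if err != nil {
		panic(err)
	}
	// optionally resolve symlinks to get the real on-disk path
	bin, err = filepath.EvalSymlinks(bin)
	if err != nil {
		panic(err)
	}
	fmt.Println("binary path:", bin)
}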
Example #8
func startRoutines(ctx Context) {
	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop now, feed the Terminate chan
			if stop {
				ctx.Channels.Terminate <- errors.New(event.Desc)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "mig.ProcessLog() routine started"}

	// Goroutine that loads actions dropped into ctx.Directories.Action.New
	go func() {
		for actionPath := range ctx.Channels.NewAction {
			ctx.OpID = mig.GenID()
			err := processNewAction(actionPath, ctx)
			// if something fails in the action processing, move it to the invalid folder
			if err != nil {
				// move action to INVALID folder and log
				dest := fmt.Sprintf("%s/%d.json", ctx.Directories.Action.Invalid, time.Now().UTC().UnixNano())
				os.Rename(actionPath, dest)
				reason := fmt.Sprintf("%v. '%s' moved to '%s'", err, actionPath, dest)
				ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: reason}.Warning()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "processNewAction() routine started"}

	// Goroutine that loads and sends commands dropped in ready state
	// it uses a select and a timeout to load a batch of commands instead of
	// sending them one by one
	go func() {
		ctx.OpID = mig.GenID()
		readyCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandReady:
				ctr++
				readyCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range readyCmd {
						cmds = append(cmds, cmd)
						delete(readyCmd, id)
					}
					err := sendCommands(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "sendCommands() routine started"}

	// Goroutine that loads commands from the ctx.Directories.Command.Returned and marks
	// them as finished or cancelled
	go func() {
		ctx.OpID = mig.GenID()
		returnedCmd := make(map[uint64]string)
		var ctr uint64 = 0
		for {
			select {
			case cmdFile := <-ctx.Channels.CommandReturned:
				ctr++
				returnedCmd[ctr] = cmdFile
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmdFiles []string
					for id, cmdFile := range returnedCmd {
						cmdFiles = append(cmdFiles, cmdFile)
						delete(returnedCmd, id)
					}
					err := returnCommands(cmdFiles, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "terminateCommand() routine started"}

	// Goroutine that updates an action when a command is done
	go func() {
		ctx.OpID = mig.GenID()
		doneCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandDone:
				ctr++
				doneCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range doneCmd {
						cmds = append(cmds, cmd)
						delete(doneCmd, id)
					}
					err := updateAction(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}

	}()
	ctx.Channels.Log <- mig.Log{Desc: "updateAction() routine started"}

	// start a listening channel to receive heartbeats from agents
	heartbeatsChan, err := startHeartbeatsListener(ctx)
	if err != nil {
		panic(err)
	}
	go func() {
		for msg := range heartbeatsChan {
			ctx.OpID = mig.GenID()
			err := getHeartbeats(msg, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("heartbeat routine failed with error '%v'", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "agents heartbeats listener routine started"}

	// start a listening channel to receive results from agents
	agtResultsChan, err := startResultsListener(ctx)
	if err != nil {
		panic(err)
	}
	go func() {
		for delivery := range agtResultsChan {
			ctx.OpID = mig.GenID()
			// validate the size of the data received, and make sure its first and
			// last bytes are valid json enclosures. if not, discard the message.
			if len(delivery.Body) < 10 || delivery.Body[0] != '{' || delivery.Body[len(delivery.Body)-1] != '}' {
				ctx.Channels.Log <- mig.Log{
					OpID: ctx.OpID,
					Desc: fmt.Sprintf("discarding invalid message received in results channel"),
				}.Err()
				continue
			}
			// write to disk in Returned directory, discard and continue on failure
			dest := fmt.Sprintf("%s/%.0f", ctx.Directories.Command.Returned, ctx.OpID)
			err = safeWrite(ctx, dest, delivery.Body)
			if err != nil {
				ctx.Channels.Log <- mig.Log{
					OpID: ctx.OpID,
					Desc: fmt.Sprintf("failed to write agent results to disk: %v", err),
				}.Err()
				continue
			}
			// publish an event in the command results queue
			err = sendEvent(event.Q_Cmd_Res, delivery.Body, ctx)
			if err != nil {
				panic(err)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "agents results listener routine started"}

	// launch the routine that regularly walks through the local directories
	go func() {
		collectorSleeper, err := time.ParseDuration(ctx.Collector.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := collector(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("collector routined failed with error '%v'", err)}.Err()
			}
			time.Sleep(collectorSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "collector routine started"}

	// launch the routine that periodically runs jobs
	go func() {
		periodicSleeper, err := time.ParseDuration(ctx.Periodic.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := periodic(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("period routine failed with error '%v'", err)}.Err()
			}
			time.Sleep(periodicSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "periodic routine started"}

	// launch the routine that cleans up unused amqp queues
	go func() {
		sleeper, err := time.ParseDuration(ctx.Periodic.QueuesCleanupFreq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err = QueuesCleanup(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("queues cleanup routine failed with error '%v'", err)}.Err()
			}
			time.Sleep(sleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "queue cleanup routine started"}

	// launch the routine that handles multiple agents on the same queue
	if ctx.Agent.KillDupAgents {
		go func() {
			for queueLoc := range ctx.Channels.DetectDupAgents {
				ctx.OpID = mig.GenID()
				err = killDupAgents(queueLoc, ctx)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
				}
			}
		}()
		ctx.Channels.Log <- mig.Log{Desc: "killDupAgents() routine started"}
	}

	// block here until a terminate message is received
	exitReason := <-ctx.Channels.Terminate
	fmt.Fprintf(os.Stderr, "Scheduler is shutting down. Reason: %s", exitReason)
	Destroy(ctx)
	return
}
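
The three select+time.After loops above implement the same batch-and-flush idea: items accumulate from a channel, and the batch is sent once a full second passes without a new arrival (the timer is re-armed on every iteration, so a flush only fires after a quiet second). A standalone sketch of the pattern with illustrative names:

package main

import (
	"fmt"
	"time"
)

// batcher collects items until the input channel stays quiet for a
// second, then hands the whole batch to flush.
func batcher(in <-chan string, flush func([]string)) {
	var pending []string
	for {
		select {
		case item := <-in:
			pending = append(pending, item)
		case <-time.After(1 * time.Second):
			if len(pending) > 0 {
				flush(pending)
				pending = nil
			}
		}
	}
}

func main() {
	in := make(chan string)
	go batcher(in, func(batch []string) {
		fmt.Printf("flushing %d commands: %v\n", len(batch), batch)
	})
	in <- "command-1"
	in <- "command-2"
	time.Sleep(2 * time.Second) // leave time for the flush to fire
}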
Example #9
// main initializes the mongodb connection, the directory watchers and the
// AMQP ctx.MQ.Chan. It also launches the goroutines.
func main() {
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)

	// command line options
	var config = flag.String("c", "/etc/mig/scheduler.cfg", "Load configuration from file")
	flag.Parse()

	// The context initialization takes care of parsing the configuration,
	// and creating connections to database, message broker, syslog, ...
	fmt.Fprintf(os.Stderr, "Initializing Scheduler context...")
	ctx, err := Init(*config)
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "OK\n")

	// Goroutine that handles events, such as logs and panics,
	// and decides what to do with them
	go func() {
		for event := range ctx.Channels.Log {
			stop, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("Unable to process logs")
			}
			// if ProcessLog says we should stop now, feed the Terminate chan
			if stop {
				ctx.Channels.Terminate <- errors.New(event.Desc)
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "mig.ProcessLog() routine started"}

	// Watch the data directories for new files
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}

	go watchDirectories(watcher, ctx)
	// TODO: looks like the watchers are lost after a while. the (ugly) loop
	// below reinits the watchers every 137 seconds to prevent that from happening
	go func() {
		for {
			err = initWatchers(watcher, ctx)
			if err != nil {
				panic(err)
			}
			time.Sleep(137 * time.Second)
		}
	}()

	// Goroutine that loads actions dropped into ctx.Directories.Action.New
	go func() {
		for actionPath := range ctx.Channels.NewAction {
			ctx.OpID = mig.GenID()
			err := processNewAction(actionPath, ctx)
			// if something fails in the action processing, move it to the invalid folder
			if err != nil {
				// move action to INVALID folder and log
				dest := fmt.Sprintf("%s/%d.json", ctx.Directories.Action.Invalid, time.Now().UTC().UnixNano())
				os.Rename(actionPath, dest)
				reason := fmt.Sprintf("%v. '%s' moved to '%s'", err, actionPath, dest)
				ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: reason}.Warning()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "processNewAction() routine started"}

	// Goroutine that loads and sends commands dropped in ready state
	// it uses a select and a timeout to load a batch of commands instead of
	// sending them one by one
	go func() {
		ctx.OpID = mig.GenID()
		readyCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandReady:
				ctr++
				readyCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range readyCmd {
						cmds = append(cmds, cmd)
						delete(readyCmd, id)
					}
					err := sendCommands(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "sendCommands() routine started"}

	// Goroutine that loads commands from the ctx.Directories.Command.Returned and marks
	// them as finished or cancelled
	go func() {
		ctx.OpID = mig.GenID()
		returnedCmd := make(map[uint64]string)
		var ctr uint64 = 0
		for {
			select {
			case cmdFile := <-ctx.Channels.CommandReturned:
				ctr++
				returnedCmd[ctr] = cmdFile
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmdFiles []string
					for id, cmdFile := range returnedCmd {
						cmdFiles = append(cmdFiles, cmdFile)
						delete(returnedCmd, id)
					}
					err := returnCommands(cmdFiles, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "terminateCommand() routine started"}

	// Goroutine that updates an action when a command is done
	go func() {
		ctx.OpID = mig.GenID()
		doneCmd := make(map[float64]mig.Command)
		ctr := 0
		for {
			select {
			case cmd := <-ctx.Channels.CommandDone:
				ctr++
				doneCmd[cmd.ID] = cmd
			case <-time.After(1 * time.Second):
				if ctr > 0 {
					var cmds []mig.Command
					for id, cmd := range doneCmd {
						cmds = append(cmds, cmd)
						delete(doneCmd, id)
					}
					err := updateAction(cmds, ctx)
					if err != nil {
						ctx.Channels.Log <- mig.Log{OpID: ctx.OpID, Desc: fmt.Sprintf("%v", err)}.Err()
					}
				}
				// reinit
				ctx.OpID = mig.GenID()
				ctr = 0
			}
		}

	}()
	ctx.Channels.Log <- mig.Log{Desc: "updateAction() routine started"}

	// Init is completed, restart queues of active agents
	err = pickUpAliveAgents(ctx)
	if err != nil {
		panic(err)
	}

	// start a listening channel to receive heartbeats from agents
	activeAgentsChan, err := startActiveAgentsChannel(ctx)
	if err != nil {
		panic(err)
	}

	// launch the routine that handles registrations
	go func() {
		for msg := range activeAgentsChan {
			ctx.OpID = mig.GenID()
			err := getHeartbeats(msg, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Agent KeepAlive routine started"}

	// launch the routine that regularly walks through the local directories
	go func() {
		collectorSleeper, err := time.ParseDuration(ctx.Collector.Freq)
		if err != nil {
			panic(err)
		}
		for {
			ctx.OpID = mig.GenID()
			err := spoolInspection(ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
			time.Sleep(collectorSleeper)
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "spoolInspection() routine started"}

	// launch the routine that handles multiple agents on the same queue
	go func() {
		for queueLoc := range ctx.Channels.DetectDupAgents {
			ctx.OpID = mig.GenID()
			err = inspectMultiAgents(queueLoc, ctx)
			if err != nil {
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("%v", err)}.Err()
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "inspectMultiAgents() routine started"}

	// won't exit until this chan receives something
	exitReason := <-ctx.Channels.Terminate
	fmt.Fprintf(os.Stderr, "Scheduler is shutting down. Reason: %s", exitReason)
	Destroy(ctx)
}
Example #10
File: main.go Project: igofman/mig
func main() {
	var (
		err  error
		conf Config
		hint gozdef.HostAssetHint
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that listens to new endpoints and sends them as assets to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-intel-worker.cfg", "Load configuration from file")
	flag.Parse()
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}

	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}

	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_New)
	if err != nil {
		panic(err)
	}

	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}

	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Agt_New})
	for event := range consumerChan {
		var agt mig.Agent
		err = json.Unmarshal(event.Body, &agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid agent description: %v", err)}.Err())
			continue
		}
		agt, err = populateTeam(agt, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to populate agent team: %v", err)}.Err())
		}
		hint, err = makeHintFromAgent(agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to build asset hint: %v", err)}.Err())
			continue
		}
		err = publishHintToMozdef(hint, gp)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
			// if publication to mozdef fails, crash the worker; systemd/upstart will start a new one
			panic(err)
		}
		mig.ProcessLog(logctx, mig.Log{Desc: "published asset hint for agent '" + hint.Name + "' to mozdef"}.Info())
	}
	return
}