Example #1
File: main.go Project: jvehent/mig
func main() {
	var (
		err  error
		conf Config
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker verifying agents that fail to authenticate\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-verif-worker.cfg", "Load configuration from file")
	flag.Parse()
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}
	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}
	// set a binding to route events from mig.Ev_Q_Agt_Auth_Fail into the queue named after the worker
	// and return a channel that consumes the queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_Auth_Fail)
	if err != nil {
		panic(err)
	}
	fmt.Println("started worker", workerName, "consuming queue", workerQueue, "from key", mig.Ev_Q_Agt_Auth_Fail)
	for event := range consumerChan {
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("unverified agent '%s'", event.Body)})
	}
	return
}
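The Config struct that gcfg.ReadFileInto fills above is not shown on this page. Below is a minimal sketch of what it might look like, with field types inferred from how conf.Logging and conf.Mq are used in main(); the type names mig.Logging and workers.MqConf are assumptions, not a copy of the worker's actual declaration.

// Sketch only: field names match the usage above, field types are assumed.
type Config struct {
	Logging mig.Logging    // passed to mig.InitLogger
	Mq      workers.MqConf // passed to workers.InitMqWithConsumer
}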
Example #2
// Init() initializes a context from a configuration file into an
// existing context struct
func Init(path string, debug bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("Init() -> %v", e)
		}
	}()
	ctx.Channels.Log = make(chan mig.Log, 37)

	err = gcfg.ReadFileInto(&ctx, path)
	if err != nil {
		panic(err)
	}

	ctx.Server.BaseURL = ctx.Server.Host + ctx.Server.BaseRoute
	ctx.Authentication.duration, err = time.ParseDuration(ctx.Authentication.TokenDuration)
	if err != nil {
		panic(err)
	}

	// Set the mode we will use to determine a client's public IP address
	if ctx.Server.ClientPublicIP == "" {
		ctx.Server.ClientPublicIP = "peer"
	}
	ctx.Server.ClientPublicIPOffset, err = parseClientPublicIP(ctx.Server.ClientPublicIP)
	if err != nil {
		fmt.Println(err)
		panic(err)
	}

	if debug {
		ctx.Logging.Level = "debug"
		ctx.Logging.Mode = "stdout"
	}
	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-api")
	if err != nil {
		panic(err)
	}

	if ctx.Manifest.RequiredSignatures < 1 {
		panic("manifest:requiredsignatures must be at least 1 in config file")
	}

	ctx, err = initDB(ctx)
	if err != nil {
		panic(err)
	}

	if ctx.MaxMind.Path != "" {
		ctx.MaxMind.r, err = geo.Open(ctx.MaxMind.Path)
		if err != nil {
			panic(err)
		}
	}
	return
}
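Because the deferred recover() turns any panic raised inside Init() into the named return value err, callers only need an ordinary error check. A minimal hypothetical caller (the config path and error handling are illustrative, not taken from the project):

ctx, err := Init("/etc/mig/api.cfg", false)
if err != nil {
	// any panic during initialization surfaces here as a regular error
	fmt.Fprintf(os.Stderr, "API initialization failed: %v\n", err)
	os.Exit(1)
}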
Example #3
func initContext() (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initContext() -> %v", e)
		}
	}()

	ctx.Channels.Log = make(chan mig.Log, 37)
	ctx.Logging, err = getLoggingConf()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-loader")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	wg.Add(1)
	go func() {
		var stop bool
		for event := range ctx.Channels.Log {
			// Also write the message to stderr to ease debugging
			fmt.Fprintf(os.Stderr, "%v\n", event.Desc)
			stop, err = mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				panic("unable to process log")
			}
			if stop {
				break
			}
		}
		wg.Done()
	}()
	logInfo("logging routine started")

	ctx.LoaderKey = LOADERKEY

	hints := agentcontext.AgentContextHints{
		DiscoverPublicIP: DISCOVERPUBLICIP,
		DiscoverAWSMeta:  DISCOVERAWSMETA,
		APIUrl:           APIURL,
		Proxies:          PROXIES[:],
	}
	actx, err := agentcontext.NewAgentContext(ctx.Channels.Log, hints)
	if err != nil {
		panic(err)
	}
	ctx.AgentIdentifier = actx.ToAgent()

	return
}
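The goroutine launched above drains ctx.Channels.Log, mirrors each entry to stderr, and forwards it to mig.ProcessLog until an entry requests a stop. Any other part of the loader can therefore log by sending on that channel; an illustrative send, using the same mig.Log helpers seen in the other examples on this page:

ctx.Channels.Log <- mig.Log{Desc: "loader run starting"}.Info()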
Example #4
// Init() initializes a context from a configuration file into an
// existing context struct
func Init(path string) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("Init() -> %v", e)
		}
	}()

	err = gcfg.ReadFileInto(&ctx, path)
	if err != nil {
		panic(err)
	}

	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}

	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-scheduler")
	if err != nil {
		panic(err)
	}

	ctx, err = initDirectories(ctx)
	if err != nil {
		panic(err)
	}

	ctx, err = initDB(ctx)
	if err != nil {
		panic(err)
	}

	ctx, err = initRelay(ctx)
	if err != nil {
		panic(err)
	}

	ctx, err = initSecring(ctx)
	if err != nil {
		panic(err)
	}

	return
}
Example #5
// Init() initializes a context from a configuration file into an
// existing context struct
func Init(path string, debug bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("Init() -> %v", e)
		}
	}()
	ctx.Channels.Log = make(chan mig.Log, 37)

	err = gcfg.ReadFileInto(&ctx, path)
	if err != nil {
		panic(err)
	}

	ctx.Server.BaseURL = ctx.Server.Host + ctx.Server.BaseRoute
	ctx.Authentication.duration, err = time.ParseDuration(ctx.Authentication.TokenDuration)
	if err != nil {
		panic(err)
	}

	if debug {
		ctx.Logging.Level = "debug"
		ctx.Logging.Mode = "stdout"
	}
	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-api")
	if err != nil {
		panic(err)
	}

	ctx, err = initDB(ctx)
	if err != nil {
		panic(err)
	}

	if ctx.MaxMind.Path != "" {
		ctx.MaxMind.r, err = geo.Open(ctx.MaxMind.Path)
		if err != nil {
			panic(err)
		}
	}
	return
}
Example #6
func initContext(config string) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initContext() -> %v", e)
		}
	}()

	ctx = Context{}
	ctx.Channels.Log = make(chan mig.Log, 37)
	ctx.Channels.Results = make(chan mig.RunnerResult, 64)
	ctx.Channels.ExitNotify = make(chan bool, 64)
	ctx.Entities = make(map[string]*entity)
	err = gcfg.ReadFileInto(&ctx, config)
	if err != nil {
		panic(err)
	}

	ctx.Runner.RunDirectory = path.Join(ctx.Runner.Directory, "runners")
	ctx.Runner.PluginDirectory = path.Join(ctx.Runner.Directory, "plugins")

	if ctx.Client.ClientConfPath == "default" {
		hdir := client.FindHomedir()
		ctx.Client.ClientConfPath = path.Join(hdir, ".migrc")
	}
	ctx.ClientConf, err = client.ReadConfiguration(ctx.Client.ClientConfPath)
	if err != nil {
		panic(err)
	}

	if ctx.Client.Passphrase != "" {
		client.ClientPassphrase(ctx.Client.Passphrase)
	}

	ctx.Logging, err = mig.InitLogger(ctx.Logging, "mig-runner")
	if err != nil {
		panic(err)
	}

	return ctx, nil
}
Example #7
// Init prepares the AMQP connections to the broker and launches the
// goroutines that will process commands received by the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		if ctx.Channels.Log != nil {
			ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
		}
	}()
	// Pick up a lock on Context Agent field as we will be updating or reading it here and in
	// various functions called from here such as daemonize().
	ctx.Agent.Lock()
	defer ctx.Agent.Unlock()

	ctx.Agent.Tags = TAGS

	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}
	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}
	// Logging goroutine
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()

	// Gather new agent context information to use as the context for this
	// agent invocation
	hints := agentcontext.AgentContextHints{
		DiscoverPublicIP: DISCOVERPUBLICIP,
		DiscoverAWSMeta:  DISCOVERAWSMETA,
		APIUrl:           APIURL,
		Proxies:          PROXIES[:],
	}
	actx, err := agentcontext.NewAgentContext(ctx.Channels.Log, hints)
	if err != nil {
		panic(err)
	}

	// defines whether the agent should respawn itself or not
	// this value is overridden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd
	ctx.Agent.Respawn = ISIMMORTAL

	// Do initial assignment of values which could change over the lifetime
	// of the agent process
	ctx.updateVolatileFromAgentContext(actx)

	// Set some other values obtained from the agent context which will not
	// change while the process is running.
	ctx.Agent.RunDir = actx.RunDir
	ctx.Agent.BinPath = actx.BinPath

	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}

	// build the agent message queue location
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s", ctx.Agent.Env.OS, ctx.Agent.UID)

	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}

	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}

	connected := false
	// connect to the message broker
	//
	// If any proxies have been configured, we try to use those first. If they fail, or
	// no proxies have been setup, just attempt a direct connection.
	for _, proxy := range PROXIES {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Trying proxy %v for relay connection", proxy)}.Debug()
		ctx, err = initMQ(ctx, true, proxy)
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Info()
			continue
		}
		connected = true
		goto mqdone
	}
	// Try any proxy that has been specified in the environment
	ctx.Channels.Log <- mig.Log{Desc: "Trying proxies from environment for relay connection"}.Debug()
	ctx, err = initMQ(ctx, true, "")
	if err == nil {
		connected = true
		goto mqdone
	} else {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Info()
	}
	// Fall back to a direct connection
	ctx.Channels.Log <- mig.Log{Desc: "Trying direct relay connection"}.Debug()
	ctx, err = initMQ(ctx, false, "")
	if err == nil {
		connected = true
	} else {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Info()
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}

	// catch interrupts
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()

	// try to connect the stat socket until it works
	// this may fail if one agent is already running
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}

	return
}
Example #8
func main() {
	var (
		err  error
		conf Config
		hint gozdef.HostAssetHint
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that listens to new endpoints and sends them as assets to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/agent-intel-worker.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()
	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}
	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}

	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}

	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Agt_New)
	if err != nil {
		panic(err)
	}

	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}

	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Agt_New})
	for event := range consumerChan {
		var agt mig.Agent
		err = json.Unmarshal(event.Body, &agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid agent description: %v", err)}.Err())
			continue
		}
		agt, err = populateTeam(agt, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to populate agent team: %v", err)}.Err())
		}
		hint, err = makeHintFromAgent(agt)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to build asset hint: %v", err)}.Err())
			continue
		}
		err = publishHintToMozdef(hint, gp)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
			// if publication to mozdef fails, crash the worker; systemd/upstart will start a new one
			panic(err)
		}
		mig.ProcessLog(logctx, mig.Log{Desc: "published asset hint for agent '" + hint.Name + "' to mozdef"}.Info())
	}
	return
}
Example #9
File: main.go Project: keoni161/mig
func main() {
	var (
		err   error
		conf  Config
		items []gozdef.ComplianceItem
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s - a worker that transform commands results into compliance items and publishes them to mozdef\n", os.Args[0])
		flag.PrintDefaults()
	}
	var configPath = flag.String("c", "/etc/mig/compliance-item-worker.cfg", "Load configuration from file")
	var showversion = flag.Bool("V", false, "Show build version and exit")
	flag.Parse()
	if *showversion {
		fmt.Println(mig.Version)
		os.Exit(0)
	}

	err = gcfg.ReadFileInto(&conf, *configPath)
	if err != nil {
		panic(err)
	}

	logctx, err := mig.InitLogger(conf.Logging, workerName)
	if err != nil {
		panic(err)
	}

	// bind to the MIG event queue
	workerQueue := "migevent.worker." + workerName
	consumerChan, err := workers.InitMqWithConsumer(conf.Mq, workerQueue, mig.Ev_Q_Cmd_Res)
	if err != nil {
		panic(err)
	}

	// bind to the mozdef relay exchange
	gp, err := gozdef.InitAmqp(conf.MozDef)
	if err != nil {
		panic(err)
	}

	mig.ProcessLog(logctx, mig.Log{Desc: "worker started, consuming queue " + workerQueue + " from key " + mig.Ev_Q_Cmd_Res})
	tFamRe := regexp.MustCompile("(?i)^compliance$")
	for event := range consumerChan {
		var cmd mig.Command
		err = json.Unmarshal(event.Body, &cmd)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("invalid command: %v", err)}.Err())
		}
		// discard actions that aren't threat.family=compliance
		if !tFamRe.MatchString(cmd.Action.Threat.Family) {
			continue
		}
		items, err = makeComplianceItem(cmd, conf)
		if err != nil {
			mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make compliance items: %v", err)}.Err())
		}
		for _, item := range items {
			// create a new event and set values in the fields
			ev, err := gozdef.NewEvent()
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to make new mozdef event: %v", err)}.Err())
			}
			ev.Category = "complianceitems"
			ev.Source = "mig"
			cverb := "fails"
			if item.Compliance {
				cverb = "passes"
			}
			ev.Summary = fmt.Sprintf("%s %s compliance with %s", item.Target, cverb, item.Check.Ref)
			ev.Tags = append(ev.Tags, "mig")
			ev.Tags = append(ev.Tags, "compliance")
			ev.Info()
			ev.Details = item
			err = gp.Send(ev)
			if err != nil {
				mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("failed to publish to mozdef: %v", err)}.Err())
				// if publication to mozdef fails, crash the worker; systemd/upstart will start a new one
				panic(err)
			}
		}
		mig.ProcessLog(logctx, mig.Log{Desc: fmt.Sprintf("published %d items from command %.0f to mozdef", len(items), cmd.ID)}.Info())
	}
	return
}
Example #10
// Init prepares the AMQP connections to the broker and launches the
// goroutines that will process commands received by the MIG Scheduler
func Init(foreground, upgrade bool) (ctx Context, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("initAgent() -> %v", e)
		}
		ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug()
	}()
	ctx.Agent.Tags = TAGS

	ctx.Logging, err = mig.InitLogger(LOGGINGCONF, "mig-agent")
	if err != nil {
		panic(err)
	}
	// create the go channels
	ctx, err = initChannels(ctx)
	if err != nil {
		panic(err)
	}
	// Logging goroutine
	go func() {
		for event := range ctx.Channels.Log {
			_, err := mig.ProcessLog(ctx.Logging, event)
			if err != nil {
				fmt.Println("Unable to process logs")
			}
		}
	}()
	ctx.Channels.Log <- mig.Log{Desc: "Logging routine initialized."}.Debug()

	// defines whether the agent should respawn itself or not
	// this value is overridden in the daemonize calls if the agent
	// is controlled by systemd, upstart or launchd
	ctx.Agent.Respawn = ISIMMORTAL

	// get the path of the executable
	ctx.Agent.BinPath, err = osext.Executable()
	if err != nil {
		panic(err)
	}

	// retrieve the hostname
	ctx, err = findHostname(ctx)
	if err != nil {
		panic(err)
	}

	// retrieve information about the operating system
	ctx.Agent.Env.OS = runtime.GOOS
	ctx.Agent.Env.Arch = runtime.GOARCH
	ctx, err = findOSInfo(ctx)
	if err != nil {
		panic(err)
	}
	ctx, err = findLocalIPs(ctx)
	if err != nil {
		panic(err)
	}

	// Attempt to discover the public IP
	if DISCOVERPUBLICIP {
		ctx, err = findPublicIP(ctx)
		if err != nil {
			panic(err)
		}
	}

	// find the run directory
	ctx.Agent.RunDir = getRunDir()

	// get the agent ID
	ctx, err = initAgentID(ctx)
	if err != nil {
		panic(err)
	}

	// build the agent message queue location
	ctx.Agent.QueueLoc = fmt.Sprintf("%s.%s.%s", ctx.Agent.Env.OS, ctx.Agent.Hostname, ctx.Agent.UID)

	// daemonize if not in foreground mode
	if !foreground {
		// give one second for the caller to exit
		time.Sleep(time.Second)
		ctx, err = daemonize(ctx, upgrade)
		if err != nil {
			panic(err)
		}
	}

	ctx.Sleeper = HEARTBEATFREQ
	if err != nil {
		panic(err)
	}

	// parse the ACLs
	ctx, err = initACL(ctx)
	if err != nil {
		panic(err)
	}

	connected := false
	// connect to the message broker
	ctx, err = initMQ(ctx, false, "")
	if err != nil {
		ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay directly: '%v'", err)}.Debug()
		// if the connection failed, look for a proxy
		// in the environment variables, and try again
		ctx, err = initMQ(ctx, true, "")
		if err != nil {
			ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using HTTP_PROXY: '%v'", err)}.Debug()
			// still failing, try connecting using the proxies in the configuration
			for _, proxy := range PROXIES {
				ctx, err = initMQ(ctx, true, proxy)
				if err != nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect to relay using proxy %s: '%v'", proxy, err)}.Debug()
					continue
				}
				connected = true
				goto mqdone
			}
		} else {
			connected = true
		}
	} else {
		connected = true
	}
mqdone:
	if !connected {
		panic("Failed to connect to the relay")
	}

	// catch interrupts
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		sig := <-c
		ctx.Channels.Terminate <- sig.String()
	}()

	// try to connect the stat socket until it works
	// this may fail if one agent is already running
	if SOCKET != "" {
		go func() {
			for {
				ctx.Socket.Bind = SOCKET
				ctx, err = initSocket(ctx)
				if err == nil {
					ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Stat socket connected successfully on %s", ctx.Socket.Bind)}.Info()
					goto socketdone
				}
				ctx.Channels.Log <- mig.Log{Desc: fmt.Sprintf("Failed to connect stat socket: '%v'", err)}.Err()
				time.Sleep(60 * time.Second)
			}
		socketdone:
			return
		}()
	}

	return
}