// REST API instances that are started are killed when the tests end. // When we eventually have a REST API Stop command this can be killed. func startAPI() string { // Start a REST API to talk to rest.StreamingBufferWindow = 0.01 log.SetLevel(LOG_LEVEL) r, _ := rest.New(rest.GetDefaultConfig()) c := control.New(control.GetDefaultConfig()) c.Start() s := scheduler.New(scheduler.GetDefaultConfig()) s.SetMetricManager(c) s.Start() r.BindConfigManager(c.Config) r.BindMetricManager(c) r.BindTaskManager(s) go func(ch <-chan error) { // Block on the error channel. Will return exit status 1 for an error or just return if the channel closes. err, ok := <-ch if !ok { return } log.Fatal(err) }(r.Err()) r.SetAddress("127.0.0.1:0") r.Start() time.Sleep(100 * time.Millisecond) return fmt.Sprintf("http://localhost:%d", r.Port()) }
// REST API instances that are started are killed when the tests end. // When we eventually have a REST API Stop command this can be killed. func startAPI(opts ...interface{}) *restAPIInstance { // Start a REST API to talk to log.SetLevel(LOG_LEVEL) r, _ := New(false, "", "") controlOpts := []control.PluginControlOpt{} for _, opt := range opts { switch t := opt.(type) { case control.PluginControlOpt: controlOpts = append(controlOpts, t) } } c := control.New(controlOpts...) c.Start() s := scheduler.New() s.SetMetricManager(c) s.Start() r.BindMetricManager(c) r.BindTaskManager(s) r.BindConfigManager(c.Config) go func(ch <-chan error) { // Block on the error channel. Will return exit status 1 for an error or just return if the channel closes. err, ok := <-ch if !ok { return } log.Fatal(err) }(r.Err()) r.Start("127.0.0.1:0") time.Sleep(time.Millisecond * 100) return &restAPIInstance{ port: r.Port(), server: r, } }
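// A minimal usage sketch for the variadic startAPI above. The only
// control.PluginControlOpt constructors visible in these snippets are
// control.MaxRunningPlugins and control.CacheExpiration (used in the daemon
// code further down); the option values here are illustrative assumptions only.
func startAPIWithSmallControl() *restAPIInstance {
	return startAPI(
		control.MaxRunningPlugins(1),
		control.CacheExpiration(500*time.Millisecond),
	)
}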
// REST API instances that are started are killed when the tests end. // When we eventually have a REST API Stop command this can be killed. func startAPI(cfg *mockConfig) *restAPIInstance { // Start a REST API to talk to log.SetLevel(LOG_LEVEL) r, _ := New(cfg.RestAPI) c := control.New(cfg.Control) c.Start() s := scheduler.New(cfg.Scheduler) s.SetMetricManager(c) s.Start() r.BindMetricManager(c) r.BindTaskManager(s) r.BindConfigManager(c.Config) go func(ch <-chan error) { // Block on the error channel. Will return exit status 1 for an error or just return if the channel closes. err, ok := <-ch if !ok { return } log.Fatal(err) }(r.Err()) r.SetAddress("127.0.0.1", 0) r.Start() time.Sleep(time.Millisecond * 100) return &restAPIInstance{ port: r.Port(), server: r, } }
// REST API instances that are started are killed when the tests end. // When we eventually have a REST API Stop command this can be killed. func startAPI(opts ...interface{}) *restAPIInstance { // Start a REST API to talk to log.SetLevel(LOG_LEVEL) r, _ := New(false, "", "") controlOpts := []control.ControlOpt{} for _, opt := range opts { switch t := opt.(type) { case control.ControlOpt: controlOpts = append(controlOpts, t) } } c := control.New(controlOpts...) c.Start() s := scheduler.New() s.SetMetricManager(c) s.Start() r.BindMetricManager(c) r.BindTaskManager(s) r.BindConfigManager(c.Config) err := r.Start("127.0.0.1:0") if err != nil { // Panic on an error panic(err) } time.Sleep(time.Millisecond * 100) return &restAPIInstance{ port: r.Port(), server: r, } }
// returns an array of the mgtports and the tribe port for the last node func startTribes(count int, seed string) ([]int, int) { var wg sync.WaitGroup var tribePort int var mgtPorts []int for i := 0; i < count; i++ { mgtPort := getAvailablePort() mgtPorts = append(mgtPorts, mgtPort) tribePort = getAvailablePort() conf := tribe.DefaultConfig(fmt.Sprintf("member-%v", mgtPort), "127.0.0.1", tribePort, seed, mgtPort) // conf.MemberlistConfig.PushPullInterval = 5 * time.Second conf.MemberlistConfig.RetransmitMult = conf.MemberlistConfig.RetransmitMult * 2 if seed == "" { seed = fmt.Sprintf("%s:%d", "127.0.0.1", tribePort) } t, err := tribe.New(conf) if err != nil { panic(err) } c := control.New() c.RegisterEventHandler("tribe", t) c.Start() s := scheduler.New() s.SetMetricManager(c) s.RegisterEventHandler("tribe", t) s.Start() t.SetPluginCatalog(c) t.SetTaskManager(s) t.Start() r, _ := New(false, "", "") r.BindMetricManager(c) r.BindTaskManager(s) r.BindTribeManager(t) r.Start(":" + strconv.Itoa(mgtPort)) wg.Add(1) timer := time.After(10 * time.Second) go func(port int) { defer wg.Done() for { select { case <-timer: panic("timed out") default: time.Sleep(100 * time.Millisecond) resp := getMembers(port) if resp.Meta.Code == 200 && len(resp.Body.(*rbody.TribeMemberList).Members) >= count { restLogger.Infof("num of members %v", len(resp.Body.(*rbody.TribeMemberList).Members)) return } } } }(mgtPort) } wg.Wait() return mgtPorts, tribePort }
func TestCollectPublishWorkflow(t *testing.T) { log.SetLevel(log.FatalLevel) Convey("Given a started plugin control", t, func() { c := control.New() c.Start() s := New() s.SetMetricManager(c) Convey("create a workflow", func() { rp, err := core.NewRequestedPlugin(snap_collector_mock2_path) So(err, ShouldBeNil) _, err = c.Load(rp) So(err, ShouldBeNil) rp2, err := core.NewRequestedPlugin(snap_publisher_file_path) So(err, ShouldBeNil) _, err = c.Load(rp2) So(err, ShouldBeNil) rp3, err := core.NewRequestedPlugin(snap_processor_passthru_path) So(err, ShouldBeNil) _, err = c.Load(rp3) So(err, ShouldBeNil) time.Sleep(100 * time.Millisecond) metrics, err2 := c.MetricCatalog() So(err2, ShouldBeNil) So(metrics, ShouldNotBeEmpty) w := wmap.NewWorkflowMap() w.CollectNode.AddMetric("/intel/mock/foo", 2) w.CollectNode.AddConfigItem("/intel/mock/foo", "password", "secret") pu := wmap.NewPublishNode("file", 3) pu.AddConfigItem("file", "/tmp/snap-TestCollectPublishWorkflow.out") pr := wmap.NewProcessNode("passthru", 1) time.Sleep(100 * time.Millisecond) pr.Add(pu) w.CollectNode.Add(pr) Convey("Start scheduler", func() { err := s.Start() So(err, ShouldBeNil) Convey("Create task", func() { t, err := s.CreateTask(schedule.NewSimpleSchedule(time.Millisecond*500), w, false) So(err.Errors(), ShouldBeEmpty) So(t, ShouldNotBeNil) t.(*task).Spin() time.Sleep(3 * time.Second) }) }) }) }) }
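// A sketch, not part of the original tests: the collect -> process -> publish
// workflow that TestCollectPublishWorkflow above builds inline, extracted into
// a helper. It uses only the wmap calls that already appear in that test; the
// *wmap.WorkflowMap return type is an assumption about the package.
func mockToFileWorkflow() *wmap.WorkflowMap {
	w := wmap.NewWorkflowMap()
	// collect /intel/mock/foo with mock2 (version 2) and a config item
	w.CollectNode.AddMetric("/intel/mock/foo", 2)
	w.CollectNode.AddConfigItem("/intel/mock/foo", "password", "secret")
	// publish through the file publisher (version 3)
	pu := wmap.NewPublishNode("file", 3)
	pu.AddConfigItem("file", "/tmp/snap-TestCollectPublishWorkflow.out")
	// run collected metrics through passthru (version 1) before publishing
	pr := wmap.NewProcessNode("passthru", 1)
	pr.Add(pu)
	w.CollectNode.Add(pr)
	return w
}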
func TestMockPluginLoad(t *testing.T) { // These tests only work if SNAP_PATH is known. // It is the responsibility of the testing framework to // build the plugins first into the build dir. Convey("make sure plugin has been built", t, func() { err := helper.CheckPluginBuilt(SnapPath, PluginName) So(err, ShouldBeNil) Convey("ensure plugin loads and responds", func() { c := control.New(control.GetDefaultConfig()) c.Start() rp, _ := core.NewRequestedPlugin(PluginPath) _, err := c.Load(rp) So(err, ShouldBeNil) }) }) }
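// A plausible sketch of what a check like helper.CheckPluginBuilt (used above)
// might amount to: verify the plugin binary exists under the build directory
// before trying to load it. The real helper is not shown in these snippets and
// the "plugin" subdirectory layout below is an assumption.
func checkPluginBuilt(buildDir, pluginName string) error {
	p := filepath.Join(buildDir, "plugin", pluginName)
	if _, err := os.Stat(p); err != nil {
		return fmt.Errorf("plugin %s not built (expected at %s): %v", pluginName, p, err)
	}
	return nil
}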
func TestMockPluginLoad(t *testing.T) { // These tests only work if SNAP_PATH is known. // It is the responsibility of the testing framework to // build the plugins first into the build dir. if SnapPath != "" { // Helper plugin trigger build if possible for this plugin helper.BuildPlugin(PluginType, PluginName) // Convey("ensure plugin loads and responds", t, func() { c := control.New() c.Start() rp, _ := core.NewRequestedPlugin(PluginPath) _, err := c.Load(rp) So(err, ShouldBeNil) }) } else { fmt.Printf("SNAP_PATH not set. Cannot test %s plugin.\n", PluginName) } }
// REST API instances that are started are killed when the tests end. // When we eventually have a REST API Stop command this can be killed. func startAPI() string { // Start a REST API to talk to rest.StreamingBufferWindow = 0.01 log.SetLevel(LOG_LEVEL) r, _ := rest.New(false, "", "") c := control.New() c.Start() s := scheduler.New() s.SetMetricManager(c) s.Start() r.BindConfigManager(c.Config) r.BindMetricManager(c) r.BindTaskManager(s) err := r.Start("127.0.0.1:0") if err != nil { // Panic on an error panic(err) } time.Sleep(100 * time.Millisecond) return fmt.Sprintf("http://localhost:%d", r.Port()) }
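// A sketch of how a test might talk to the REST API started by the startAPI
// variant above, which returns a base URL. The /v1/plugins endpoint and the
// expectation of a 200 response are assumptions about the API, not something
// shown in these snippets.
func checkAPIUp(baseURL string) error {
	resp, err := http.Get(baseURL + "/v1/plugins")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status from REST API: %d", resp.StatusCode)
	}
	return nil
}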
func TestFilePublisherLoad(t *testing.T) { // These tests only work if SNAP_PATH is known. // It is the responsibility of the testing framework to // build the plugins first into the build dir. if SnapPath != "" { // Helper plugin trigger build if possible for this plugin helper.BuildPlugin(PluginType, PluginName) // //TODO cannot test this locally. We need AMQP and integration tests. SkipConvey("ensure plugin loads and responds", t, func() { c := control.New(control.GetDefaultConfig()) c.Start() rp, _ := core.NewRequestedPlugin(PluginPath) _, err := c.Load(rp) So(err, ShouldBeNil) }) } else { fmt.Printf("SNAP_PATH not set. Cannot test %s plugin.\n", PluginName) } }
func TestDistributedWorkflow(t *testing.T) { Convey("Create a scheduler with 2 controls and load plugins", t, func() { l, _ := net.Listen("tcp", ":0") l.Close() cfg := control.GetDefaultConfig() cfg.ListenPort = l.Addr().(*net.TCPAddr).Port c1 := control.New(cfg) c1.Start() m, _ := net.Listen("tcp", ":0") m.Close() cfg.ListenPort = m.Addr().(*net.TCPAddr).Port port1 := cfg.ListenPort c2 := control.New(cfg) schcfg := GetDefaultConfig() sch := New(schcfg) c2.Start() sch.SetMetricManager(c1) err := sch.Start() So(err, ShouldBeNil) // Load appropriate plugins into each control. mock2Path := path.Join(PluginPath, "snap-collector-mock2") passthruPath := path.Join(PluginPath, "snap-processor-passthru") filePath := path.Join(PluginPath, "snap-publisher-file") // mock2 and file onto c1 rp, err := core.NewRequestedPlugin(mock2Path) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) rp, err = core.NewRequestedPlugin(filePath) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) // passthru on c2 rp, err = core.NewRequestedPlugin(passthruPath) So(err, ShouldBeNil) passthru, err := c2.Load(rp) So(err, ShouldBeNil) Convey("Test task with one local and one remote node", func() { //Create a task //Create a workflowmap wf := dsWFMap(port1) t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) }) Convey("Test task with invalid remote port", func() { wf := dsWFMap(0) controlproxy.MAX_CONNECTION_TIMEOUT = 1 * time.Second t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 1) So(t, ShouldBeNil) }) Convey("Test task without remote plugin", func() { _, err := c2.Unload(passthru) So(err, ShouldBeNil) wf := dsWFMap(port1) t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 1) So(t, ShouldBeNil) }) Convey("Test task failing when control is stopped while task is running", func() { wf := dsWFMap(port1) controlproxy.MAX_CONNECTION_TIMEOUT = 10 * time.Second interval := time.Millisecond * 100 t, errs := sch.CreateTask(schedule.NewSimpleSchedule(interval), wf, true) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) c2.Stop() // Give task time to fail time.Sleep(time.Second) tasks := sch.GetTasks() var task core.Task for _, v := range tasks { task = v } So(task.State(), ShouldEqual, core.TaskDisabled) }) }) }
func action(ctx *cli.Context) { // get default configuration cfg := getDefaultConfig() // read config file readConfig(cfg, ctx.String("config")) // apply values that may have been passed from the command line // to the configuration that we have built so far, overriding the // values that may have already been set (if any) for the // same variables in that configuration applyCmdLineFlags(cfg, ctx) // If logPath is set, we verify the logPath and set it so that all logging // goes to the log file instead of stdout. logPath := cfg.LogPath if logPath != "" { f, err := os.Stat(logPath) if err != nil { log.Fatal(err) } if !f.IsDir() { log.Fatal("log path provided must be a directory") } file, err := os.OpenFile(fmt.Sprintf("%s/snapd.log", logPath), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { log.Fatal(err) } defer file.Close() log.SetOutput(file) } log.Info("Starting snapd (version: ", gitversion, ")") // Set Max Processors for snapd. setMaxProcs(cfg.GoMaxProcs) // Validate log level and trust level settings for snapd validateLevelSettings(cfg.LogLevel, cfg.Control.PluginTrust) c := control.New(cfg.Control) coreModules = []coreModule{} coreModules = append(coreModules, c) s := scheduler.New(cfg.Scheduler) s.SetMetricManager(c) coreModules = append(coreModules, s) // Auth requested and not provided as part of config if cfg.RestAPI.Enable && cfg.RestAPI.RestAuth && cfg.RestAPI.RestAuthPassword == "" { fmt.Println("What password do you want to use for authentication?") fmt.Print("Password:") password, err := terminal.ReadPassword(0) /* assumed: the original password-reading call was redacted here */ if err != nil { log.Fatal("Failed to get credentials") } cfg.RestAPI.RestAuthPassword = string(password) } var tr managesTribe if cfg.Tribe.Enable { cfg.Tribe.RestAPIPort = cfg.RestAPI.Port if cfg.RestAPI.RestAuth { cfg.Tribe.RestAPIPassword = cfg.RestAPI.RestAuthPassword } log.Info("Tribe is enabled") t, err := tribe.New(cfg.Tribe) if err != nil { printErrorAndExit(t.Name(), err) } c.RegisterEventHandler("tribe", t) t.SetPluginCatalog(c) s.RegisterEventHandler("tribe", t) t.SetTaskManager(s) coreModules = append(coreModules, t) tr = t } //Setup RESTful API if it was enabled in the configuration if cfg.RestAPI.Enable { r, err := rest.New(cfg.RestAPI) if err != nil { log.Fatal(err) } r.BindMetricManager(c) r.BindConfigManager(c.Config) r.BindTaskManager(s) //Rest Authentication if cfg.RestAPI.RestAuth { log.Info("REST API authentication is enabled") r.SetAPIAuth(cfg.RestAPI.RestAuth) log.Info("REST API authentication password is set") r.SetAPIAuthPwd(cfg.RestAPI.RestAuthPassword) if !cfg.RestAPI.HTTPS { log.Warning("Using REST API authentication without HTTPS enabled.") } } if tr != nil { r.BindTribeManager(tr) } go monitorErrors(r.Err()) r.SetAddress(cfg.RestAPI.Address, cfg.RestAPI.Port) coreModules = append(coreModules, r) log.Info("REST API is enabled") } else { log.Info("REST API is disabled") } // Set interrupt handling so we can die gracefully. startInterruptHandling(coreModules...) 
// Start our modules var started []coreModule for _, m := range coreModules { if err := startModule(m); err != nil { for _, m := range started { m.Stop() } printErrorAndExit(m.Name(), err) } started = append(started, m) } // Plugin Trust c.SetPluginTrustLevel(cfg.Control.PluginTrust) log.Info("setting plugin trust level to: ", t[cfg.Control.PluginTrust]) // Keyring checking for trust levels 1 and 2 if cfg.Control.PluginTrust > 0 { keyrings := filepath.SplitList(cfg.Control.KeyringPaths) if len(keyrings) == 0 { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Fatal("need keyring file when trust is on (--keyring-file or -k)") } for _, k := range keyrings { keyringPath, err := filepath.Abs(k) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("Unable to determine absolute path to keyring file") } f, err := os.Stat(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("bad keyring file") } if f.IsDir() { log.Info("Adding keyrings from: ", keyringPath) files, err := ioutil.ReadDir(keyringPath) if err != nil { log.WithFields( log.Fields{ "_block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal(err) } if len(files) == 0 { log.Fatal(fmt.Sprintf("given keyring path [%s] is an empty directory!", keyringPath)) } for _, keyringFile := range files { if keyringFile.IsDir() { continue } if strings.HasSuffix(keyringFile.Name(), ".gpg") || (strings.HasSuffix(keyringFile.Name(), ".pub")) || (strings.HasSuffix(keyringFile.Name(), ".pubring")) { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Warning("unable to open keyring file. not adding to keyring path") continue } f.Close() log.Info("adding keyring file: ", keyringPath+"/"+keyringFile.Name()) c.SetKeyringFile(keyringPath + "/" + keyringFile.Name()) } } } else { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("unable to open keyring file.") } f.Close() log.Info("adding keyring file ", keyringPath) c.SetKeyringFile(keyringPath) } } } log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Info("snapd started") // Switch log level to user defined log.Info("setting log level to: ", l[cfg.LogLevel]) log.SetLevel(getLevel(cfg.LogLevel)) select {} //run forever and ever }
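// A sketch: the coreModule type used throughout the daemon code above is not
// defined in these snippets. From the way it is used (startModule(m) returns an
// error, and m.Stop() / m.Name() are called during rollback), it plausibly
// looks like the interface below; this is an assumption, not the real definition.
type coreModule interface {
	Name() string
	Start() error
	Stop()
}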
func startTribes(count int) []int { seed := "" var wg sync.WaitGroup var mgtPorts []int for i := 0; i < count; i++ { mgtPort := getPort() mgtPorts = append(mgtPorts, mgtPort) tribePort := getPort() conf := tribe.GetDefaultConfig() conf.Name = fmt.Sprintf("member-%v", mgtPort) conf.BindAddr = "127.0.0.1" conf.BindPort = tribePort conf.Seed = seed conf.RestAPIPort = mgtPort conf.MemberlistConfig.PushPullInterval = 5 * time.Second conf.MemberlistConfig.RetransmitMult = conf.MemberlistConfig.RetransmitMult * 2 if seed == "" { seed = fmt.Sprintf("%s:%d", "127.0.0.1", tribePort) } t, err := tribe.New(conf) if err != nil { panic(err) } c := control.New(control.GetDefaultConfig()) c.RegisterEventHandler("tribe", t) c.Start() s := scheduler.New(scheduler.GetDefaultConfig()) s.SetMetricManager(c) s.RegisterEventHandler("tribe", t) s.Start() t.SetPluginCatalog(c) t.SetTaskManager(s) t.Start() r, _ := rest.New(rest.GetDefaultConfig()) r.BindMetricManager(c) r.BindTaskManager(s) r.BindTribeManager(t) r.SetAddress("", mgtPort) r.Start() wg.Add(1) timer := time.After(10 * time.Second) go func(port int) { defer wg.Done() for { select { case <-timer: panic("timed out") default: time.Sleep(100 * time.Millisecond) resp := getMembers(port) if resp.Meta.Code == 200 && len(resp.Body.(*rbody.TribeMemberList).Members) == count { log.Infof("num of members %v", len(resp.Body.(*rbody.TribeMemberList).Members)) return } } } }(mgtPort) } wg.Wait() uris := make([]int, len(mgtPorts)) for idx, port := range mgtPorts { uris[idx] = port } return uris }
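// Not part of the original code: the member-polling loops in the tribe helpers
// above all follow the same shape (poll every 100ms, give up after a timeout).
// A generic form of that pattern, for reference.
func waitFor(cond func() bool, timeout time.Duration) bool {
	deadline := time.After(timeout)
	for {
		select {
		case <-deadline:
			return false
		default:
			if cond() {
				return true
			}
			time.Sleep(100 * time.Millisecond)
		}
	}
}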
// returns an array of the mgtports and the tribe port for the last node func startTribes(count int, seed string) ([]int, int, *listenToSeedEvents) { var wg sync.WaitGroup var tribePort int var mgtPorts []int lpe := newListenToSeedEvents() for i := 0; i < count; i++ { mgtPort := getAvailablePort() mgtPorts = append(mgtPorts, mgtPort) tribePort = getAvailablePort() conf := tribe.GetDefaultConfig() conf.Name = fmt.Sprintf("member-%v", mgtPort) conf.BindAddr = "127.0.0.1" conf.BindPort = tribePort conf.Seed = seed conf.RestAPIPort = mgtPort //conf.MemberlistConfig.PushPullInterval = 5 * time.Second conf.MemberlistConfig.RetransmitMult = conf.MemberlistConfig.RetransmitMult * 2 t, err := tribe.New(conf) if err != nil { panic(err) } if seed == "" { seed = fmt.Sprintf("%s:%d", "127.0.0.1", tribePort) t.EventManager.RegisterHandler("tribe.tests", lpe) } cfg := control.GetDefaultConfig() // get an available port to avoid conflicts (we aren't testing remote workflows here) cfg.ListenPort = getAvailablePort() c := control.New(cfg) c.RegisterEventHandler("tribe", t) c.Start() s := scheduler.New(scheduler.GetDefaultConfig()) s.SetMetricManager(c) s.RegisterEventHandler("tribe", t) s.Start() t.SetPluginCatalog(c) t.SetTaskManager(s) t.Start() r, _ := New(GetDefaultConfig()) r.BindMetricManager(c) r.BindTaskManager(s) r.BindTribeManager(t) r.SetAddress(fmt.Sprintf("127.0.0.1:%d", mgtPort)) r.Start() wg.Add(1) timer := time.After(10 * time.Second) go func(port int) { defer wg.Done() for { select { case <-timer: panic("timed out") default: time.Sleep(100 * time.Millisecond) resp := getMembers(port) if resp.Meta.Code == 200 && len(resp.Body.(*rbody.TribeMemberList).Members) >= count { restLogger.Infof("num of members %v", len(resp.Body.(*rbody.TribeMemberList).Members)) return } } } }(mgtPort) } wg.Wait() return mgtPorts, tribePort, lpe }
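// A sketch: getAvailablePort is used by the tribe helpers above but is not
// defined in these snippets. One plausible implementation is the same trick the
// distributed-workflow tests use directly: bind to port 0, record the port the
// kernel handed out, and close the listener.
func getAvailablePort() int {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port
}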
func action(ctx *cli.Context) error { // get default configuration cfg := getDefaultConfig() // read config file readConfig(cfg, ctx.String("config")) // apply values that may have been passed from the command line // to the configuration that we have built so far, overriding the // values that may have already been set (if any) for the // same variables in that configuration applyCmdLineFlags(cfg, ctx) // test the resulting configuration to ensure the values it contains still pass the // constraints after applying the environment variables and command-line parameters; // if errors are found, report them and exit with a fatal error jb, _ := json.Marshal(cfg) serrs := cfgfile.ValidateSchema(CONFIG_CONSTRAINTS, string(jb)) if serrs != nil { for _, serr := range serrs { log.WithFields(serr.Fields()).Error(serr.Error()) } log.Fatal("Errors found after applying command-line flags") } // If logPath is set, we verify the logPath and set it so that all logging // goes to the log file instead of stdout. logPath := cfg.LogPath if logPath != "" { f, err := os.Stat(logPath) if err != nil { log.Fatal(err) } if !f.IsDir() { log.Fatal("log path provided must be a directory") } aMode := os.O_APPEND if cfg.LogTruncate { aMode = os.O_TRUNC } file, err := os.OpenFile(fmt.Sprintf("%s/snapd.log", logPath), os.O_RDWR|os.O_CREATE|aMode, 0666) if err != nil { log.Fatal(err) } defer file.Close() log.SetOutput(file) } // Because even though github.com/Sirupsen/logrus states that // 'Logs the event in colors if stdout is a tty, otherwise without colors' // Seems like this does not work // Please note however that the default output format without colors is somewhat different (timestamps, ...) // // We could also restrict this command line parameter to only apply when no logpath is given // and forcing the coloring to off when using a file but this might not please users who like to use // redirect mechanisms like # snapd -t 0 -l 1 2>&1 | tee my.log if !cfg.LogColors { log.SetFormatter(&log.TextFormatter{DisableColors: true}) } // Validate log level and trust level settings for snapd validateLevelSettings(cfg.LogLevel, cfg.Control.PluginTrust) // Switch log level to user defined log.SetLevel(getLevel(cfg.LogLevel)) log.Info("setting log level to: ", l[cfg.LogLevel]) log.Info("Starting snapd (version: ", gitversion, ")") // Set Max Processors for snapd. 
setMaxProcs(cfg.GoMaxProcs) c := control.New(cfg.Control) coreModules = []coreModule{} coreModules = append(coreModules, c) s := scheduler.New(cfg.Scheduler) s.SetMetricManager(c) coreModules = append(coreModules, s) // Auth requested and not provided as part of config if cfg.RestAPI.Enable && cfg.RestAPI.RestAuth && cfg.RestAPI.RestAuthPassword == "" { fmt.Println("What password do you want to use for authentication?") fmt.Print("Password:") password, err := terminal.ReadPassword(0) /* assumed: the original password-reading call was redacted here */ if err != nil { log.Fatal("Failed to get credentials") } cfg.RestAPI.RestAuthPassword = string(password) } var tr managesTribe if cfg.Tribe.Enable { cfg.Tribe.RestAPIPort = cfg.RestAPI.Port if cfg.RestAPI.RestAuth { cfg.Tribe.RestAPIPassword = cfg.RestAPI.RestAuthPassword } log.Info("Tribe is enabled") t, err := tribe.New(cfg.Tribe) if err != nil { printErrorAndExit(t.Name(), err) } c.RegisterEventHandler("tribe", t) t.SetPluginCatalog(c) s.RegisterEventHandler("tribe", t) t.SetTaskManager(s) coreModules = append(coreModules, t) tr = t } //Setup RESTful API if it was enabled in the configuration if cfg.RestAPI.Enable { r, err := rest.New(cfg.RestAPI) if err != nil { log.Fatal(err) } r.BindMetricManager(c) r.BindConfigManager(c.Config) r.BindTaskManager(s) //Rest Authentication if cfg.RestAPI.RestAuth { log.Info("REST API authentication is enabled") r.SetAPIAuth(cfg.RestAPI.RestAuth) log.Info("REST API authentication password is set") r.SetAPIAuthPwd(cfg.RestAPI.RestAuthPassword) if !cfg.RestAPI.HTTPS { log.Warning("Using REST API authentication without HTTPS enabled.") } } if tr != nil { r.BindTribeManager(tr) } go monitorErrors(r.Err()) coreModules = append(coreModules, r) log.Info("REST API is enabled") } else { log.Info("REST API is disabled") } // Set interrupt handling so we can die gracefully. startInterruptHandling(coreModules...) 
// Start our modules var started []coreModule for _, m := range coreModules { if err := startModule(m); err != nil { for _, m := range started { m.Stop() } printErrorAndExit(m.Name(), err) } started = append(started, m) } // Plugin Trust c.SetPluginTrustLevel(cfg.Control.PluginTrust) log.Info("setting plugin trust level to: ", t[cfg.Control.PluginTrust]) // Keyring checking for trust levels 1 and 2 if cfg.Control.PluginTrust > 0 { keyrings := filepath.SplitList(cfg.Control.KeyringPaths) if len(keyrings) == 0 { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Fatal("need keyring file when trust is on (--keyring-file or -k)") } for _, k := range keyrings { keyringPath, err := filepath.Abs(k) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("Unable to determine absolute path to keyring file") } f, err := os.Stat(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("bad keyring file") } if f.IsDir() { log.Info("Adding keyrings from: ", keyringPath) files, err := ioutil.ReadDir(keyringPath) if err != nil { log.WithFields( log.Fields{ "_block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal(err) } if len(files) == 0 { log.Fatal(fmt.Sprintf("given keyring path [%s] is an empty directory!", keyringPath)) } for _, keyringFile := range files { if keyringFile.IsDir() { continue } if strings.HasSuffix(keyringFile.Name(), ".gpg") || (strings.HasSuffix(keyringFile.Name(), ".pub")) || (strings.HasSuffix(keyringFile.Name(), ".pubring")) { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Warning("unable to open keyring file. not adding to keyring path") continue } f.Close() log.Info("adding keyring file: ", keyringPath+"/"+keyringFile.Name()) c.SetKeyringFile(keyringPath + "/" + keyringFile.Name()) } } } else { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("unable to open keyring file.") } f.Close() log.Info("adding keyring file ", keyringPath) c.SetKeyringFile(keyringPath) } } } log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Info("snapd started") select {} //run forever and ever }
func TestDistributedSubscriptions(t *testing.T) { Convey("Load control/scheduler with a mock remote scheduler", t, func() { l, _ := net.Listen("tcp", ":0") l.Close() cfg := control.GetDefaultConfig() cfg.ListenPort = l.Addr().(*net.TCPAddr).Port c1 := control.New(cfg) c1.Start() m, _ := net.Listen("tcp", ":0") m.Close() cfg.ListenPort = m.Addr().(*net.TCPAddr).Port port1 := cfg.ListenPort c2 := control.New(cfg) schcfg := GetDefaultConfig() sch := New(schcfg) c2.Start() sch.SetMetricManager(c1) err := sch.Start() So(err, ShouldBeNil) // Load appropriate plugins into each control. mock2Path := helper.PluginFilePath("snap-plugin-collector-mock2") passthruPath := helper.PluginFilePath("snap-plugin-processor-passthru") filePath := helper.PluginFilePath("snap-plugin-publisher-mock-file") // mock2 and file onto c1 rp, err := core.NewRequestedPlugin(mock2Path) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) rp, err = core.NewRequestedPlugin(filePath) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) // passthru on c2 rp, err = core.NewRequestedPlugin(passthruPath) So(err, ShouldBeNil) _, err = c2.Load(rp) So(err, ShouldBeNil) Convey("Starting task should not succeed if remote dep fails to subscribe", func() { //Create a task //Create a workflowmap wf := dsWFMap(port1) // Create a task that is not started immediately so we can // validate deps correctly. t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) schTask := t.(*task) remoteMockManager := &subscriptionManager{Fail: true} schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager) localMockManager := &subscriptionManager{Fail: false} schTask.RemoteManagers.Add("", localMockManager) // Start task. We expect it to fail while subscribing deps terrs := sch.StartTask(t.ID()) So(terrs, ShouldNotBeNil) Convey("So dependencies should have been unsubscribed", func() { // Ensure that unsubscribe call count is equal to subscribe call count // i.e that every subscribe call was followed by an unsubscribe since // we errored So(remoteMockManager.UnsubscribeCallCount, ShouldEqual, remoteMockManager.SubscribeCallCount) So(localMockManager.UnsubscribeCallCount, ShouldEqual, localMockManager.UnsubscribeCallCount) }) }) Convey("Starting task should not succeed if missing local dep fails to subscribe", func() { //Create a task //Create a workflowmap wf := dsWFMap(port1) // Create a task that is not started immediately so we can // validate deps correctly. t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) schTask := t.(*task) localMockManager := &subscriptionManager{Fail: true} schTask.RemoteManagers.Add("", localMockManager) remoteMockManager := &subscriptionManager{Fail: false} schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager) // Start task. 
// We expect it to fail while subscribing deps terrs := sch.StartTask(t.ID()) So(terrs, ShouldNotBeNil) Convey("So dependencies should have been unsubscribed", func() { // Ensure that unsubscribe call count is equal to subscribe call count // i.e. that every subscribe call was followed by an unsubscribe since // we errored So(remoteMockManager.UnsubscribeCallCount, ShouldEqual, remoteMockManager.SubscribeCallCount) So(localMockManager.UnsubscribeCallCount, ShouldEqual, localMockManager.SubscribeCallCount) }) }) Convey("Starting task should succeed if all deps are available", func() { //Create a task //Create a workflowmap wf := dsWFMap(port1) // Create a task that is not started immediately so we can // validate deps correctly. t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) schTask := t.(*task) localMockManager := &subscriptionManager{Fail: false} schTask.RemoteManagers.Add("", localMockManager) remoteMockManager := &subscriptionManager{Fail: false} schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager) terrs := sch.StartTask(t.ID()) So(terrs, ShouldBeNil) Convey("So all dependencies should have been subscribed to", func() { // Ensure that both the local and remote managers were actually subscribed to So(localMockManager.SubscribeCallCount, ShouldBeGreaterThan, 0) So(remoteMockManager.SubscribeCallCount, ShouldBeGreaterThan, 0) }) }) }) }
func action(ctx *cli.Context) { // If logPath is set, we verify the logPath and set it so that all logging // goes to the log file instead of stdout. logPath := ctx.String("log-path") if logPath != "" { f, err := os.Stat(logPath) if err != nil { log.Fatal(err) } if !f.IsDir() { log.Fatal("log path provided must be a directory") } file, err := os.OpenFile(fmt.Sprintf("%s/snap.log", logPath), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { log.Fatal(err) } defer file.Close() log.SetOutput(file) } var l = map[int]string{ 1: "debug", 2: "info", 3: "warning", 4: "error", 5: "fatal", } var t = map[int]string{ 0: "disabled", 1: "enabled", 2: "warning", } logLevel := ctx.Int("log-level") maxProcs := ctx.Int("max-procs") disableAPI := ctx.Bool("disable-api") apiPort := ctx.Int("api-port") autodiscoverPath := ctx.String("auto-discover") maxRunning := ctx.Int("max-running-plugins") pluginTrust := ctx.Int("plugin-trust") keyringPaths := ctx.String("keyring-files") cachestr := ctx.String("cache-expiration") isTribeEnabled := ctx.Bool("tribe") tribeSeed := ctx.String("tribe-seed") tribeNodeName := ctx.String("tribe-node-name") tribeAddr := ctx.String("tribe-addr") tribePort := ctx.Int("tribe-port") cache, err := time.ParseDuration(cachestr) if err != nil { log.Fatal(fmt.Sprintf("invalid cache-expiration format: %s", cachestr)) } config := ctx.String("config") restHttps := ctx.Bool("rest-https") restKey := ctx.String("rest-key") restCert := ctx.String("rest-cert") log.Info("Starting snapd (version: ", gitversion, ")") // Set Max Processors for snapd. setMaxProcs(maxProcs) // Validate log level and trust level settings for snapd validateLevelSettings(logLevel, pluginTrust) controlOpts := []control.PluginControlOpt{ control.MaxRunningPlugins(maxRunning), control.CacheExpiration(cache), } if config != "" { b, err := ioutil.ReadFile(config) if err != nil { log.WithFields(log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "path": config, }).Fatal("unable to read config") } cfg := control.NewConfig() err = json.Unmarshal(b, &cfg) if err != nil { log.WithFields(log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "path": config, }).Fatal("invalid config") } controlOpts = append(controlOpts, control.OptSetConfig(cfg)) } c := control.New( controlOpts..., ) coreModules = []coreModule{} coreModules = append(coreModules, c) s := scheduler.New( scheduler.CollectQSizeOption(defaultQueueSize), scheduler.CollectWkrSizeOption(defaultPoolSize), scheduler.PublishQSizeOption(defaultQueueSize), scheduler.PublishWkrSizeOption(defaultPoolSize), scheduler.ProcessQSizeOption(defaultQueueSize), scheduler.ProcessWkrSizeOption(defaultPoolSize), ) s.SetMetricManager(c) coreModules = append(coreModules, s) var tr managesTribe if isTribeEnabled { log.Info("Tribe is enabled") tc := tribe.DefaultConfig(tribeNodeName, tribeAddr, tribePort, tribeSeed, apiPort) t, err := tribe.New(tc) if err != nil { printErrorAndExit(t.Name(), err) } c.RegisterEventHandler("tribe", t) t.SetPluginCatalog(c) s.RegisterEventHandler("tribe", t) t.SetTaskManager(s) coreModules = append(coreModules, t) tr = t } // Set interrupt handling so we can die gracefully. startInterruptHandling(coreModules...) 
// Start our modules var started []coreModule for _, m := range coreModules { if err := startModule(m); err != nil { for _, m := range started { m.Stop() } printErrorAndExit(m.Name(), err) } started = append(started, m) } //Plugin Trust c.SetPluginTrustLevel(pluginTrust) log.Info("setting plugin trust level to: ", t[pluginTrust]) //Keyring checking for trust levels 1 and 2 if pluginTrust > 0 { keyrings := filepath.SplitList(keyringPaths) if len(keyrings) == 0 { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Fatal("need keyring file when trust is on (--keyring-file or -k)") } for _, k := range keyrings { keyringPath, err := filepath.Abs(k) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("Unable to determine absolute path to keyring file") } f, err := os.Stat(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("bad keyring file") } if f.IsDir() { log.Info("Adding keyrings from: ", keyringPath) files, err := ioutil.ReadDir(keyringPath) if err != nil { log.WithFields( log.Fields{ "_block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal(err) } if len(files) == 0 { log.Fatal(fmt.Sprintf("given keyring path [%s] is an empty directory!", keyringPath)) } for _, keyringFile := range files { if keyringFile.IsDir() { continue } if strings.HasSuffix(keyringFile.Name(), ".gpg") || (strings.HasSuffix(keyringFile.Name(), ".pub")) || (strings.HasSuffix(keyringFile.Name(), ".pubring")) { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Warning("unable to open keyring file. 
not adding to keyring path") continue } f.Close() log.Info("adding keyring file: ", keyringPath+"/"+keyringFile.Name()) c.SetKeyringFile(keyringPath + "/" + keyringFile.Name()) } } } else { f, err := os.Open(keyringPath) if err != nil { log.WithFields( log.Fields{ "block": "main", "_module": "snapd", "error": err.Error(), "keyringPath": keyringPath, }).Fatal("unable to open keyring file.") } f.Close() log.Info("adding keyring file ", keyringPath) c.SetKeyringFile(keyringPath) } } } //Autodiscover if autodiscoverPath != "" { log.Info("auto discover path is enabled") paths := filepath.SplitList(autodiscoverPath) c.SetAutodiscoverPaths(paths) for _, p := range paths { fullPath, err := filepath.Abs(p) if err != nil { log.WithFields( log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": p, }).Fatal(err) } log.Info("autoloading plugins from: ", fullPath) files, err := ioutil.ReadDir(fullPath) if err != nil { log.WithFields( log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": fullPath, }).Fatal(err) } for _, file := range files { if file.IsDir() { continue } if strings.HasSuffix(file.Name(), ".aci") || !(strings.HasSuffix(file.Name(), ".asc")) { rp, err := core.NewRequestedPlugin(path.Join(fullPath, file.Name())) if err != nil { log.WithFields(log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": fullPath, "plugin": file, }).Error(err) } signatureFile := file.Name() + ".asc" if _, err := os.Stat(path.Join(fullPath, signatureFile)); err == nil { err = rp.ReadSignatureFile(path.Join(fullPath, signatureFile)) if err != nil { log.WithFields(log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": fullPath, "plugin": file.Name() + ".asc", }).Error(err) } } pl, err := c.Load(rp) if err != nil { log.WithFields(log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": fullPath, "plugin": file, }).Error(err) } else { log.WithFields(log.Fields{ "_block": "main", "_module": "snapd", "autodiscoverpath": fullPath, "plugin": file, "plugin-name": pl.Name(), "plugin-version": pl.Version(), "plugin-type": pl.TypeName(), }).Info("Loading plugin") } } } } } else { log.Info("auto discover path is disabled") } //API if !disableAPI { r, err := rest.New(restHttps, restCert, restKey) if err != nil { log.Fatal(err) return } r.BindMetricManager(c) r.BindConfigManager(c.Config) r.BindTaskManager(s) if tr != nil { r.BindTribeManager(tr) } r.Start(fmt.Sprintf(":%d", apiPort)) go monitorErrors(r.Err()) log.Info("Rest API is enabled") } else { log.Info("Rest API is disabled") } log.WithFields( log.Fields{ "block": "main", "_module": "snapd", }).Info("snapd started") // Switch log level to user defined log.Info("setting log level to: ", l[logLevel]) log.SetLevel(getLevel(logLevel)) select {} //run forever and ever }
func TestDistributedWorkflow(t *testing.T) { Convey("Create a scheduler with 2 controls and load plugins", t, func() { l, _ := net.Listen("tcp", ":0") l.Close() cfg := control.GetDefaultConfig() cfg.ListenPort = l.Addr().(*net.TCPAddr).Port c1 := control.New(cfg) c1.Start() m, _ := net.Listen("tcp", ":0") m.Close() cfg.ListenPort = m.Addr().(*net.TCPAddr).Port port1 := cfg.ListenPort c2 := control.New(cfg) schcfg := GetDefaultConfig() sch := New(schcfg) c2.Start() sch.SetMetricManager(c1) err := sch.Start() So(err, ShouldBeNil) // Load appropriate plugins into each control. mock2Path := helper.PluginFilePath("snap-plugin-collector-mock2") passthruPath := helper.PluginFilePath("snap-plugin-processor-passthru") filePath := helper.PluginFilePath("snap-plugin-publisher-mock-file") // mock2 and file onto c1 rp, err := core.NewRequestedPlugin(mock2Path) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) rp, err = core.NewRequestedPlugin(filePath) So(err, ShouldBeNil) _, err = c1.Load(rp) So(err, ShouldBeNil) // passthru on c2 rp, err = core.NewRequestedPlugin(passthruPath) So(err, ShouldBeNil) passthru, err := c2.Load(rp) So(err, ShouldBeNil) Convey("Test task with one local and one remote node", func() { //Create a task //Create a workflowmap wf := dsWFMap(port1) t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) // stop the scheduler and control (since in nested Convey statements, the // statements in the outer Convey execute for each of the inner Conveys // independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order // for details on execution order in Convey) sch.Stop() c2.Stop() }) Convey("Test task with invalid remote port", func() { wf := dsWFMap(0) controlproxy.MAX_CONNECTION_TIMEOUT = 1 * time.Second t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 1) So(t, ShouldBeNil) // stop the scheduler and control (since in nested Convey statements, the // statements in the outer Convey execute for each of the inner Conveys // independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order // for details on execution order in Convey) sch.Stop() c2.Stop() }) Convey("Test task without remote plugin", func() { _, err := c2.Unload(passthru) So(err, ShouldBeNil) wf := dsWFMap(port1) t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true) So(len(errs.Errors()), ShouldEqual, 1) So(t, ShouldBeNil) // stop the scheduler and control (since in nested Convey statements, the // statements in the outer Convey execute for each of the inner Conveys // independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order // for details on execution order in Convey) sch.Stop() c2.Stop() }) Convey("Test task failing when control is stopped while task is running", func() { wf := dsWFMap(port1) // set timeout so that connection attempt through the controlproxy will fail after 1 second controlproxy.MAX_CONNECTION_TIMEOUT = time.Second // define an interval that the simple scheduler will run on every 100ms interval := time.Millisecond * 100 // create our task; should be disabled after 3 failures t, errs := sch.CreateTask(schedule.NewSimpleSchedule(interval), wf, true) // ensure task was created successfully So(len(errs.Errors()), ShouldEqual, 0) So(t, ShouldNotBeNil) // create a channel to listen on for a response and setup an event handler // that will respond on that channel once the 
// 'TaskDisabledEvent' arrives respChan := make(chan struct{}) sch.RegisterEventHandler("test", &failHandler{respChan}) // then stop the controller c2.Stop() // and wait for the response (with a 30 second timeout; just in case) var ok bool select { case <-time.After(30 * time.Second): // if we get here, the select timed out waiting for a response; we don't // expect to hit this timeout since it should only take 3 seconds for // the workflow to fail to connect to the gRPC server three times, but // it might if the task did not fail as expected So("Timeout triggered waiting for disabled event", ShouldBeBlank) case <-respChan: // if we get here, we got a response on the respChan ok = true } So(ok, ShouldEqual, true) // stop the scheduler (since in nested Convey statements, the // statements in the outer Convey execute for each of the inner Conveys // independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order // for details on execution order in Convey) sch.Stop() }) }) }