Example No. 1
func TestCollectPublishWorkflow(t *testing.T) {
	log.SetLevel(log.FatalLevel)
	Convey("Given a started plugin control", t, func() {

		c := control.New()
		c.Start()
		s := New()
		s.SetMetricManager(c)
		Convey("create a workflow", func() {
			rp, err := core.NewRequestedPlugin(snap_collector_mock2_path)
			So(err, ShouldBeNil)
			_, err = c.Load(rp)
			So(err, ShouldBeNil)
			rp2, err := core.NewRequestedPlugin(snap_publisher_file_path)
			So(err, ShouldBeNil)
			_, err = c.Load(rp2)
			So(err, ShouldBeNil)
			rp3, err := core.NewRequestedPlugin(snap_processor_passthru_path)
			So(err, ShouldBeNil)
			_, err = c.Load(rp3)
			So(err, ShouldBeNil)
			time.Sleep(100 * time.Millisecond)

			metrics, err2 := c.MetricCatalog()
			So(err2, ShouldBeNil)
			So(metrics, ShouldNotBeEmpty)

			w := wmap.NewWorkflowMap()
			w.CollectNode.AddMetric("/intel/mock/foo", 2)
			w.CollectNode.AddConfigItem("/intel/mock/foo", "password", "secret")

			pu := wmap.NewPublishNode("file", 3)
			pu.AddConfigItem("file", "/tmp/snap-TestCollectPublishWorkflow.out")

			pr := wmap.NewProcessNode("passthru", 1)
			time.Sleep(100 * time.Millisecond)

			pr.Add(pu)
			w.CollectNode.Add(pr)

			Convey("Start scheduler", func() {
				err := s.Start()
				So(err, ShouldBeNil)
				Convey("Create task", func() {
					t, err := s.CreateTask(schedule.NewSimpleSchedule(time.Millisecond*500), w, false)
					So(err.Errors(), ShouldBeEmpty)
					So(t, ShouldNotBeNil)
					t.(*task).Spin()
					time.Sleep(3 * time.Second)

				})
			})
		})
	})
}
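Distilled from the test above, the heart of the example is the three-stage workflow map: a collect node that requests a metric, a processor chained under it, and a publisher chained under the processor. Below is a minimal sketch of just that wiring, using only the wmap calls already shown; the plugin names, versions, and output path are placeholders carried over from the test, and the *wmap.WorkflowMap return type is an assumption based on the usage above.

func buildWorkflow() *wmap.WorkflowMap {
	w := wmap.NewWorkflowMap()
	// Request version 2 of the mock collector's metric and attach config to it.
	w.CollectNode.AddMetric("/intel/mock/foo", 2)
	w.CollectNode.AddConfigItem("/intel/mock/foo", "password", "secret")

	// File publisher (version 3) writing to a placeholder output path.
	pu := wmap.NewPublishNode("file", 3)
	pu.AddConfigItem("file", "/tmp/snap-workflow-sketch.out")

	// Passthru processor (version 1): the publisher hangs off the processor,
	// and the processor hangs off the collect node.
	pr := wmap.NewProcessNode("passthru", 1)
	pr.Add(pu)
	w.CollectNode.Add(pr)
	return w
}

The returned map is what gets handed to s.CreateTask together with a schedule, exactly as the test does.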
func loadPlg(c *pluginControl, paths ...string) (core.CatalogedPlugin, serror.SnapError) {
	// This is a Travis-optimized loading of plugins. From time to time, tests will error in Travis
	// due to a timeout when waiting for a response from a plugin. We are going to attempt loading a plugin
	// 3 times before letting the error through. Hopefully this cuts down on the number of Travis failures.
	var e serror.SnapError
	var p core.CatalogedPlugin
	rp, err := core.NewRequestedPlugin(paths[0])
	if err != nil {
		return nil, serror.New(err)
	}
	if len(paths) > 1 {
		rp.SetSignature([]byte{00, 00, 00})
	}
	for i := 0; i < 3; i++ {
		p, e = c.Load(rp)
		if e == nil {
			break
		}
		if e != nil && i == 2 {
			return nil, e

		}
	}
	return p, nil
}
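A hedged usage sketch for loadPlg: the first path is the plugin binary to load, and any extra argument (its value is ignored) makes the helper attach a dummy signature before the load attempt. The names t, c, and mockPluginPath below are placeholders for this sketch, not identifiers from the original tests.

func loadMockWithRetries(t *testing.T, c *pluginControl, mockPluginPath string) core.CatalogedPlugin {
	// Plain load; loadPlg retries the Load call up to three times.
	lp, serr := loadPlg(c, mockPluginPath)
	if serr != nil {
		t.Fatal(serr)
	}
	// Passing any second argument makes loadPlg call rp.SetSignature with a
	// dummy signature before loading.
	if _, serr := loadPlg(c, mockPluginPath, "dummy-signature"); serr != nil {
		t.Fatal(serr)
	}
	return lp
}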
Example No. 3
func TestMockPluginLoad(t *testing.T) {
	// These tests only work if SNAP_PATH is known.
	// It is the responsibility of the testing framework to
	// build the plugins first into the build dir.
	Convey("make sure plugin has been built", t, func() {
		err := helper.CheckPluginBuilt(SnapPath, PluginName)
		So(err, ShouldBeNil)

		Convey("ensure plugin loads and responds", func() {
			c := control.New(control.GetDefaultConfig())
			c.Start()
			rp, _ := core.NewRequestedPlugin(PluginPath)
			_, err := c.Load(rp)

			So(err, ShouldBeNil)
		})

	})
}
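The test above assumes SNAP_PATH points at a build directory that already contains the plugin binaries. How SnapPath and PluginPath are derived is not shown in this listing; a plausible sketch, assuming they come straight from the environment and follow the SnapPath/plugin/<binary> layout used in the gRPC example further down (the plugin name is purely illustrative):

var (
	// SnapPath is empty when SNAP_PATH is unset, which is the condition the
	// tests below check before deciding whether they can run at all.
	SnapPath   = os.Getenv("SNAP_PATH")
	PluginName = "snap-plugin-collector-mock2" // placeholder plugin name
	PluginPath = path.Join(SnapPath, "plugin", PluginName)
)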
Example No. 4
func TestMockPluginLoad(t *testing.T) {
	// These tests only work if SNAP_PATH is known.
	// It is the responsibility of the testing framework to
	// build the plugins first into the build dir.
	if SnapPath != "" {
		// Trigger a plugin build via the helper, if possible, for this plugin
		helper.BuildPlugin(PluginType, PluginName)
		//
		Convey("ensure plugin loads and responds", t, func() {
			c := control.New()
			c.Start()
			rp, _ := core.NewRequestedPlugin(PluginPath)
			_, err := c.Load(rp)

			So(err, ShouldBeNil)
		})
	} else {
		fmt.Printf("SNAP_PATH not set. Cannot test %s plugin.\n", PluginName)
	}
}
Example No. 5
func TestFilePublisherLoad(t *testing.T) {
	// These tests only work if SNAP_PATH is known.
	// It is the responsibility of the testing framework to
	// build the plugins first into the build dir.
	if SnapPath != "" {
		// Trigger a plugin build via the helper, if possible, for this plugin
		helper.BuildPlugin(PluginType, PluginName)
		//
		//TODO cannot test this locally. We need AMQP and integration tests.
		SkipConvey("ensure plugin loads and responds", t, func() {
			c := control.New(control.GetDefaultConfig())
			c.Start()
			rp, _ := core.NewRequestedPlugin(PluginPath)
			_, err := c.Load(rp)

			So(err, ShouldBeNil)
		})
	} else {
		fmt.Printf("SNAP_PATH not set. Cannot test %s plugin.\n", PluginName)
	}
}
Example No. 6
// Begin handling load, unload, and inventory
func (p *pluginControl) Start() error {
	// Start pluginManager when pluginControl starts
	p.Started = true
	controlLogger.WithFields(log.Fields{
		"_block": "start",
	}).Info("control started")
	//Autodiscover
	if p.Config.AutoDiscoverPath != "" {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is enabled")
		paths := filepath.SplitList(p.Config.AutoDiscoverPath)
		p.SetAutodiscoverPaths(paths)
		for _, pa := range paths {
			fullPath, err := filepath.Abs(pa)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			controlLogger.WithFields(log.Fields{
				"_block": "start",
			}).Info("autoloading plugins from: ", fullPath)
			files, err := ioutil.ReadDir(fullPath)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			for _, file := range files {
				if file.IsDir() {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring subdirectory: ", file.Name())
					continue
				}
				// Ignore task files (JSON and YAML)
				fname := strings.ToLower(file.Name())
				if strings.HasSuffix(fname, ".json") || strings.HasSuffix(fname, ".yaml") || strings.HasSuffix(fname, ".yml") {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring JSON/Yaml file: ", file.Name())
					continue
				}
				if strings.HasSuffix(file.Name(), ".aci") || !(strings.HasSuffix(file.Name(), ".asc")) {
					rp, err := core.NewRequestedPlugin(path.Join(fullPath, file.Name()))
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": pa,
							"plugin":           file,
						}).Error(err)
					}
					signatureFile := file.Name() + ".asc"
					if _, err := os.Stat(path.Join(fullPath, signatureFile)); err == nil {
						err = rp.ReadSignatureFile(path.Join(fullPath, signatureFile))
						if err != nil {
							controlLogger.WithFields(log.Fields{
								"_block":           "start",
								"autodiscoverpath": pa,
								"plugin":           file.Name() + ".asc",
							}).Error(err)
						}
					}
					pl, err := p.Load(rp)
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin":           file,
						}).Error(err)
					} else {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin-file-name": file.Name(),
							"plugin-name":      pl.Name(),
							"plugin-version":   pl.Version(),
							"plugin-type":      pl.TypeName(),
						}).Info("Loading plugin")
					}
				}
			}
		}
	} else {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is disabled")
	}
	return nil
}
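The auto-discover loop above makes its load decision with two suffix checks: task manifests (.json, .yaml, .yml) are skipped, and a file is treated as a plugin candidate if it is an .aci package or anything other than an .asc signature file. The same decision pulled out into a standalone predicate for clarity; this is a sketch, not a method of pluginControl.

// shouldAutoload mirrors the filtering in Start above: skip task files,
// then accept .aci packages and any file that is not a detached signature.
func shouldAutoload(name string) bool {
	lower := strings.ToLower(name)
	if strings.HasSuffix(lower, ".json") ||
		strings.HasSuffix(lower, ".yaml") ||
		strings.HasSuffix(lower, ".yml") {
		return false // task manifest, not a plugin
	}
	return strings.HasSuffix(name, ".aci") || !strings.HasSuffix(name, ".asc")
}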
Example No. 7
func TestSwapPlugin(t *testing.T) {
	if SnapPath != "" {
		c := New()
		c.Start()
		time.Sleep(100 * time.Millisecond)
		lpe := newListenToPluginEvent()
		c.eventManager.RegisterHandler("Control.PluginsSwapped", lpe)

		_, e := load(c, PluginPath)
		Convey("Loading first plugin", t, func() {
			Convey("Should not error", func() {
				So(e, ShouldBeNil)
			})
		})
		// Travis optimization: if for some reason we cannot load the plugin
		// after three attempts, we fail the test immediately, since we wait
		// on a channel to be closed before proceeding with additional tests.
		// If the plugin never loads, the channel will never close and the
		// test would hang indefinitely.
		if e != nil {
			t.FailNow()
		}
		<-lpe.done
		Convey("First plugin in catalog", t, func() {
			Convey("Should have name mock", func() {
				So(c.PluginCatalog()[0].Name(), ShouldEqual, "mock")
			})
		})
		mock1Path := strings.Replace(PluginPath, "snap-collector-mock2", "snap-collector-mock1", 1)
		mockRP, _ := core.NewRequestedPlugin(mock1Path)
		err := c.SwapPlugins(mockRP, c.PluginCatalog()[0])
		Convey("Swapping plugins", t, func() {
			Convey("Should not error", func() {
				So(err, ShouldBeNil)
			})
		})
		if err != nil {
			t.FailNow()
		}
		<-lpe.done

		// Swap plugin that was loaded with a different version of the plugin
		Convey("Swapping plugins", t, func() {
			Convey("Should generate a swapped plugins event", func() {
				Convey("So first plugin in catalog after swap should have name mock", func() {
					So(c.PluginCatalog()[0].Name(), ShouldEqual, "mock")
				})
				Convey("So swapped plugins event should show loaded plugin name as mock", func() {
					So(lpe.plugin.LoadedPluginName, ShouldEqual, "mock")
				})
				Convey("So swapped plugins event should show loaded plugin version as 1", func() {
					So(lpe.plugin.LoadedPluginVersion, ShouldEqual, 1)
				})
				Convey("So swapped plugins event should show unloaded plugin name as mock", func() {
					So(lpe.plugin.UnloadedPluginName, ShouldEqual, "mock")
				})
				Convey("So swapped plugins event should show unloaded plugin version as 2", func() {
					So(lpe.plugin.UnloadedPluginVersion, ShouldEqual, 2)
				})
				Convey("So swapped plugins event should show plugin type as collector", func() {
					So(lpe.plugin.PluginType, ShouldEqual, int(plugin.CollectorPluginType))
				})
			})
		})

		// Swap plugin with a different type of plugin
		Convey("First plugin in catalog", t, func() {
			Convey("Should have name mock", func() {
				So(c.PluginCatalog()[0].Name(), ShouldEqual, "mock")
			})
		})

		filePath := strings.Replace(PluginPath, "snap-collector-mock2", "snap-publisher-file", 1)
		fileRP, _ := core.NewRequestedPlugin(filePath)
		err = c.SwapPlugins(fileRP, c.PluginCatalog()[0])
		Convey("Swapping mock and file plugins", t, func() {
			Convey("Should error", func() {
				So(err, ShouldNotBeNil)
			})
		})

		//
		// TODO: Write a proper rollback test as previous test was not testing rollback
		//

		// Rollback will throw an error if a plugin can not unload

		Convey("Rollback failure returns error", t, func() {
			lp := c.PluginCatalog()[0]
			pm := new(MockPluginManagerBadSwap)
			pm.ExistingPlugin = lp
			c.pluginManager = pm

			mockRP, _ := core.NewRequestedPlugin(mock1Path)
			err := c.SwapPlugins(mockRP, lp)
			Convey("So err should be received if rollback fails", func() {
				So(err, ShouldNotBeNil)
			})
		})

		c.Stop()
		time.Sleep(100 * time.Millisecond)
	}
}
Example No. 8
func (w worker) loadPlugin(plugin core.Plugin) error {
	logger := w.logger.WithFields(log.Fields{
		"plugin-name":    plugin.Name(),
		"plugin-version": plugin.Version(),
		"plugin-type":    plugin.TypeName(),
		"_block":         "load-plugin",
	})
	if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
		return nil
	}
	members, err := w.memberManager.GetPluginAgreementMembers()
	if err != nil {
		logger.Error(err)
		return err
	}
	for _, member := range shuffle(members) {
		url := fmt.Sprintf("%s://%s:%s/v1/plugins/%s/%s/%d?download=true", member.GetRestProto(), member.GetAddr(), member.GetRestPort(), plugin.TypeName(), plugin.Name(), plugin.Version())
		c, err := client.New(url, "v1", member.GetRestInsecureSkipVerify(), client.Password(w.memberManager.GetRequestPassword()))
		if err != nil {
			logger.WithFields(log.Fields{
				"err": err,
				"url": url,
			}).Info("unable to create client")
			continue
		}
		resp, err := c.TribeRequest()
		if err != nil {
			logger.WithFields(log.Fields{
				"err": err,
				"url": url,
			}).Info("plugin not found")
			continue
		}
		if resp.StatusCode == 200 {
			if resp.Header.Get("Content-Type") != "application/x-gzip" {
				logger.WithField("content-type", resp.Header.Get("Content-Type")).Error("Expected application/x-gzip")
			}
			dir, err := ioutil.TempDir("", "")
			if err != nil {
				logger.Error(err)
				return err
			}
			f, err := os.Create(path.Join(dir, fmt.Sprintf("%s-%s-%d", plugin.TypeName(), plugin.Name(), plugin.Version())))
			if err != nil {
				logger.Error(err)
				f.Close()
				return err
			}
			io.Copy(f, resp.Body)
			f.Close()
			err = os.Chmod(f.Name(), 0700)
			if err != nil {
				logger.Error(err)
				return err
			}
			rp, err := core.NewRequestedPlugin(f.Name())
			if err != nil {
				logger.Error(err)
				return err
			}
			_, err = w.pluginManager.Load(rp)
			if err != nil {
				logger.Error(err)
				return err
			}
			if w.isPluginLoaded(plugin.Name(), plugin.TypeName(), plugin.Version()) {
				return nil
			}
			return errors.New("failed to load plugin")
		}
	}
	return errors.New("failed to find a member with the plugin")
}
Example No. 9
// This test is meant to cover the grpc implementation of the subset of control
// features that scheduler uses. It is not intended to test the control features
// themselves, only that we are correctly passing data over grpc and correctly
// passing success/errors.
func TestGRPCServerScheduler(t *testing.T) {
	l, _ := net.Listen("tcp", ":0")
	l.Close()
	cfg := GetDefaultConfig()
	cfg.ListenPort = l.Addr().(*net.TCPAddr).Port
	c := New(cfg)
	err := c.Start()

	Convey("Starting control_proxy server/client", t, func() {
		Convey("So err should be nil", func() {
			So(err, ShouldBeNil)
		})
	})
	// Load 3 plugins
	// collector -- mock
	// processor -- passthru
	// publisher -- file
	mock, err := core.NewRequestedPlugin(fixtures.JSONRPCPluginPath)
	if err != nil {
		log.Fatal(err)
	}
	c.Load(mock)
	passthru, err := core.NewRequestedPlugin(path.Join(fixtures.SnapPath, "plugin", "snap-processor-passthru"))
	if err != nil {
		log.Fatal(err)
	}
	c.Load(passthru)
	filepub, err := core.NewRequestedPlugin(path.Join(fixtures.SnapPath, "plugin", "snap-publisher-file"))
	if err != nil {
		log.Fatal(err)
	}
	c.Load(filepub)

	conn, err := rpcutil.GetClientConnection(c.Config.ListenAddr, c.Config.ListenPort)

	Convey("Creating an rpc connection", t, func() {
		Convey("Should not error", func() {
			So(err, ShouldBeNil)
		})
	})

	client := rpc.NewMetricManagerClient(conn)

	Convey("Creating an RPC client to control RPC server", t, func() {
		Convey("And a client should exist", func() {
			So(client, ShouldNotBeNil)
		})
	})
	//GetContentTypes
	Convey("Getting Content Types", t, func() {
		Convey("Should err if invalid plugin given", func() {
			req := &rpc.GetPluginContentTypesRequest{
				Name:       "bogus",
				PluginType: int32(0),
				Version:    int32(0),
			}
			reply, err := client.GetPluginContentTypes(context.Background(), req)
			// We don't expect rpc errors
			So(err, ShouldBeNil)
			So(reply.Error, ShouldNotEqual, "")
			So(reply.Error, ShouldResemble, "plugin not found")
		})
		Convey("Should return content types with valid plugin", func() {
			req := &rpc.GetPluginContentTypesRequest{
				Name:       "mock",
				PluginType: int32(0),
				Version:    0,
			}
			reply, err := client.GetPluginContentTypes(context.Background(), req)
			So(err, ShouldBeNil)
			So(reply.Error, ShouldEqual, "")
			So(reply.AcceptedTypes, ShouldContain, "snap.gob")
			So(reply.ReturnedTypes, ShouldContain, "snap.gob")
		})
	})

	// Verify that validate deps is properly passing through errors
	Convey("Validating Deps", t, func() {
		Convey("Should Fail if given invalid info", func() {
			req := &rpc.ValidateDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.InvalidMetric}),
				Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{}),
			}
			reply, err := client.ValidateDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})
		Convey("with valid metrics", func() {
			req := &rpc.ValidateDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{}),
			}
			reply, err := client.ValidateDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})
	})
	//Subscribe Deps: valid/invalid
	Convey("SubscribeDeps", t, func() {
		Convey("Should Error with invalid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.InvalidMetric}),
				Plugins: common.ToCorePluginsMsg([]core.Plugin{}),
				TaskId:  "my-snowflake-id",
			}
			reply, err := client.SubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
			So(reply.Errors[0].ErrorString, ShouldResemble, "Metric not found: /this/is/invalid")
		})
		Convey("Should not error with valid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				Plugins: common.ToCorePluginsMsg([]core.Plugin{}),
				TaskId:  "my-snowflake-id",
			}
			reply, err := client.SubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
	// unsubscribedeps -- valid/invalid
	Convey("UnsubscribeDeps", t, func() {
		Convey("Should Error with invalid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.InvalidMetric}),
				Plugins: common.ToCorePluginsMsg([]core.Plugin{}),
				TaskId:  "my-snowflake-id",
			}
			reply, err := client.UnsubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
			So(reply.Errors[0].ErrorString, ShouldResemble, "Metric not found: /this/is/invalid")
		})
		Convey("Should not error with valid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				Plugins: common.ToCorePluginsMsg([]core.Plugin{}),
				TaskId:  "my-snowflake-id",
			}
			reply, err := client.UnsubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
	//matchquerytonamespaces -- valid/invalid
	Convey("MatchingQueryToNamespaces", t, func() {
		Convey("Should error with invalid inputs", func() {
			req := &rpc.ExpandWildcardsRequest{
				Namespace: common.ToNamespace(fixtures.InvalidMetric.Namespace()),
			}
			reply, err := client.MatchQueryToNamespaces(context.Background(), req)
			// we don't expect rpc.errors
			So(err, ShouldBeNil)
			So(reply.Error, ShouldNotBeNil)
			So(reply.Error.ErrorString, ShouldResemble, "Metric not found: /this/is/invalid")
		})
		Convey("Should not error with invalid inputs", func() {
			req := &rpc.ExpandWildcardsRequest{
				Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()),
			}
			reply, err := client.MatchQueryToNamespaces(context.Background(), req)
			// we don't expect rpc.errors
			So(err, ShouldBeNil)
			So(reply.Error, ShouldBeNil)
		})
	})
	//expandwildcards -- valid/invalid
	Convey("ExpandWildcards", t, func() {
		Convey("Should error with invalid inputs", func() {
			req := &rpc.ExpandWildcardsRequest{
				Namespace: common.ToNamespace(fixtures.InvalidMetric.Namespace()),
			}
			reply, err := client.ExpandWildcards(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(reply.Error, ShouldNotBeNil)
			So(reply.Error.ErrorString, ShouldResemble, "Metric not found: /this/is/invalid")
		})
		Convey("Should not error with valid inputs", func() {
			req := &rpc.ExpandWildcardsRequest{
				Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()),
			}
			reply, err := client.ExpandWildcards(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(reply.Error, ShouldBeNil)
		})
	})
	// start plugin pools/provide task info so we can do collect/process/publishMetrics
	// errors here indicate problems outside the scope of this test.
	plugins := []string{"collector:mock:1", "processor:passthru:1", "publisher:file:3"}
	lps := make([]*loadedPlugin, len(plugins))
	pools := make([]strategy.Pool, len(plugins))
	for i, v := range plugins {
		lps[i], err = c.pluginManager.get(v)
		if err != nil {
			log.Fatal(err)
		}
		pools[i], err = c.pluginRunner.AvailablePlugins().getOrCreatePool(v)
		if err != nil {
			log.Fatal(err)
		}
		pools[i].Subscribe("my-snowflake-id", strategy.BoundSubscriptionType)
		err = c.pluginRunner.runPlugin(lps[i].Details)
		if err != nil {
			log.Fatal(err)
		}
	}
	//our returned metrics
	var mts []core.Metric
	//collect
	Convey("CollectMetrics", t, func() {
		Convey("Should error with invalid inputs", func() {
			req := &rpc.CollectMetricsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.InvalidMetric}),
				Deadline: &common.Time{
					Sec:  int64(time.Now().Unix()),
					Nsec: int64(time.Now().Nanosecond()),
				},
				TaskID: "my-snowflake-id",
			}
			reply, err := client.CollectMetrics(context.Background(), req)
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})
		Convey("should not error with valid inputs", func() {
			req := &rpc.CollectMetricsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				Deadline: &common.Time{
					Sec:  int64(time.Now().Unix()),
					Nsec: int64(time.Now().Nanosecond()),
				},
				TaskID: "my-snowflake-id",
			}
			reply, err := client.CollectMetrics(context.Background(), req)
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
			So(reply.Metrics[0].Namespace, ShouldResemble, common.ToNamespace(fixtures.ValidMetric.Namespace()))
			// Used in a later test as metrics to be passed to processor
			mts = common.ToCoreMetrics(reply.Metrics)
		})
	})
	//our content to pass to publish
	var content []byte
	//process
	Convey("ProcessMetrics", t, func() {
		Convey("Should error with invalid inputs", func() {
			req := controlproxy.GetPubProcReq("snap.gob", []byte{}, "bad name", 1, map[string]ctypes.ConfigValue{}, "my-snowflake-id")
			reply, err := client.ProcessMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
			So(reply.Errors[0], ShouldResemble, "bad key")
		})
		Convey("should not error with valid inputs", func() {
			var buf bytes.Buffer
			enc := gob.NewEncoder(&buf)
			metrics := make([]plugin.MetricType, len(mts))
			for i, m := range mts {
				metrics[i] = m.(plugin.MetricType)
			}
			enc.Encode(metrics)
			req := controlproxy.GetPubProcReq("snap.gob", buf.Bytes(), "passthru", 1, map[string]ctypes.ConfigValue{}, "my-snowflake-id")
			reply, err := client.ProcessMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
			// content to pass to publisher
			content = reply.Content

		})
	})
	//publishmetrics
	Convey("PublishMetrics", t, func() {
		Convey("Should error with invalid inputs", func() {
			req := controlproxy.GetPubProcReq("snap.gob", []byte{}, "bad name", 1, map[string]ctypes.ConfigValue{}, "my-snowflake-id")
			reply, err := client.PublishMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)

			So(reply.Errors[0], ShouldResemble, "bad key")
		})
		// Publish only returns no errors on success
		Convey("should not error with valid inputs", func() {
			config := make(map[string]ctypes.ConfigValue)
			config["file"] = ctypes.ConfigValueStr{Value: "/tmp/grpcservertest.snap"}
			req := controlproxy.GetPubProcReq("snap.gob", content, "file", 3, config, "my-snowflake-id")
			reply, err := client.PublishMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
}
Example No. 10
func (s *Server) loadPlugin(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	mediaType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
	if err != nil {
		respond(500, rbody.FromError(err), w)
		return
	}
	if strings.HasPrefix(mediaType, "multipart/") {
		var pluginPath string
		var signature []byte
		var checkSum [sha256.Size]byte
		lp := &rbody.PluginsLoaded{}
		lp.LoadedPlugins = make([]rbody.LoadedPlugin, 0)
		mr := multipart.NewReader(r.Body, params["boundary"])
		var i int
		for {
			var b []byte
			p, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			if err != nil {
				respond(500, rbody.FromError(err), w)
				return
			}
			if r.Header.Get("Plugin-Compression") == "gzip" {
				g, err := gzip.NewReader(p)
				if err != nil {
					respond(500, rbody.FromError(err), w)
					return
				}
				defer g.Close()
				b, err = ioutil.ReadAll(g)
				if err != nil {
					respond(500, rbody.FromError(err), w)
					return
				}
			} else {
				b, err = ioutil.ReadAll(p)
				if err != nil {
					respond(500, rbody.FromError(err), w)
					return
				}
			}

			// A little sanity checking for files being passed into the API server.
			// First file passed in should be the plugin. If the first file is a signature
			// file, an error is returned. The signature file should be the second
			// file passed to the API server. If the second file does not have the ".asc"
			// extension, an error is returned.
			// If we loop around more than twice before receiving io.EOF, then
			// an error is returned.

			switch {
			case i == 0:
				if filepath.Ext(p.FileName()) == ".asc" {
					e := errors.New("Error: first file passed to load plugin api can not be signature file")
					respond(500, rbody.FromError(e), w)
					return
				}
				if pluginPath, err = writeFile(p.FileName(), b); err != nil {
					respond(500, rbody.FromError(err), w)
					return
				}
				checkSum = sha256.Sum256(b)
			case i == 1:
				if filepath.Ext(p.FileName()) == ".asc" {
					signature = b
				} else {
					e := errors.New("Error: second file passed was not a signature file")
					respond(500, rbody.FromError(e), w)
					return
				}
			case i == 2:
				e := errors.New("Error: More than two files passed to the load plugin api")
				respond(500, rbody.FromError(e), w)
				return
			}
			i++
		}
		rp, err := core.NewRequestedPlugin(pluginPath)
		if err != nil {
			respond(500, rbody.FromError(err), w)
			return
		}
		// Sanity check, verify the checkSum on the file sent is the same
		// as after it is written to disk.
		if rp.CheckSum() != checkSum {
			e := errors.New("Error: CheckSum mismatch on requested plugin to load")
			respond(500, rbody.FromError(e), w)
			return
		}
		rp.SetSignature(signature)
		restLogger.Info("Loading plugin: ", rp.Path())
		pl, err := s.mm.Load(rp)
		if err != nil {
			var ec int
			restLogger.Error(err)
			restLogger.Debugf("Removing file (%s)", rp.Path())
			err2 := os.RemoveAll(filepath.Dir(rp.Path()))
			if err2 != nil {
				restLogger.Error(err2)
			}
			rb := rbody.FromError(err)
			switch rb.ResponseBodyMessage() {
			case PluginAlreadyLoaded:
				ec = 409
			default:
				ec = 500
			}
			respond(ec, rb, w)
			return
		}
		lp.LoadedPlugins = append(lp.LoadedPlugins, *catalogedPluginToLoaded(r.Host, pl))
		respond(201, lp, w)
	}
}
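The handler above expects a multipart body whose first part is the plugin binary (optionally gzip-compressed when the Plugin-Compression: gzip header is set) and whose optional second part is the detached .asc signature, and it replies 201 on success. A hedged client-side sketch follows; the /v1/plugins route is an assumption inferred from the download URL in the tribe worker example, and the form field name is arbitrary because the handler only cares about part order.

// uploadPlugin is a sketch of a client matching the handler above; the route
// and field name are assumptions, not values taken from this listing.
func uploadPlugin(host, pluginFile string) error {
	body := &bytes.Buffer{}
	mw := multipart.NewWriter(body)
	// The plugin binary must be the first part; the field name is ignored.
	part, err := mw.CreateFormFile("snap-plugins", filepath.Base(pluginFile))
	if err != nil {
		return err
	}
	f, err := os.Open(pluginFile)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := io.Copy(part, f); err != nil {
		return err
	}
	mw.Close()
	req, err := http.NewRequest("POST", "http://"+host+"/v1/plugins", body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 201 {
		return fmt.Errorf("load failed: %s", resp.Status)
	}
	return nil
}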
Example No. 11
// Begin handling load, unload, and inventory
func (p *pluginControl) Start() error {
	// Start pluginManager when pluginControl starts
	p.Started = true
	controlLogger.WithFields(log.Fields{
		"_block": "start",
	}).Info("control started")

	//Autodiscover
	if p.Config.AutoDiscoverPath != "" {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is enabled")
		paths := filepath.SplitList(p.Config.AutoDiscoverPath)
		p.SetAutodiscoverPaths(paths)
		for _, pa := range paths {
			fullPath, err := filepath.Abs(pa)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			controlLogger.WithFields(log.Fields{
				"_block": "start",
			}).Info("autoloading plugins from: ", fullPath)
			files, err := ioutil.ReadDir(fullPath)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			for _, file := range files {
				if file.IsDir() {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring subdirectory: ", file.Name())
					continue
				}
				// Ignore task files (JSON and YAML)
				fname := strings.ToLower(file.Name())
				if strings.HasSuffix(fname, ".json") || strings.HasSuffix(fname, ".yaml") || strings.HasSuffix(fname, ".yml") {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring JSON/Yaml file: ", file.Name())
					continue
				}
				// if the file is a plugin package (which would have a suffix of '.aci') or if the file
				// is not a plugin signing file (which would have a suffix of '.asc'), then attempt to
				// automatically load the file as a plugin
				if strings.HasSuffix(file.Name(), ".aci") || !(strings.HasSuffix(file.Name(), ".asc")) {
					// check to make sure the file is executable by someone (even if it isn't you); if no one
					// can execute this file then skip it (and include a warning in the log output)
					if (file.Mode() & 0111) == 0 {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": pa,
							"plugin":           file,
						}).Warn("Auto-loading of plugin '", file.Name(), "' skipped (plugin not executable)")
						continue
					}
					rp, err := core.NewRequestedPlugin(path.Join(fullPath, file.Name()))
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": pa,
							"plugin":           file,
						}).Error(err)
					}
					signatureFile := file.Name() + ".asc"
					if _, err := os.Stat(path.Join(fullPath, signatureFile)); err == nil {
						err = rp.ReadSignatureFile(path.Join(fullPath, signatureFile))
						if err != nil {
							controlLogger.WithFields(log.Fields{
								"_block":           "start",
								"autodiscoverpath": pa,
								"plugin":           file.Name() + ".asc",
							}).Error(err)
						}
					}
					pl, err := p.Load(rp)
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin":           file,
						}).Error(err)
					} else {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin-file-name": file.Name(),
							"plugin-name":      pl.Name(),
							"plugin-version":   pl.Version(),
							"plugin-type":      pl.TypeName(),
						}).Info("Loading plugin")
					}
				}
			}
		}
	} else {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is disabled")
	}

	lis, err := net.Listen("tcp", fmt.Sprintf("%v:%v", p.Config.ListenAddr, p.Config.ListenPort))
	if err != nil {
		controlLogger.WithField("error", err.Error()).Error("Failed to start control grpc listener")
		return err
	}

	opts := []grpc.ServerOption{}
	p.closingChan = make(chan bool, 1)
	p.grpcServer = grpc.NewServer(opts...)
	rpc.RegisterMetricManagerServer(p.grpcServer, &ControlGRPCServer{p})
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		err := p.grpcServer.Serve(lis)
		if err != nil {
			select {
			case <-p.closingChan:
			// If we called Stop() then there will be a value in p.closingChan, so
			// we'll get here and we can exit without showing the error.
			default:
				controlLogger.Fatal(err)
			}
		}
	}()

	return nil
}
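Start above gives the Serve goroutine the closingChan/WaitGroup bookkeeping so that an intentional shutdown is not logged as a fatal error. The matching Stop side is not part of this listing; a sketch of what it might look like under those assumptions:

// Hypothetical counterpart to Start: signal the Serve goroutine that the
// shutdown is intentional, stop the gRPC server, and wait for the goroutine
// to exit. The real Stop implementation is not shown in these examples.
func (p *pluginControl) stopGRPC() {
	p.closingChan <- true // buffered with capacity 1, so this never blocks
	p.grpcServer.Stop()
	p.wg.Wait()
	p.Started = false
}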
Example No. 12
func action(ctx *cli.Context) {
	// If logPath is set, we verify the logPath and set it so that all logging
	// goes to the log file instead of stdout.
	logPath := ctx.String("log-path")
	if logPath != "" {
		f, err := os.Stat(logPath)
		if err != nil {
			log.Fatal(err)
		}
		if !f.IsDir() {
			log.Fatal("log path provided must be a directory")
		}

		file, err := os.OpenFile(fmt.Sprintf("%s/snap.log", logPath), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			log.Fatal(err)
		}
		defer file.Close()
		log.SetOutput(file)
	}

	var l = map[int]string{
		1: "debug",
		2: "info",
		3: "warning",
		4: "error",
		5: "fatal",
	}

	var t = map[int]string{
		0: "disabled",
		1: "enabled",
		2: "warning",
	}

	logLevel := ctx.Int("log-level")
	maxProcs := ctx.Int("max-procs")
	disableAPI := ctx.Bool("disable-api")
	apiPort := ctx.Int("api-port")
	autodiscoverPath := ctx.String("auto-discover")
	maxRunning := ctx.Int("max-running-plugins")
	pluginTrust := ctx.Int("plugin-trust")
	keyringPaths := ctx.String("keyring-files")
	cachestr := ctx.String("cache-expiration")
	isTribeEnabled := ctx.Bool("tribe")
	tribeSeed := ctx.String("tribe-seed")
	tribeNodeName := ctx.String("tribe-node-name")
	tribeAddr := ctx.String("tribe-addr")
	tribePort := ctx.Int("tribe-port")
	cache, err := time.ParseDuration(cachestr)
	if err != nil {
		log.Fatal(fmt.Sprintf("invalid cache-expiration format: %s", cachestr))
	}
	config := ctx.String("config")
	restHttps := ctx.Bool("rest-https")
	restKey := ctx.String("rest-key")
	restCert := ctx.String("rest-cert")

	log.Info("Starting snapd (version: ", gitversion, ")")

	// Set Max Processors for snapd.
	setMaxProcs(maxProcs)

	// Validate log level and trust level settings for snapd
	validateLevelSettings(logLevel, pluginTrust)

	controlOpts := []control.PluginControlOpt{
		control.MaxRunningPlugins(maxRunning),
		control.CacheExpiration(cache),
	}

	if config != "" {
		b, err := ioutil.ReadFile(config)
		if err != nil {
			log.WithFields(log.Fields{
				"block":   "main",
				"_module": "snapd",
				"error":   err.Error(),
				"path":    config,
			}).Fatal("unable to read config")
		}
		cfg := control.NewConfig()
		err = json.Unmarshal(b, &cfg)
		if err != nil {
			log.WithFields(log.Fields{
				"block":   "main",
				"_module": "snapd",
				"error":   err.Error(),
				"path":    config,
			}).Fatal("invalid config")
		}
		controlOpts = append(controlOpts, control.OptSetConfig(cfg))
	}

	c := control.New(
		controlOpts...,
	)

	coreModules = []coreModule{}

	coreModules = append(coreModules, c)
	s := scheduler.New(
		scheduler.CollectQSizeOption(defaultQueueSize),
		scheduler.CollectWkrSizeOption(defaultPoolSize),
		scheduler.PublishQSizeOption(defaultQueueSize),
		scheduler.PublishWkrSizeOption(defaultPoolSize),
		scheduler.ProcessQSizeOption(defaultQueueSize),
		scheduler.ProcessWkrSizeOption(defaultPoolSize),
	)
	s.SetMetricManager(c)
	coreModules = append(coreModules, s)

	var tr managesTribe
	if isTribeEnabled {
		log.Info("Tribe is enabled")
		tc := tribe.DefaultConfig(tribeNodeName, tribeAddr, tribePort, tribeSeed, apiPort)
		t, err := tribe.New(tc)
		if err != nil {
			printErrorAndExit(t.Name(), err)
		}
		c.RegisterEventHandler("tribe", t)
		t.SetPluginCatalog(c)
		s.RegisterEventHandler("tribe", t)
		t.SetTaskManager(s)
		coreModules = append(coreModules, t)
		tr = t
	}

	// Set interrupt handling so we can die gracefully.
	startInterruptHandling(coreModules...)

	//  Start our modules
	var started []coreModule
	for _, m := range coreModules {
		if err := startModule(m); err != nil {
			for _, m := range started {
				m.Stop()
			}
			printErrorAndExit(m.Name(), err)
		}
		started = append(started, m)
	}

	//Plugin Trust
	c.SetPluginTrustLevel(pluginTrust)
	log.Info("setting plugin trust level to: ", t[pluginTrust])
	//Keyring checking for trust levels 1 and 2

	if pluginTrust > 0 {
		keyrings := filepath.SplitList(keyringPaths)
		if len(keyrings) == 0 {
			log.WithFields(
				log.Fields{
					"block":   "main",
					"_module": "snapd",
				}).Fatal("need keyring file when trust is on (--keyring-file or -k)")
		}
		for _, k := range keyrings {
			keyringPath, err := filepath.Abs(k)
			if err != nil {
				log.WithFields(
					log.Fields{
						"block":       "main",
						"_module":     "snapd",
						"error":       err.Error(),
						"keyringPath": keyringPath,
					}).Fatal("Unable to determine absolute path to keyring file")
			}
			f, err := os.Stat(keyringPath)
			if err != nil {
				log.WithFields(
					log.Fields{
						"block":       "main",
						"_module":     "snapd",
						"error":       err.Error(),
						"keyringPath": keyringPath,
					}).Fatal("bad keyring file")
			}
			if f.IsDir() {
				log.Info("Adding keyrings from: ", keyringPath)
				files, err := ioutil.ReadDir(keyringPath)
				if err != nil {
					log.WithFields(
						log.Fields{
							"_block":      "main",
							"_module":     "snapd",
							"error":       err.Error(),
							"keyringPath": keyringPath,
						}).Fatal(err)
				}
				if len(files) == 0 {
					log.Fatal(fmt.Sprintf("given keyring path [%s] is an empty directory!", keyringPath))
				}
				for _, keyringFile := range files {
					if keyringFile.IsDir() {
						continue
					}
					if strings.HasSuffix(keyringFile.Name(), ".gpg") || (strings.HasSuffix(keyringFile.Name(), ".pub")) || (strings.HasSuffix(keyringFile.Name(), ".pubring")) {
						f, err := os.Open(keyringPath + "/" + keyringFile.Name())
						if err != nil {
							log.WithFields(
								log.Fields{
									"block":       "main",
									"_module":     "snapd",
									"error":       err.Error(),
									"keyringPath": keyringPath,
								}).Warning("unable to open keyring file. not adding to keyring path")
							continue
						}
						f.Close()
						log.Info("adding keyring file: ", keyringPath+"/"+keyringFile.Name())
						c.SetKeyringFile(keyringPath + "/" + keyringFile.Name())
					}
				}
			} else {
				f, err := os.Open(keyringPath)
				if err != nil {
					log.WithFields(
						log.Fields{
							"block":       "main",
							"_module":     "snapd",
							"error":       err.Error(),
							"keyringPath": keyringPath,
						}).Fatal("unable to open keyring file.")
				}
				f.Close()
				log.Info("adding keyring file ", keyringPath)
				c.SetKeyringFile(keyringPath)
			}
		}
	}

	//Autodiscover
	if autodiscoverPath != "" {
		log.Info("auto discover path is enabled")
		paths := filepath.SplitList(autodiscoverPath)
		c.SetAutodiscoverPaths(paths)
		for _, p := range paths {
			fullPath, err := filepath.Abs(p)
			if err != nil {
				log.WithFields(
					log.Fields{
						"_block":           "main",
						"_module":          "snapd",
						"autodiscoverpath": p,
					}).Fatal(err)
			}
			log.Info("autoloading plugins from: ", fullPath)
			files, err := ioutil.ReadDir(fullPath)
			if err != nil {
				log.WithFields(
					log.Fields{
						"_block":           "main",
						"_module":          "snapd",
						"autodiscoverpath": fullPath,
					}).Fatal(err)
			}
			for _, file := range files {
				if file.IsDir() {
					continue
				}
				if strings.HasSuffix(file.Name(), ".aci") || !(strings.HasSuffix(file.Name(), ".asc")) {
					rp, err := core.NewRequestedPlugin(path.Join(fullPath, file.Name()))
					if err != nil {
						log.WithFields(log.Fields{
							"_block":           "main",
							"_module":          "snapd",
							"autodiscoverpath": fullPath,
							"plugin":           file,
						}).Error(err)
					}
					signatureFile := file.Name() + ".asc"
					if _, err := os.Stat(path.Join(fullPath, signatureFile)); err == nil {
						err = rp.ReadSignatureFile(path.Join(fullPath, signatureFile))
						if err != nil {
							log.WithFields(log.Fields{
								"_block":           "main",
								"_module":          "snapd",
								"autodiscoverpath": fullPath,
								"plugin":           file.Name() + ".asc",
							}).Error(err)
						}
					}
					pl, err := c.Load(rp)
					if err != nil {
						log.WithFields(log.Fields{
							"_block":           "main",
							"_module":          "snapd",
							"autodiscoverpath": fullPath,
							"plugin":           file,
						}).Error(err)
					} else {
						log.WithFields(log.Fields{
							"_block":           "main",
							"_module":          "snapd",
							"autodiscoverpath": fullPath,
							"plugin":           file,
							"plugin-name":      pl.Name(),
							"plugin-version":   pl.Version(),
							"plugin-type":      pl.TypeName(),
						}).Info("Loading plugin")
					}
				}
			}
		}
	} else {
		log.Info("auto discover path is disabled")
	}

	//API
	if !disableAPI {
		r, err := rest.New(restHttps, restCert, restKey)
		if err != nil {
			log.Fatal(err)
			return
		}
		r.BindMetricManager(c)
		r.BindConfigManager(c.Config)
		r.BindTaskManager(s)
		if tr != nil {
			r.BindTribeManager(tr)
		}
		r.Start(fmt.Sprintf(":%d", apiPort))
		go monitorErrors(r.Err())
		log.Info("Rest API is enabled")
	} else {
		log.Info("Rest API is disabled")
	}

	log.WithFields(
		log.Fields{
			"block":   "main",
			"_module": "snapd",
		}).Info("snapd started")

	// Switch log level to user defined
	log.Info("setting log level to: ", l[logLevel])
	log.SetLevel(getLevel(logLevel))

	select {} //run forever and ever
}
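Within the keyring walk above, a file is only handed to c.SetKeyringFile when it carries one of three suffixes. Pulled out as a small predicate for readability; this is a sketch, not a function from snapd.

// isKeyringFile mirrors the suffix check in the keyring loop above.
func isKeyringFile(name string) bool {
	return strings.HasSuffix(name, ".gpg") ||
		strings.HasSuffix(name, ".pub") ||
		strings.HasSuffix(name, ".pubring")
}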
Example No. 13
// Begin handling load, unload, and inventory
func (p *pluginControl) Start() error {
	// Start pluginManager when pluginControl starts
	p.Started = true
	controlLogger.WithFields(log.Fields{
		"_block": "start",
	}).Info("control started")

	//Autodiscover
	if p.Config.AutoDiscoverPath != "" {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is enabled")
		paths := filepath.SplitList(p.Config.AutoDiscoverPath)
		p.SetAutodiscoverPaths(paths)
		for _, pa := range paths {
			fullPath, err := filepath.Abs(pa)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			controlLogger.WithFields(log.Fields{
				"_block": "start",
			}).Info("autoloading plugins from: ", fullPath)
			files, err := ioutil.ReadDir(fullPath)
			if err != nil {
				controlLogger.WithFields(log.Fields{
					"_block":           "start",
					"autodiscoverpath": pa,
				}).Fatal(err)
			}
			for _, file := range files {
				if file.IsDir() {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring subdirectory: ", file.Name())
					continue
				}
				// Ignore task files (JSON and YAML)
				fname := strings.ToLower(file.Name())
				if strings.HasSuffix(fname, ".json") || strings.HasSuffix(fname, ".yaml") || strings.HasSuffix(fname, ".yml") {
					controlLogger.WithFields(log.Fields{
						"_block":           "start",
						"autodiscoverpath": pa,
					}).Warning("Ignoring JSON/Yaml file: ", file.Name())
					continue
				}
				if strings.HasSuffix(file.Name(), ".aci") || !(strings.HasSuffix(file.Name(), ".asc")) {
					rp, err := core.NewRequestedPlugin(path.Join(fullPath, file.Name()))
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": pa,
							"plugin":           file,
						}).Error(err)
					}
					signatureFile := file.Name() + ".asc"
					if _, err := os.Stat(path.Join(fullPath, signatureFile)); err == nil {
						err = rp.ReadSignatureFile(path.Join(fullPath, signatureFile))
						if err != nil {
							controlLogger.WithFields(log.Fields{
								"_block":           "start",
								"autodiscoverpath": pa,
								"plugin":           file.Name() + ".asc",
							}).Error(err)
						}
					}
					pl, err := p.Load(rp)
					if err != nil {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin":           file,
						}).Error(err)
					} else {
						controlLogger.WithFields(log.Fields{
							"_block":           "start",
							"autodiscoverpath": fullPath,
							"plugin-file-name": file.Name(),
							"plugin-name":      pl.Name(),
							"plugin-version":   pl.Version(),
							"plugin-type":      pl.TypeName(),
						}).Info("Loading plugin")
					}
				}
			}
		}
	} else {
		controlLogger.WithFields(log.Fields{
			"_block": "start",
		}).Info("auto discover path is disabled")
	}

	lis, err := net.Listen("tcp", fmt.Sprintf("%v:%v", p.Config.ListenAddr, p.Config.ListenPort))
	if err != nil {
		controlLogger.WithField("error", err.Error()).Error("Failed to start control grpc listener")
		return err
	}

	opts := []grpc.ServerOption{}
	grpcServer := grpc.NewServer(opts...)
	rpc.RegisterMetricManagerServer(grpcServer, &ControlGRPCServer{p})
	go func() {
		err := grpcServer.Serve(lis)
		if err != nil {
			controlLogger.Fatal(err)
		}
	}()

	return nil
}
Example No. 14
func TestDistributedWorkflow(t *testing.T) {
	Convey("Create a scheduler with 2 controls and load plugins", t, func() {
		l, _ := net.Listen("tcp", ":0")
		l.Close()
		cfg := control.GetDefaultConfig()
		cfg.ListenPort = l.Addr().(*net.TCPAddr).Port
		c1 := control.New(cfg)
		c1.Start()
		m, _ := net.Listen("tcp", ":0")
		m.Close()
		cfg.ListenPort = m.Addr().(*net.TCPAddr).Port
		port1 := cfg.ListenPort
		c2 := control.New(cfg)
		schcfg := GetDefaultConfig()
		sch := New(schcfg)
		c2.Start()
		sch.SetMetricManager(c1)
		err := sch.Start()
		So(err, ShouldBeNil)
		// Load appropriate plugins into each control.
		mock2Path := helper.PluginFilePath("snap-plugin-collector-mock2")
		passthruPath := helper.PluginFilePath("snap-plugin-processor-passthru")
		filePath := helper.PluginFilePath("snap-plugin-publisher-mock-file")

		// mock2 and file onto c1

		rp, err := core.NewRequestedPlugin(mock2Path)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		rp, err = core.NewRequestedPlugin(filePath)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		// passthru on c2
		rp, err = core.NewRequestedPlugin(passthruPath)
		So(err, ShouldBeNil)
		passthru, err := c2.Load(rp)
		So(err, ShouldBeNil)

		Convey("Test task with one local and one remote node", func() {
			//Create a task
			//Create a workflowmap
			wf := dsWFMap(port1)
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			// stop the scheduler and control (since in nested Convey statements, the
			// statements in the outer Convey execute for each of the inner Conveys
			// independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order
			// for details on execution order in Convey)
			sch.Stop()
			c2.Stop()
		})

		Convey("Test task with invalid remote port", func() {
			wf := dsWFMap(0)
			controlproxy.MAX_CONNECTION_TIMEOUT = 1 * time.Second
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 1)
			So(t, ShouldBeNil)
			// stop the scheduler and control (since in nested Convey statements, the
			// statements in the outer Convey execute for each of the inner Conveys
			// independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order
			// for details on execution order in Convey)
			sch.Stop()
			c2.Stop()
		})

		Convey("Test task without remote plugin", func() {
			_, err := c2.Unload(passthru)
			So(err, ShouldBeNil)
			wf := dsWFMap(port1)
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 1)
			So(t, ShouldBeNil)
			// stop the scheduler and control (since in nested Convey statements, the
			// statements in the outer Convey execute for each of the inner Conveys
			// independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order
			// for details on execution order in Convey)
			sch.Stop()
			c2.Stop()
		})

		Convey("Test task failing when control is stopped while task is running", func() {
			wf := dsWFMap(port1)
			// set timeout so that connection attempt through the controlproxy will fail after 1 second
			controlproxy.MAX_CONNECTION_TIMEOUT = time.Second
			// define an interval that the simple scheduler will run on every 100ms
			interval := time.Millisecond * 100
			// create our task; should be disabled after 3 failures
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(interval), wf, true)
			// ensure task was created successfully
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			// create a channel to listen on for a response and setup an event handler
			// that will respond on that channel once the 'TaskDisabledEvent'  arrives
			respChan := make(chan struct{})
			sch.RegisterEventHandler("test", &failHandler{respChan})
			// then stop the controller
			c2.Stop()
			// and wait for the response (with a 30 second timeout; just in case)
			var ok bool
			select {
			case <-time.After(30 * time.Second):
				// if we get here, the select timed out waiting for a response; we don't
				// expect to hit this timeout since it should only take 3 seconds for
				// the workflow to fail to connect to the gRPC server three times, but
				// it might if the task did not fail as expected
				So("Timeout triggered waiting for disabled event", ShouldBeBlank)
			case <-respChan:
				// if we get here, we got a response on the respChan
				ok = true
			}
			So(ok, ShouldEqual, true)
			// stop the scheduler (since in nested Convey statements, the
			// statements in the outer Convey execute for each of the inner Conveys
			// independently; see https://github.com/smartystreets/goconvey/wiki/Execution-order
			// for details on execution order in Convey)
			sch.Stop()
		})

	})

}
Example No. 15
func TestDistributedSubscriptions(t *testing.T) {

	Convey("Load control/scheduler with a mock remote scheduler", t, func() {
		l, _ := net.Listen("tcp", ":0")
		l.Close()
		cfg := control.GetDefaultConfig()
		cfg.ListenPort = l.Addr().(*net.TCPAddr).Port
		c1 := control.New(cfg)
		c1.Start()
		m, _ := net.Listen("tcp", ":0")
		m.Close()
		cfg.ListenPort = m.Addr().(*net.TCPAddr).Port
		port1 := cfg.ListenPort
		c2 := control.New(cfg)
		schcfg := GetDefaultConfig()
		sch := New(schcfg)
		c2.Start()
		sch.SetMetricManager(c1)
		err := sch.Start()
		So(err, ShouldBeNil)
		// Load appropriate plugins into each control.
		mock2Path := helper.PluginFilePath("snap-plugin-collector-mock2")
		passthruPath := helper.PluginFilePath("snap-plugin-processor-passthru")
		filePath := helper.PluginFilePath("snap-plugin-publisher-mock-file")

		// mock2 and file onto c1

		rp, err := core.NewRequestedPlugin(mock2Path)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		rp, err = core.NewRequestedPlugin(filePath)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		// passthru on c2
		rp, err = core.NewRequestedPlugin(passthruPath)
		So(err, ShouldBeNil)
		_, err = c2.Load(rp)
		So(err, ShouldBeNil)

		Convey("Starting task should not succeed if remote dep fails to subscribe", func() {
			//Create a task
			//Create a workflowmap
			wf := dsWFMap(port1)
			// Create a task that is not started immediately so we can
			// validate deps correctly.
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			schTask := t.(*task)
			remoteMockManager := &subscriptionManager{Fail: true}
			schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager)
			localMockManager := &subscriptionManager{Fail: false}
			schTask.RemoteManagers.Add("", localMockManager)
			// Start task. We expect it to fail while subscribing deps
			terrs := sch.StartTask(t.ID())
			So(terrs, ShouldNotBeNil)
			Convey("So dependencies should have been unsubscribed", func() {
				// Ensure that unsubscribe call count is equal to subscribe call count
				// i.e. that every subscribe call was followed by an unsubscribe since
				// we errored
				So(remoteMockManager.UnsubscribeCallCount, ShouldEqual, remoteMockManager.SubscribeCallCount)
				So(localMockManager.UnsubscribeCallCount, ShouldEqual, localMockManager.SubscribeCallCount)
			})
		})

		Convey("Starting task should not succeed if missing local dep fails to subscribe", func() {
			//Create a task
			//Create a workflowmap
			wf := dsWFMap(port1)
			// Create a task that is not started immediately so we can
			// validate deps correctly.
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			schTask := t.(*task)
			localMockManager := &subscriptionManager{Fail: true}
			schTask.RemoteManagers.Add("", localMockManager)
			remoteMockManager := &subscriptionManager{Fail: false}
			schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager)

			// Start task. We expect it to fail while subscribing deps
			terrs := sch.StartTask(t.ID())
			So(terrs, ShouldNotBeNil)
			Convey("So dependencies should have been unsubscribed", func() {
				// Ensure that unsubscribe call count is equal to subscribe call count
				// i.e. that every subscribe call was followed by an unsubscribe since
				// we errored
				So(remoteMockManager.UnsubscribeCallCount, ShouldEqual, remoteMockManager.SubscribeCallCount)
				So(localMockManager.UnsubscribeCallCount, ShouldEqual, localMockManager.SubscribeCallCount)
			})
		})

		Convey("Starting task should suceed if all deps are available", func() {
			//Create a task
			//Create a workflowmap
			wf := dsWFMap(port1)
			// Create a task that is not started immediately so we can
			// validate deps correctly.
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, false)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			schTask := t.(*task)
			localMockManager := &subscriptionManager{Fail: false}
			schTask.RemoteManagers.Add("", localMockManager)
			remoteMockManager := &subscriptionManager{Fail: false}
			schTask.RemoteManagers.Add(fmt.Sprintf("127.0.0.1:%v", port1), remoteMockManager)
			terrs := sch.StartTask(t.ID())
			So(terrs, ShouldBeNil)
			Convey("So all depndencies should have been subscribed to", func() {
				// Ensure that unsubscribe call count is equal to subscribe call count
				// i.e that every subscribe call was followed by an unsubscribe since
				// we errored
				So(localMockManager.SubscribeCallCount, ShouldBeGreaterThan, 0)
				So(remoteMockManager.SubscribeCallCount, ShouldBeGreaterThan, 0)
			})
		})
	})
}
Example No. 16
// This test is meant to cover the grpc implementation of the subset of control
// features that scheduler uses. It is not intended to test the control features
// themselves, only that we are correctly passing data over grpc and correctly
// passing success/errors.
func TestGRPCServerScheduler(t *testing.T) {
	l, _ := net.Listen("tcp", ":0")
	l.Close()
	cfg := GetDefaultConfig()
	cfg.ListenPort = l.Addr().(*net.TCPAddr).Port
	c := New(cfg)
	err := c.Start()

	lpe := newFoo()
	c.eventManager.RegisterHandler("Control.PluginLoaded", lpe)

	Convey("Starting control_proxy server/client", t, func() {
		Convey("So err should be nil", func() {
			So(err, ShouldBeNil)
		})
	})
	// Load 3 plugins
	// collector -- mock
	// processor -- passthru
	// publisher -- file
	mock, err := core.NewRequestedPlugin(fixtures.JSONRPCPluginPath)
	if err != nil {
		log.Fatal(err)
	}
	_, serr := c.Load(mock)
	Convey("Loading mock collector", t, func() {
		Convey("should not error", func() {
			So(serr, ShouldBeNil)
		})
	})
	<-lpe.done
	passthru, err := core.NewRequestedPlugin(helper.PluginFilePath("snap-plugin-processor-passthru"))
	if err != nil {
		log.Fatal(err)
	}
	catalogedPassthru, serr := c.Load(passthru)
	Convey("Loading passthru processor", t, func() {
		Convey("should not error", func() {
			So(serr, ShouldBeNil)
		})
	})
	subscribedPassThruPlugin := subscribedPlugin{
		name:     catalogedPassthru.Name(),
		version:  catalogedPassthru.Version(),
		typeName: catalogedPassthru.TypeName(),
	}
	<-lpe.done
	filepub, err := core.NewRequestedPlugin(helper.PluginFilePath("snap-plugin-publisher-mock-file"))
	if err != nil {
		log.Fatal(err)
	}
	catalogedFile, serr := c.Load(filepub)
	Convey("Loading file publisher", t, func() {
		Convey("should not error", func() {
			So(serr, ShouldBeNil)
		})
	})
	subscribedFilePlugin := subscribedPlugin{
		name:     catalogedFile.Name(),
		version:  catalogedFile.Version(),
		typeName: catalogedFile.TypeName(),
	}

	<-lpe.done
	conn, err := rpcutil.GetClientConnection(c.Config.ListenAddr, c.Config.ListenPort)

	Convey("Creating an rpc connection", t, func() {
		Convey("Should not error", func() {
			So(err, ShouldBeNil)
		})
	})

	client := rpc.NewMetricManagerClient(conn)

	Convey("Creating an RPC client to control RPC server", t, func() {
		Convey("And a client should exist", func() {
			So(client, ShouldNotBeNil)
		})
	})

	// Verify that validate deps is properly passing through errors
	Convey("Validating Deps", t, func() {
		Convey("Should Fail if given invalid info", func() {
			req := &rpc.ValidateDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.InvalidMetric}),
				Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{}),
			}
			reply, err := client.ValidateDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})
		Convey("with valid metrics", func() {
			req := &rpc.ValidateDepsRequest{
				Metrics: common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{}),
			}
			reply, err := client.ValidateDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
	//Subscribe Deps: valid/invalid
	Convey("SubscribeDeps", t, func() {
		Convey("Should Error with invalid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Requested: []*common.Metric{&common.Metric{Namespace: common.ToNamespace(fixtures.InvalidMetric.Namespace()), Version: int64(fixtures.InvalidMetric.Version())}},
				Plugins:   common.ToSubPluginsMsg([]core.SubscribedPlugin{subscribedFilePlugin, subscribedPassThruPlugin}),
				TaskId:    "my-snowflake-id",
			}
			reply, err := client.SubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
			So(reply.Errors[0].ErrorString, ShouldResemble, "Metric not found: /this/is/invalid (version: 1000)")
		})
		Convey("Should not error with valid inputs", func() {
			req := &rpc.SubscribeDepsRequest{
				Requested: []*common.Metric{&common.Metric{Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()), Version: int64(fixtures.ValidMetric.Version())}},
				Plugins:   common.ToSubPluginsMsg([]core.SubscribedPlugin{}),
				TaskId:    "my-snowflake-valid",
			}
			reply, err := client.SubscribeDeps(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
	//our returned metrics
	var mts []core.Metric
	//collect
	Convey("CollectMetrics", t, func() {
		req := &rpc.SubscribeDepsRequest{
			Requested: []*common.Metric{&common.Metric{Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()), Version: int64(fixtures.ValidMetric.Version())}},
			Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{
				subscribedPassThruPlugin,
				subscribedFilePlugin,
			},
			),
			TaskId: "my-snowflake-id",
		}
		_, err := client.SubscribeDeps(context.Background(), req)
		So(err, ShouldBeNil)

		Convey("should error with invalid inputs", func() {
			req := &rpc.CollectMetricsRequest{
				TaskID: "my-fake-snowflake-id",
			}
			reply, err := client.CollectMetrics(context.Background(), req)
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})

		Convey("should not error with valid inputs", func() {
			req := &rpc.CollectMetricsRequest{
				TaskID: "my-snowflake-valid",
			}
			reply, err := client.CollectMetrics(context.Background(), req)
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
			So(reply.Metrics[0].Namespace, ShouldResemble, common.ToNamespace(fixtures.ValidMetric.Namespace()))
			// Used in a later test as metrics to be passed to processor
			mts = common.ToCoreMetrics(reply.Metrics)
		})
	})

	//process
	Convey("ProcessMetrics", t, func() {
		req := &rpc.SubscribeDepsRequest{
			Requested: []*common.Metric{&common.Metric{Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()), Version: int64(fixtures.ValidMetric.Version())}},
			Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{
				subscribedPassThruPlugin,
				subscribedFilePlugin,
			},
			),
			TaskId: "my-snowflake-id",
		}
		_, err := client.SubscribeDeps(context.Background(), req)
		So(err, ShouldBeNil)
		Convey("should error with invalid inputs", func() {
			req := &rpc.PubProcMetricsRequest{
				Metrics:       common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				PluginName:    "passthru-invalid",
				PluginVersion: 1,
				TaskId:        "my-snowflake-id",
				Config:        common.ToConfigMap(map[string]ctypes.ConfigValue{}),
			}
			reply, err := client.ProcessMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
			// content to pass to publisher
		})
		Convey("should not error with valid inputs", func() {
			req := &rpc.PubProcMetricsRequest{
				Metrics:       common.NewMetrics(mts),
				PluginName:    "passthru",
				PluginVersion: 1,
				TaskId:        "my-snowflake-id",
				Config:        common.ToConfigMap(map[string]ctypes.ConfigValue{}),
			}
			reply, err := client.ProcessMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)

		})
	})
	//publishmetrics
	Convey("PublishMetrics", t, func() {
		req := &rpc.SubscribeDepsRequest{
			Requested: []*common.Metric{&common.Metric{Namespace: common.ToNamespace(fixtures.ValidMetric.Namespace()), Version: int64(fixtures.ValidMetric.Version())}},
			Plugins: common.ToSubPluginsMsg([]core.SubscribedPlugin{
				subscribedPassThruPlugin,
				subscribedFilePlugin,
			},
			),
			TaskId: "my-snowflake-id",
		}
		_, err := client.SubscribeDeps(context.Background(), req)
		So(err, ShouldBeNil)

		Convey("Should error with invalid inputs", func() {
			req := &rpc.PubProcMetricsRequest{
				Metrics:       common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				PluginName:    "mock-file-invalid",
				PluginVersion: 3,
				TaskId:        "my-snowflake-id",
				Config:        common.ToConfigMap(map[string]ctypes.ConfigValue{}),
			}
			reply, err := client.PublishMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldNotEqual, 0)
		})

		// Publish returns only errors; success is indicated by an empty Errors slice
		Convey("should not error with valid inputs", func() {
			config := make(map[string]ctypes.ConfigValue)
			config["file"] = ctypes.ConfigValueStr{Value: "/tmp/grpcservertest.snap"}
			req := &rpc.PubProcMetricsRequest{
				Metrics:       common.NewMetrics([]core.Metric{fixtures.ValidMetric}),
				PluginName:    "mock-file",
				PluginVersion: 3,
				TaskId:        "my-snowflake-id",
				Config:        common.ToConfigMap(config),
			}
			reply, err := client.PublishMetrics(context.Background(), req)
			// we don't expect rpc errors
			So(err, ShouldBeNil)
			So(len(reply.Errors), ShouldEqual, 0)
		})
	})
}
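The test above grabs an ephemeral listen port by binding to ":0" and immediately closing the listener. A minimal sketch of that pattern as a reusable helper, using nothing beyond the standard library "net" package (the name freePort is illustrative only and does not exist in this package):

func freePort() (int, error) {
	// Let the OS pick a free port.
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		return 0, err
	}
	// Close right away; only the port number is needed. Note there is a small
	// window in which another process could grab the port before it is reused.
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}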
Exemplo n.º 17
0
func TestDistributedWorkflow(t *testing.T) {
	Convey("Create a scheduler with 2 controls and load plugins", t, func() {
		l, _ := net.Listen("tcp", ":0")
		l.Close()
		cfg := control.GetDefaultConfig()
		cfg.ListenPort = l.Addr().(*net.TCPAddr).Port
		c1 := control.New(cfg)
		c1.Start()
		m, _ := net.Listen("tcp", ":0")
		m.Close()
		cfg.ListenPort = m.Addr().(*net.TCPAddr).Port
		port1 := cfg.ListenPort
		c2 := control.New(cfg)
		schcfg := GetDefaultConfig()
		sch := New(schcfg)
		c2.Start()
		sch.SetMetricManager(c1)
		err := sch.Start()
		So(err, ShouldBeNil)
		// Load appropriate plugins into each control.
		mock2Path := path.Join(PluginPath, "snap-collector-mock2")
		passthruPath := path.Join(PluginPath, "snap-processor-passthru")
		filePath := path.Join(PluginPath, "snap-publisher-file")

		// mock2 and file onto c1

		rp, err := core.NewRequestedPlugin(mock2Path)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		rp, err = core.NewRequestedPlugin(filePath)
		So(err, ShouldBeNil)
		_, err = c1.Load(rp)
		So(err, ShouldBeNil)
		// passthru on c2
		rp, err = core.NewRequestedPlugin(passthruPath)
		So(err, ShouldBeNil)
		passthru, err := c2.Load(rp)
		So(err, ShouldBeNil)

		Convey("Test task with one local and one remote node", func() {
			//Create a task
			//Create a workflowmap
			wf := dsWFMap(port1)
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
		})

		Convey("Test task with invalid remote port", func() {
			wf := dsWFMap(0)
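			// Shorten the proxy connection timeout so the dial to the bogus
			// remote (port 0) fails fast instead of stalling the test.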
			controlproxy.MAX_CONNECTION_TIMEOUT = 1 * time.Second
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 1)
			So(t, ShouldBeNil)
		})

		Convey("Test task without remote plugin", func() {
			_, err := c2.Unload(passthru)
			So(err, ShouldBeNil)
			wf := dsWFMap(port1)
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(time.Second), wf, true)
			So(len(errs.Errors()), ShouldEqual, 1)
			So(t, ShouldBeNil)
		})

		Convey("Test task failing when control is stopped while task is running", func() {
			wf := dsWFMap(port1)
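			// Restore a generous connection timeout (the previous case dropped
			// it to 1s) so the failure below is caused by stopping c2, not by
			// a dial timeout.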
			controlproxy.MAX_CONNECTION_TIMEOUT = 10 * time.Second
			interval := time.Millisecond * 100
			t, errs := sch.CreateTask(schedule.NewSimpleSchedule(interval), wf, true)
			So(len(errs.Errors()), ShouldEqual, 0)
			So(t, ShouldNotBeNil)
			c2.Stop()
			// Give task time to fail
			time.Sleep(time.Second)
			tasks := sch.GetTasks()
			var task core.Task
			for _, v := range tasks {
				task = v
			}
			So(task.State(), ShouldEqual, core.TaskDisabled)
		})

	})

}
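The distributed cases above build their workflows with a dsWFMap helper defined elsewhere in this package. A hypothetical sketch of such a helper, assuming the wmap process node carries a Target field holding the "host:port" of the remote control instance (the Target field name, plugin versions, and config values here are illustrative assumptions, not taken from this listing):

func dsWFMapSketch(port int) *wmap.WorkflowMap {
	w := wmap.NewWorkflowMap()
	// Collect the mock metric; the mock collector typically requires a
	// "password" config item.
	w.CollectNode.AddMetric("/intel/mock/foo", 1)
	w.CollectNode.AddConfigItem("/intel/mock/foo", "password", "secret")

	// Run the passthru processor on the remote control instance.
	pr := wmap.NewProcessNode("passthru", -1)
	pr.Target = fmt.Sprintf("127.0.0.1:%v", port) // assumed field for remote routing

	// Publish locally to a throwaway file.
	pu := wmap.NewPublishNode("file", -1)
	pu.AddConfigItem("file", "/tmp/snap-distributed-test.out")

	pr.Add(pu)
	w.CollectNode.Add(pr)
	return w
}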