Example #1
0
// TestInfluxdbBackend wires two dummy collectors through a merged
// subscription into an influxdb backend via FanOut, then verifies the
// fanout set closes cleanly before a 3s deadline.
//
// NOTE(review): this test depends on a reachable influxdb instance at a
// hard-coded LAN address — it will not pass in an isolated environment.
func TestInfluxdbBackend(t *testing.T) {
	logging.SetLevel("debug")

	conf := &config.Transport_influxdb{
		Version:         "0.9.0",
		URL:             "http://10.3.6.225:8086/",
		Username:        "root",
		Password:        "root",
		Database:        "mydb",
		RetentionPolicy: "test",
	}

	// Two dummy collectors, each publishing one batch every 100ms.
	merge := newcore.Merge(
		newcore.Subscribe(newcore.NewDummyCollector("c1", time.Millisecond*100, 1), nil),
		newcore.Subscribe(newcore.NewDummyCollector("c2", time.Millisecond*100, 1), nil),
	)

	// Fail fast if the backend cannot be constructed; previously the error
	// was discarded, which would surface later as a confusing nil-related
	// failure inside FanOut.
	b1, err := NewInfluxdbBackend("b1", conf)
	if err != nil {
		t.Fatalf("NewInfluxdbBackend: %v", err)
	}
	fset := newcore.FanOut(merge, b1)

	fset_closed_chan := make(chan error)

	// Let data flow for 2s, then ask the fanout set to shut down.
	time.AfterFunc(time.Second*time.Duration(2), func() {
		// merge will be closed within FanOut
		fset_closed_chan <- fset.Close()
	})

	// Give Close() one extra second to complete before declaring a hang.
	timeout := time.After(time.Second * time.Duration(3))

main_loop:
	for {
		select {
		case <-fset_closed_chan:
			fmt.Println("fset closed")
			break main_loop
		case <-timeout:
			t.Error("timed out! something is blocking")
			break main_loop
		}
	}

}
Example #2
0
// TestElasticsearchBackend wires two dummy collectors through a merged
// subscription into an elasticsearch backend via FanOut, then verifies the
// fanout set closes cleanly before a 3s deadline.
//
// NOTE(review): this test depends on a reachable elasticsearch instance at
// a hard-coded LAN address — it will not pass in an isolated environment.
func TestElasticsearchBackend(t *testing.T) {
	logging.SetLevel("debug")

	conf := &config.Transport_elasticsearch{
		URL:   "http://192.168.81.129:9200",
		Index: "test",
		Type:  "test",
	}

	// Two dummy collectors, each publishing one batch every 100ms.
	merge := newcore.Merge(
		newcore.Subscribe(newcore.NewDummyCollector("c1", time.Millisecond*100, 1), nil),
		newcore.Subscribe(newcore.NewDummyCollector("c2", time.Millisecond*100, 1), nil),
	)

	// Fail fast if the backend cannot be constructed; previously the error
	// was discarded, which would surface later as a confusing nil-related
	// failure inside FanOut.
	b1, err := NewElasticsearchBackend("b1", conf)
	if err != nil {
		t.Fatalf("NewElasticsearchBackend: %v", err)
	}
	fset := newcore.FanOut(merge, b1)

	fset_closed_chan := make(chan error)

	// Let data flow for 2s, then ask the fanout set to shut down.
	time.AfterFunc(time.Second*time.Duration(2), func() {
		// merge will be closed within FanOut
		fset_closed_chan <- fset.Close()
	})

	// Give Close() one extra second to complete before declaring a hang.
	timeout := time.After(time.Second * time.Duration(3))

main_loop:
	for {
		select {
		case <-fset_closed_chan:
			fmt.Println("fset closed")
			break main_loop
		case <-timeout:
			t.Error("timed out! something is blocking")
			break main_loop
		}
	}

}
Example #3
0
// create the topology of our running core.
//
//  (collector -> subscription)s -> merged subscription -> fanout -> publications(backends)
//
// When ishook is true, an extra HookBackend is appended to the backend list
// and returned alongside the publication set so callers (tests) can observe
// every item that flows through the fanout. Otherwise the hook result is nil.
func create_running_core_hooked(rconf *config.RuntimeConfig, ishook bool) (newcore.PublicationSet, *newcore.HookBackend, error) {
	var subs []newcore.Subscription

	if rconf == nil {
		return nil, nil, fmt.Errorf("RuntimeConfig is nil")
	}

	// create backends --------------------------------------------------------------------
	bks, err := backends.UseConfigCreateBackends(rconf)
	if err != nil {
		return nil, nil, err
	}

	// Without at least one backend the core would consume data and drop it.
	if len(bks) <= 0 {
		return nil, nil, fmt.Errorf("no backends configured. program will do nothing.")
	}

	for _, bk := range bks {
		logging.Debugf("loaded backend: %s", bk.Name())
		logging.Tracef("loaded backend: %s -> %+v", bk.Name(), bk)
	}

	// create collectors ------------------------------------------------------------------
	clrs, err := collectors.UseConfigCreateCollectors(rconf)
	if err != nil {
		return nil, nil, err
	}

	// make sure heartbeat collector always created.
	heartbeat_exists := false
	for _, c := range clrs {
		if c.Name() == "heartbeat" {
			heartbeat_exists = true
			break
		}
	}
	if !heartbeat_exists {
		clrs = append(clrs, collectors.NewHeartBeat(rconf.Client.HeartBeat_Interval))
	}

	// create collector subscriptions.
	for _, c := range clrs {
		subs = append(subs, newcore.Subscribe(c, nil))
	}

	// create other subscriptions, such as kafka consumer
	_subs, err := collectors.UseConfigCreateSubscription(rconf)
	if err != nil {
		return nil, nil, err
	}
	subs = append(subs, _subs...)

	for _, s := range subs {
		logging.Debugf("loaded subscription: %s", s.Name())
		logging.Tracef("loaded subscription: %s -> %+v", s.Name(), s)
	}

	merged := newcore.Merge(subs...)

	if ishook {
		// the only reason to create a hooked running core is to do unittesting. it's not a good idea though.
		hook := newcore.NewHookBackend()
		bks = append(bks, hook)
		return newcore.FanOut(merged, bks...), hook, nil
	}
	return newcore.FanOut(merged, bks...), nil, nil
}
Example #4
0
// TestFileBackend runs two dummy collectors through a merged subscription
// into a file backend plus a HookBackend, counts the items observed on the
// hook, and verifies the file backend wrote exactly that many lines.
func TestFileBackend(t *testing.T) {
	os.Remove(test_file_path)

	conf := &config.Transport_file{
		Enabled: true,
		Path:    test_file_path,
	}

	// Two dummy collectors, each publishing one batch every 100ms.
	merge := newcore.Merge(
		newcore.Subscribe(newcore.NewDummyCollector("c1", time.Millisecond*100, 1), nil),
		newcore.Subscribe(newcore.NewDummyCollector("c2", time.Millisecond*100, 1), nil),
	)

	// Fail fast if the backend cannot be constructed; previously the error
	// was discarded, which would surface later as a confusing nil-related
	// failure inside FanOut.
	b1, err := NewFileBackend("b1", conf)
	if err != nil {
		t.Fatalf("NewFileBackend: %v", err)
	}

	// The hook lets us count exactly what flowed through the fanout.
	hook := newcore.NewHookBackend()
	bks := []newcore.Publication{b1, hook}

	fset := newcore.FanOut(merge, bks...)

	fset_closed_chan := make(chan error)

	// Let data flow for 1s, then ask the fanout set to shut down.
	time.AfterFunc(time.Second*time.Duration(1), func() {
		// merge will be closed within FanOut
		fset_closed_chan <- fset.Close()
	})

	timeout := time.After(time.Second * time.Duration(3))

	expected := 0
main_loop:
	for {
		select {
		case md, ok := <-hook.Hook():
			if ok != false {
				expected += len(md)
			} else {
				break main_loop
			}

		case <-fset_closed_chan:
			fmt.Println("fset closed")
			break main_loop
		case <-timeout:
			t.Error("timed out! something is blocking")
			break main_loop
		}
	}

	// check how many data we collected.
	lines, err := fileLines(test_file_path)
	if err != nil {
		t.Error("failed counting lines", err)
		return
	}
	// on windows this may fail!
	os.Remove(test_file_path)

	t.Logf("expected: %d, got: %d", expected, lines)
	// 1s / 100 ms = 10 batch x 1 for each x 2 collectors = 20
	//	expected := 20
	if lines != expected {
		// BUG FIX: t.Error does not interpret printf verbs; use t.Errorf so
		// the counts actually render instead of the literal "%d".
		t.Errorf("lines mismatch: lines: %d, expected: %d", lines, expected)
	}

}