Example #1
// Create a new Heka logging interface.
func NewHekaLogger(conf *MzConfig) *HekaLogger {
	//Preflight
	var encoder client.Encoder = nil
	var sender client.Sender = nil
	var logname string = ""
	var err error
	var tracer bool = false
	var filter int64

	pid := int32(os.Getpid())

	dhost, _ := os.Hostname()
	conf.SetDefaultFlag("heka.show_caller", false)
	conf.SetDefault("logger.filter", "10")
	filter, _ = strconv.ParseInt(conf.Get("logger.filter", "10"), 0, 0)
	if conf.GetFlag("heka.use") {
		encoder = client.NewProtobufEncoder(nil)
		sender, err = client.NewNetworkSender(conf.Get("heka.sender", "tcp"),
			conf.Get("heka.server_addr", "127.0.0.1:5565"))
		if err != nil {
			log.Panic("Could not create sender ", err)
		}
		logname = conf.Get("heka.logger_name", "package")
	}
	return &HekaLogger{encoder: encoder,
		sender:   sender,
		logname:  logname,
		pid:      pid,
		hostname: conf.Get("heka.current_host", dhost),
		conf:     conf,
		tracer:   tracer,
		filter:   filter}
}
Example #2
func (s *SandboxEncoder) Init(config interface{}) (err error) {
	conf := config.(*SandboxEncoderConfig)
	s.sbc = &sandbox.SandboxConfig{
		ScriptType:       conf.ScriptType,
		ScriptFilename:   conf.ScriptFilename,
		ModuleDirectory:  conf.ModuleDirectory,
		PreserveData:     conf.PreserveData,
		MemoryLimit:      conf.MemoryLimit,
		InstructionLimit: conf.InstructionLimit,
		OutputLimit:      conf.OutputLimit,
		Profile:          conf.Profile,
		Config:           conf.Config,
	}
	s.sbc.ScriptFilename = pipeline.PrependShareDir(s.sbc.ScriptFilename)
	s.sampleDenominator = pipeline.Globals().SampleDenominator

	s.tz = time.UTC
	if tz, ok := s.sbc.Config["tz"]; ok {
		if s.tz, err = time.LoadLocation(tz.(string)); err != nil {
			return
		}
	}

	dataDir := pipeline.PrependBaseDir(sandbox.DATA_DIR)
	if !fileExists(dataDir) {
		if err = os.MkdirAll(dataDir, 0700); err != nil {
			return
		}
	}

	switch s.sbc.ScriptType {
	case "lua":
		s.sb, err = lua.CreateLuaSandbox(s.sbc)
	default:
		return fmt.Errorf("Unsupported script type: %s", s.sbc.ScriptType)
	}

	if err != nil {
		return fmt.Errorf("Sandbox creation failed: '%s'", err)
	}

	s.preservationFile = filepath.Join(dataDir, s.name+sandbox.DATA_EXT)
	if s.sbc.PreserveData && fileExists(s.preservationFile) {
		err = s.sb.Init(s.preservationFile, "encoder")
	} else {
		err = s.sb.Init("", "encoder")
	}
	if err != nil {
		return fmt.Errorf("Sandbox initialization failed: %s", err)
	}

	s.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		s.injected = true
		s.output = []byte(payload)
		return 0
	})
	s.sample = true
	s.cEncoder = client.NewProtobufEncoder(nil)
	return
}
Example #3
// NewHekaClient returns a new HekaClient with pre-defined encoder and sender.
func NewHekaClient(hi string) (hc *HekaClient, err error) {
	hc = &HekaClient{}
	hc.encoder = client.NewProtobufEncoder(nil)
	hc.sender, err = client.NewNetworkSender("tcp", hi)
	if err == nil {
		return hc, nil
	}
	return
}
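
This encoder/sender pair (a ProtobufEncoder plus a TCP NetworkSender) is the pattern the rest of these examples build on. The following is a minimal, hypothetical sketch of driving that pair directly; it uses only calls that appear elsewhere on this page (EncodeMessageStream, SendMessage, the message setters), while the address, message fields, and import paths are illustrative assumptions rather than code from any of the quoted projects.

package main

import (
	"log"
	"os"
	"time"

	"github.com/mozilla-services/heka/client"
	"github.com/mozilla-services/heka/message"
	"github.com/pborman/uuid" // assumed uuid package; the exact import varies by Heka version
)

func main() {
	// Encoder/sender pair, as in NewHekaClient above; the address is an assumption.
	encoder := client.NewProtobufEncoder(nil)
	sender, err := client.NewNetworkSender("tcp", "127.0.0.1:5565")
	if err != nil {
		log.Fatalf("Error creating sender: %s", err)
	}
	defer sender.Close()

	// Build a message the same way the sbmgr and flood examples below do.
	hostname, _ := os.Hostname()
	msg := &message.Message{}
	msg.SetUuid(uuid.NewRandom())
	msg.SetTimestamp(time.Now().UnixNano())
	msg.SetType("example")
	msg.SetHostname(hostname)
	msg.SetPid(int32(os.Getpid()))
	msg.SetPayload("This is a message")

	// Frame the message as a protobuf stream and send it over the wire.
	var buf []byte
	if err = encoder.EncodeMessageStream(msg, &buf); err != nil {
		log.Fatalf("Error encoding message: %s", err)
	}
	if err = sender.SendMessage(buf); err != nil {
		log.Printf("Error sending message: %s", err)
	}
}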
Example #4
func main() {
	configFile := flag.String("config", "sbmgr.toml", "Sandbox manager configuration file")
	scriptFile := flag.String("script", "xyz.lua", "Sandbox script file")
	scriptConfig := flag.String("scriptconfig", "xyz.toml", "Sandbox script configuration file")
	filterName := flag.String("filtername", "filter", "Sandbox filter name (used on unload)")
	action := flag.String("action", "load", "Sandbox manager action")
	flag.Parse()

	var config SbmgrConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		log.Printf("Error decoding config file: %s", err)
		return
	}
	sender, err := client.NewNetworkSender("tcp", config.IpAddress)
	if err != nil {
		log.Fatalf("Error creating sender: %s\n", err.Error())
	}
	encoder := client.NewProtobufEncoder(&config.Signer)
	manager := client.NewClient(sender, encoder)

	hostname, _ := os.Hostname()
	msg := &message.Message{}
	msg.SetType("heka.control.sandbox")
	msg.SetTimestamp(time.Now().UnixNano())
	msg.SetUuid(uuid.NewRandom())
	msg.SetHostname(hostname)

	switch *action {
	case "load":
		code, err := ioutil.ReadFile(*scriptFile)
		if err != nil {
			log.Printf("Error reading scriptFile: %s\n", err.Error())
			return
		}
		msg.SetPayload(string(code))
		conf, err := ioutil.ReadFile(*scriptConfig)
		if err != nil {
			log.Printf("Error reading scriptConfig: %s\n", err.Error())
			return
		}
		f, _ := message.NewField("config", string(conf), "toml")
		msg.AddField(f)
	case "unload":
		f, _ := message.NewField("name", *filterName, "")
		msg.AddField(f)
	default:
		log.Printf("Invalid action: %s", *action)
	}

	f1, _ := message.NewField("action", *action, "")
	msg.AddField(f1)
	err = manager.SendMessage(msg)
	if err != nil {
		log.Printf("Error sending message: %s\n", err.Error())
	}
}
Example #5
// NewProtobufEmitter creates a Protobuf-encoded Heka log message emitter.
// Protobuf encoding should be preferred over JSON for efficiency.
func NewProtobufEmitter(sender client.Sender, envVersion,
	hostname, loggerName string) *HekaEmitter {

	return &HekaEmitter{
		Sender:        sender,
		StreamEncoder: client.NewProtobufEncoder(nil),
		LogName:       fmt.Sprintf("%s-%s", loggerName, VERSION),
		Pid:           int32(os.Getpid()),
		EnvVersion:    envVersion,
		Hostname:      hostname,
	}
}
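
A hypothetical construction call matching the signature above; the address, env version, hostname, and logger name are illustrative assumptions, and error handling is elided.

sender, err := client.NewNetworkSender("tcp", "127.0.0.1:5565")
if err != nil {
	// handle the error as the surrounding application requires
}
// The envVersion, hostname, and loggerName values are assumptions for illustration only.
emitter := NewProtobufEmitter(sender, "1", "example-host", "my-service")
_ = emitter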
Example #6
// NewHekaClient returns a new HekaClient with process ID, hostname, encoder and sender.
func NewHekaClient(h, hn string) (self *HekaClient, err error) {
	self = &HekaClient{}
	self.pid = int32(os.Getpid())
	if hn == "" {
		self.hostname, _ = os.Hostname()
	} else {
		self.hostname = hn
	}
	self.encoder = client.NewProtobufEncoder(nil)
	self.sender, err = client.NewNetworkSender("tcp", h)
	if err == nil {
		return self, nil
	}
	return
}
Example #7
// sendToHeka sends an array of snap metrics to Heka
func (shc *SnapHekaClient) sendToHeka(metrics []plugin.MetricType) error {
	pid := int32(os.Getpid())
	hostname, _ := os.Hostname()

	// Initializes Heka message encoder
	encoder := client.NewProtobufEncoder(nil)

	// Creates Heka message sender
	sender, err := client.NewNetworkSender(shc.hekaScheme, shc.hekaHost)
	if err != nil {
		logger.WithField("_block", "sendToHeka").Error("create NewNetworkSender error: ", err)
		return err
	}

	var buf []byte
	for _, m := range metrics {
		b, _, e := plugin.MarshalMetricTypes(plugin.SnapJSONContentType, []plugin.MetricType{m})
		if e != nil {
			logger.WithField("_block", "sendToHeka").Error("marshal metric error: ", m)
			continue
		}

		// Converts snap metric to Heka message
		msg, err := createHekaMessage(string(b), m, pid, hostname)
		if err != nil {
			logger.WithField("_block", "sendToHeka").Error("create message error: ", err)
			continue
		}
		err = encoder.EncodeMessageStream(msg, &buf)
		if err != nil {
			logger.WithField("_block", "sendToHeka").Error("encoding error: ", err)
			continue
		}

		err = sender.SendMessage(buf)
		if err != nil {
			logger.WithField("_block", "sendToHeka").Error("sending message error: ", err)
		}
	}
	sender.Close()
	return nil
}
Example #8
func (no *NsqOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		encoder client.Encoder
		msg     *message.Message
		msgBody []byte = make([]byte, 0, 1024)
		pack    *pipeline.PipelinePack
	)

	conf := no.conf
	encoder = client.NewProtobufEncoder(nil)

	for pack = range or.InChan() {
		if conf.Serialize {
			msg = pack.Message
			if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
				or.LogError(err)
				err = nil
				pack.Recycle()
				continue
			}
			//err := no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			//err = no.nsqwriter.PublishAsync(conf.Topic, msgBody, nil)
			_, _, err = no.nsqwriter.Publish(conf.Topic, msgBody)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.Publish"))
			}
			msgBody = msgBody[:0]
		} else {
			err = no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.PublishAsync"))
			}
		}
		pack.Recycle()
	}

	return
}
Example #9
func AMQPPluginSpec(c gs.Context) {
	t := &ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	config := NewPipelineConfig(nil)

	// Our two user/conn waitgroups
	ug := new(sync.WaitGroup)
	cg := new(sync.WaitGroup)

	// Setup the mock channel
	mch := NewMockAMQPChannel(ctrl)

	// Setup the mock amqpHub with the mock chan return
	aqh := NewMockAMQPConnectionHub(ctrl)
	aqh.EXPECT().GetChannel("").Return(mch, ug, cg, nil)
	var oldHub AMQPConnectionHub
	oldHub = amqpHub
	amqpHub = aqh
	defer func() {
		amqpHub = oldHub
	}()

	c.Specify("An amqp input", func() {

		// Setup all the mock calls for Init
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)
		mch.EXPECT().QueueDeclare("", false, true, false, false,
			gomock.Any()).Return(amqp.Queue{}, nil)
		mch.EXPECT().QueueBind("", "test", "", false, gomock.Any()).Return(nil)
		mch.EXPECT().Qos(2, 0, false).Return(nil)

		ith := new(InputTestHelper)
		ith.Msg = getTestMessage()
		ith.Pack = NewPipelinePack(config.inputRecycleChan)

		// set up mock helper, decoder set, and packSupply channel
		ith.MockHelper = NewMockPluginHelper(ctrl)
		ith.MockInputRunner = NewMockInputRunner(ctrl)
		ith.Decoders = make([]DecoderRunner, int(message.Header_JSON+1))
		ith.Decoders[message.Header_PROTOCOL_BUFFER] = NewMockDecoderRunner(ctrl)
		ith.Decoders[message.Header_JSON] = NewMockDecoderRunner(ctrl)
		ith.PackSupply = make(chan *PipelinePack, 1)
		ith.DecodeChan = make(chan *PipelinePack)
		ith.MockDecoderSet = NewMockDecoderSet(ctrl)

		ith.MockInputRunner.EXPECT().InChan().Return(ith.PackSupply)
		ith.MockHelper.EXPECT().DecoderSet().Times(2).Return(ith.MockDecoderSet)
		encCall := ith.MockDecoderSet.EXPECT().ByEncodings()
		encCall.Return(ith.Decoders, nil)

		c.Specify("with a valid setup and no decoders", func() {

			amqpInput := new(AMQPInput)
			defaultConfig := amqpInput.ConfigStruct().(*AMQPInputConfig)
			defaultConfig.URL = ""
			defaultConfig.Exchange = ""
			defaultConfig.ExchangeType = ""
			defaultConfig.RoutingKey = "test"
			err := amqpInput.Init(defaultConfig)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			c.Specify("consumes a message", func() {
				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
				}()
				ith.PackSupply <- ith.Pack
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
				close(streamChan)
			})

			c.Specify("consumes a serialized message", func() {
				encoder := client.NewProtobufEncoder(nil)
				streamChan := make(chan amqp.Delivery, 1)

				msg := new(message.Message)
				msg.SetUuid(uuid.NewRandom())
				msg.SetTimestamp(time.Now().UnixNano())
				msg.SetType("logfile")
				msg.SetLogger("/a/nice/path")
				msg.SetSeverity(int32(0))
				msg.SetEnvVersion("0.2")
				msg.SetPid(0)
				msg.SetPayload("This is a message")
				msg.SetHostname("TestHost")

				msgBody := make([]byte, 0, 500)
				_ = encoder.EncodeMessageStream(msg, &msgBody)

				ack := ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)

				streamChan <- amqp.Delivery{
					ContentType:  "application/hekad",
					Body:         msgBody,
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the decoded pack
				mockDecoderRunner := ith.Decoders[message.Header_PROTOCOL_BUFFER].(*MockDecoderRunner)
				mockDecoderRunner.EXPECT().InChan().Return(ith.DecodeChan)

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
				}()
				packRef := <-ith.DecodeChan
				c.Expect(ith.Pack, gs.Equals, packRef)
				// Ignore the leading 5 bytes of the encoded message, as that's the header.
				c.Expect(string(packRef.MsgBytes), gs.Equals, string(msgBody[5:]))
				ith.PackSupply <- ith.Pack
				close(streamChan)
			})
		})

		c.Specify("with a valid setup using decoders", func() {
			amqpInput := new(AMQPInput)
			defaultConfig := amqpInput.ConfigStruct().(*AMQPInputConfig)
			defaultConfig.URL = ""
			defaultConfig.Exchange = ""
			defaultConfig.ExchangeType = ""
			defaultConfig.RoutingKey = "test"
			defaultConfig.Decoders = []string{"defaultDecode"}
			err := amqpInput.Init(defaultConfig)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			c.Specify("consumes a message", func() {
				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Make the mock decoder runner and decoder for the 'defaultDecode'
				dmr := NewMockDecoderRunner(ctrl)
				mdec := NewMockDecoder(ctrl)
				ith.MockDecoderSet.EXPECT().ByName("defaultDecode").Return(dmr, true)
				dmr.EXPECT().Decoder().Return(mdec)
				mdec.EXPECT().Decode(ith.Pack).Return(nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
				}()
				ith.PackSupply <- ith.Pack
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
				close(streamChan)
			})
		})
	})

	c.Specify("An amqp output", func() {
		oth := NewOutputTestHelper(ctrl)
		pConfig := NewPipelineConfig(nil)

		amqpOutput := new(AMQPOutput)
		defaultConfig := amqpOutput.ConfigStruct().(*AMQPOutputConfig)
		defaultConfig.URL = ""
		defaultConfig.Exchange = ""
		defaultConfig.ExchangeType = ""
		defaultConfig.RoutingKey = "test"

		closeChan := make(chan *amqp.Error)

		inChan := make(chan *PipelinePack, 1)

		mch.EXPECT().NotifyClose(gomock.Any()).Return(closeChan)
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)

		// Increase the usage since Run decrements it on close
		ug.Add(1)

		// Expect the close and the InChan calls
		aqh.EXPECT().Close("", cg)
		oth.MockOutputRunner.EXPECT().InChan().Return(inChan)

		msg := getTestMessage()
		pack := NewPipelinePack(pConfig.inputRecycleChan)
		pack.Message = msg
		pack.Decoded = true

		c.Specify("publishes a plain message", func() {
			defaultConfig.Serialize = false

			err := amqpOutput.Init(defaultConfig)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)

			go func() {
				amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
			}()

			ug.Wait()

		})

		c.Specify("publishes a serialized message", func() {
			err := amqpOutput.Init(defaultConfig)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)

			go func() {
				amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
			}()
			ug.Wait()
		})
	})
}
Example #10
// Create a protocol buffers stream for the given message and write it into
// the given byte slice.
func createProtobufStream(pack *PipelinePack, outBytes *[]byte) (err error) {
	enc := client.NewProtobufEncoder(nil)
	err = enc.EncodeMessageStream(pack.Message, outBytes)
	return
}
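
Reading such a stream back reverses the framing: as the QueueBuffer spec further down shows, each record carries a framing header whose length is stored in the record's second byte (plus message.HEADER_FRAMING_SIZE), followed by the protobuf-encoded message. Below is a hedged sketch of unframing a single record under those assumptions; it presumes the same fmt, message, and proto imports used in that spec, and the helper name is hypothetical.

// decodeProtobufStream is a hypothetical counterpart to createProtobufStream:
// it skips the framing header and unmarshals the protobuf-encoded message,
// mirroring the header handling in the QueueBuffer spec below.
func decodeProtobufStream(record []byte) (*message.Message, error) {
	if len(record) < message.HEADER_FRAMING_SIZE+1 {
		return nil, fmt.Errorf("record too short: %d bytes", len(record))
	}
	headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
	if len(record) <= headerLen {
		return nil, fmt.Errorf("record truncated after header: %d bytes", len(record))
	}
	msg := new(message.Message)
	if err := proto.Unmarshal(record[headerLen:], msg); err != nil {
		return nil, err
	}
	return msg, nil
}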
Example #11
func main() {
	configFile := flag.String("config", "flood.toml", "Heka Flood configuration file")
	configTest := flag.String("test", "default", "Test section to load")

	flag.Parse()

	if flag.NFlag() == 0 {
		flag.PrintDefaults()
		os.Exit(0)
	}

	var config FloodConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		client.LogError.Printf("Error decoding config file: %s", err)
		return
	}
	var test FloodTest
	var ok bool
	if test, ok = config[*configTest]; !ok {
		client.LogError.Printf("Configuration test: '%s' was not found", *configTest)
		return
	}

	if test.MsgInterval != "" {
		var err error
		if test.msgInterval, err = time.ParseDuration(test.MsgInterval); err != nil {
			client.LogError.Printf("Invalid message_interval duration %s: %s", test.MsgInterval,
				err.Error())
			return
		}
	}

	if test.PprofFile != "" {
		profFile, err := os.Create(test.PprofFile)
		if err != nil {
			client.LogError.Fatalln(err)
		}
		pprof.StartCPUProfile(profFile)
		defer pprof.StopCPUProfile()
	}

	if test.MaxMessageSize > 0 {
		message.SetMaxMessageSize(test.MaxMessageSize)
	}

	var sender *client.NetworkSender
	var err error
	if test.UseTls {
		var goTlsConfig *tls.Config
		goTlsConfig, err = tcp.CreateGoTlsConfig(&test.Tls)
		if err != nil {
			client.LogError.Fatalf("Error creating TLS config: %s\n", err)
		}
		sender, err = client.NewTlsSender(test.Sender, test.IpAddress, goTlsConfig)
	} else {
		sender, err = client.NewNetworkSender(test.Sender, test.IpAddress)
	}
	if err != nil {
		client.LogError.Fatalf("Error creating sender: %s\n", err)
	}

	unsignedEncoder := client.NewProtobufEncoder(nil)
	signedEncoder := client.NewProtobufEncoder(&test.Signer)
	oversizedEncoder := &OversizedEncoder{}

	var numTestMessages = 1
	var unsignedMessages [][]byte
	var signedMessages [][]byte
	var oversizedMessages [][]byte

	rdm := &randomDataMaker{
		src:       rand.NewSource(time.Now().UnixNano()),
		asciiOnly: test.AsciiOnly,
	}

	if test.VariableSizeMessages {
		numTestMessages = 64
		unsignedMessages = makeVariableMessage(unsignedEncoder, numTestMessages, rdm, false)
		signedMessages = makeVariableMessage(signedEncoder, numTestMessages, rdm, false)
		oversizedMessages = makeVariableMessage(oversizedEncoder, 1, rdm, true)
	} else {
		if test.StaticMessageSize == 0 {
			test.StaticMessageSize = 1000
		}
		unsignedMessages = makeFixedMessage(unsignedEncoder, test.StaticMessageSize,
			rdm)
		signedMessages = makeFixedMessage(signedEncoder, test.StaticMessageSize,
			rdm)
	}
	// wait for sigint
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT)
	var msgsSent, bytesSent, msgsDelivered uint64
	var corruptPercentage, lastCorruptPercentage, signedPercentage, lastSignedPercentage, oversizedPercentage, lastOversizedPercentage float64
	var corrupt bool

	// set up counter loop
	ticker := time.NewTicker(time.Duration(time.Second))
	go timerLoop(&msgsSent, &bytesSent, ticker)

	test.CorruptPercentage /= 100.0
	test.SignedPercentage /= 100.0
	test.OversizedPercentage /= 100.0

	var buf []byte
	for gotsigint := false; !gotsigint; {
		runtime.Gosched()
		select {
		case <-sigChan:
			gotsigint = true
			continue
		default:
		}
		msgId := rand.Int() % numTestMessages
		corruptPercentage = math.Floor(float64(msgsSent) * test.CorruptPercentage)
		if corruptPercentage != lastCorruptPercentage {
			lastCorruptPercentage = corruptPercentage
			corrupt = true
		} else {
			corrupt = false
		}
		signedPercentage = math.Floor(float64(msgsSent) * test.SignedPercentage)
		if signedPercentage != lastSignedPercentage {
			lastSignedPercentage = signedPercentage
			buf = signedMessages[msgId]
		} else {
			oversizedPercentage = math.Floor(float64(msgsSent) * test.OversizedPercentage)
			if oversizedPercentage != lastOversizedPercentage {
				lastOversizedPercentage = oversizedPercentage
				buf = oversizedMessages[0]
			} else {
				buf = unsignedMessages[msgId]
			}
		}
		bytesSent += uint64(len(buf))
		if err = sendMessage(sender, buf, corrupt); err != nil {
			client.LogError.Printf("Error sending message: %s\n", err.Error())
		} else {
			msgsDelivered++
		}
		msgsSent++
		if test.NumMessages != 0 && msgsSent >= test.NumMessages {
			break
		}
		if test.msgInterval != 0 {
			time.Sleep(test.msgInterval)
		}
	}
	sender.Close()
	client.LogInfo.Println("Clean shutdown: ", msgsSent, " messages sent; ",
		msgsDelivered, " messages delivered.")
}
Example #12
func AMQPPluginSpec(c gs.Context) {
	t := &pipeline_ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	config := NewPipelineConfig(nil)

	// Our two user/conn waitgroups.
	ug := new(sync.WaitGroup)
	cg := new(sync.WaitGroup)

	// Setup the mock channel.
	mch := NewMockAMQPChannel(ctrl)

	// Setup the mock amqpHub with the mock chan return.
	aqh := NewMockAMQPConnectionHub(ctrl)
	aqh.EXPECT().GetChannel("", AMQPDialer{}).Return(mch, ug, cg, nil)

	errChan := make(chan error, 1)
	bytesChan := make(chan []byte, 1)

	c.Specify("An amqp input", func() {
		// Setup all the mock calls for Init.
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)
		mch.EXPECT().QueueDeclare("", false, true, false, false,
			gomock.Any()).Return(amqp.Queue{}, nil)
		mch.EXPECT().QueueBind("", "test", "", false, gomock.Any()).Return(nil)
		mch.EXPECT().Qos(2, 0, false).Return(nil)

		ith := new(plugins_ts.InputTestHelper)
		ith.Msg = pipeline_ts.GetTestMessage()
		ith.Pack = NewPipelinePack(config.InputRecycleChan())

		// Set up relevant mocks.
		ith.MockHelper = NewMockPluginHelper(ctrl)
		ith.MockInputRunner = NewMockInputRunner(ctrl)
		ith.MockSplitterRunner = NewMockSplitterRunner(ctrl)
		ith.PackSupply = make(chan *PipelinePack, 1)

		ith.MockInputRunner.EXPECT().NewSplitterRunner("").Return(ith.MockSplitterRunner)

		amqpInput := new(AMQPInput)
		amqpInput.amqpHub = aqh
		config := amqpInput.ConfigStruct().(*AMQPInputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"
		config.QueueTTL = 300000

		err := amqpInput.Init(config)
		c.Assume(err, gs.IsNil)
		c.Expect(amqpInput.ch, gs.Equals, mch)

		c.Specify("consumes a text message", func() {
			// Create a channel to send data to the input. Drop a message on
			// there and close the channel.
			streamChan := make(chan amqp.Delivery, 1)
			ack := plugins_ts.NewMockAcknowledger(ctrl)
			ack.EXPECT().Ack(gomock.Any(), false)
			streamChan <- amqp.Delivery{
				ContentType:  "text/plain",
				Body:         []byte("This is a message"),
				Timestamp:    time.Now(),
				Acknowledger: ack,
			}
			mch.EXPECT().Consume("", "", false, false, false, false,
				gomock.Any()).Return(streamChan, nil)

			// Increase the usage since Run decrements it on close.
			ug.Add(1)

			splitCall := ith.MockSplitterRunner.EXPECT().SplitBytes(gomock.Any(),
				nil)
			splitCall.Do(func(recd []byte, del Deliverer) {
				bytesChan <- recd
			})
			ith.MockSplitterRunner.EXPECT().UseMsgBytes().Return(false)
			ith.MockSplitterRunner.EXPECT().SetPackDecorator(gomock.Any())
			go func() {
				err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
				errChan <- err
			}()

			msgBytes := <-bytesChan
			c.Expect(string(msgBytes), gs.Equals, "This is a message")
			close(streamChan)
			err = <-errChan
		})

		c.Specify("consumes a protobuf encoded message", func() {
			encoder := client.NewProtobufEncoder(nil)
			streamChan := make(chan amqp.Delivery, 1)

			msg := new(message.Message)
			msg.SetUuid(uuid.NewRandom())
			msg.SetTimestamp(time.Now().UnixNano())
			msg.SetType("logfile")
			msg.SetLogger("/a/nice/path")
			msg.SetSeverity(int32(0))
			msg.SetEnvVersion("0.2")
			msg.SetPid(0)
			msg.SetPayload("This is a message")
			msg.SetHostname("TestHost")

			msgBody := make([]byte, 0, 500)
			_ = encoder.EncodeMessageStream(msg, &msgBody)

			ack := plugins_ts.NewMockAcknowledger(ctrl)
			ack.EXPECT().Ack(gomock.Any(), false)

			streamChan <- amqp.Delivery{
				ContentType:  "application/hekad",
				Body:         msgBody,
				Timestamp:    time.Now(),
				Acknowledger: ack,
			}
			mch.EXPECT().Consume("", "", false, false, false, false,
				gomock.Any()).Return(streamChan, nil)

			// Increase the usage since Run decrements it on close.
			ug.Add(1)

			splitCall := ith.MockSplitterRunner.EXPECT().SplitBytes(gomock.Any(),
				nil)
			splitCall.Do(func(recd []byte, del Deliverer) {
				bytesChan <- recd
			})
			ith.MockSplitterRunner.EXPECT().UseMsgBytes().Return(true)
			go func() {
				err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
				errChan <- err
			}()

			msgBytes := <-bytesChan
			c.Expect(string(msgBytes), gs.Equals, string(msgBody))
			close(streamChan)
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})
	})

	c.Specify("An amqp output", func() {
		oth := plugins_ts.NewOutputTestHelper(ctrl)
		pConfig := NewPipelineConfig(nil)

		amqpOutput := new(AMQPOutput)
		amqpOutput.amqpHub = aqh
		config := amqpOutput.ConfigStruct().(*AMQPOutputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"

		closeChan := make(chan *amqp.Error)

		inChan := make(chan *PipelinePack, 1)

		mch.EXPECT().NotifyClose(gomock.Any()).Return(closeChan)
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)

		// Increase the usage since Run decrements it on close.
		ug.Add(1)

		// Expect the close and the InChan calls.
		aqh.EXPECT().Close("", cg)
		oth.MockOutputRunner.EXPECT().InChan().Return(inChan)

		msg := pipeline_ts.GetTestMessage()
		pack := NewPipelinePack(pConfig.InputRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		c.Specify("publishes a plain message", func() {
			encoder := new(plugins.PayloadEncoder)
			econfig := encoder.ConfigStruct().(*plugins.PayloadEncoderConfig)
			econfig.AppendNewlines = false
			encoder.Init(econfig)
			payloadBytes, err := encoder.Encode(pack)

			config.Encoder = "PayloadEncoder"
			config.ContentType = "text/plain"
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(payloadBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()

			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})

		c.Specify("publishes a serialized message", func() {
			encoder := new(ProtobufEncoder)
			encoder.SetPipelineConfig(pConfig)
			encoder.Init(nil)
			protoBytes, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(protoBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()
			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})
	})
}
Example #13
func (p *ProtobufEncoder) Init(config interface{}) error {
	p.cEncoder = client.NewProtobufEncoder(nil)
	p.sample = true
	p.sampleDenominator = Globals().SampleDenominator
	return nil
}
Example #14
func main() {
	configFile := flag.String("config", "sbmgr.toml", "Sandbox manager configuration file")
	scriptFile := flag.String("script", "xyz.lua", "Sandbox script file")
	scriptConfig := flag.String("scriptconfig", "xyz.toml", "Sandbox script configuration file")
	filterName := flag.String("filtername", "filter", "Sandbox filter name (used on unload)")
	action := flag.String("action", "load", "Sandbox manager action")
	flag.Parse()

	var config SbmgrConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		client.LogError.Printf("Error decoding config file: %s", err)
		return
	}
	var sender *client.NetworkSender
	var err error
	if config.UseTls {
		var goTlsConfig *tls.Config
		goTlsConfig, err = tcp.CreateGoTlsConfig(&config.Tls)
		if err != nil {
			client.LogError.Fatalf("Error creating TLS config: %s\n", err)
		}
		sender, err = client.NewTlsSender("tcp", config.IpAddress, goTlsConfig)
	} else {
		sender, err = client.NewNetworkSender("tcp", config.IpAddress)
	}
	if err != nil {
		client.LogError.Fatalf("Error creating sender: %s\n", err.Error())
	}
	encoder := client.NewProtobufEncoder(&config.Signer)
	manager := client.NewClient(sender, encoder)

	hostname, _ := os.Hostname()
	msg := &message.Message{}
	msg.SetLogger(pipeline.HEKA_DAEMON) // identify the message as 'internal' for filtering purposes
	msg.SetType("heka.control.sandbox")
	msg.SetTimestamp(time.Now().UnixNano())
	msg.SetUuid(uuid.NewRandom())
	msg.SetHostname(hostname)

	switch *action {
	case "load":
		code, err := ioutil.ReadFile(*scriptFile)
		if err != nil {
			client.LogError.Printf("Error reading scriptFile: %s\n", err.Error())
			return
		}
		msg.SetPayload(string(code))
		conf, err := ioutil.ReadFile(*scriptConfig)
		if err != nil {
			client.LogError.Printf("Error reading scriptConfig: %s\n", err.Error())
			return
		}
		f, _ := message.NewField("config", string(conf), "toml")
		msg.AddField(f)
	case "unload":
		f, _ := message.NewField("name", *filterName, "")
		msg.AddField(f)
	default:
		client.LogError.Printf("Invalid action: %s", *action)
	}

	f1, _ := message.NewField("action", *action, "")
	msg.AddField(f1)
	err = manager.SendMessage(msg)
	if err != nil {
		client.LogError.Printf("Error sending message: %s\n", err.Error())
	}
}
Example #15
func main() {
	configFile := flag.String("config", "sbmgrload.toml", "Sandbox manager load configuration file")
	action := flag.String("action", "load", "load/unload")
	numItems := flag.Int("num", 1, "Number of sandboxes to load/unload")
	flag.Parse()

	code := `
require "string"
require "table"
require "os"

lastTime = os.time() * 1e9
lastCount = 0
count = 0
rate = 0.0
rates = {}

function process_message ()
    count = count + 1
    return 0
end

function timer_event(ns)
    local msgsSent = count - lastCount
    if msgsSent == 0 then return end

    local elapsedTime = ns - lastTime
    if elapsedTime == 0 then return end

    lastCount = count
    lastTime = ns
    rate = msgsSent / (elapsedTime / 1e9)
    rates[#rates+1] = rate
    inject_payload("txt", "rate", string.format("Got %d messages. %0.2f msg/sec", count, rate))

    local samples = #rates
    if samples == 10 then -- generate a summary every 10 samples
        table.sort(rates)
	     local min = rates[1]
	     local max = rates[samples]
	     local sum = 0
        for i, val in ipairs(rates) do
            sum = sum + val
	     end
        inject_payload("txt", "summary", string.format("AGG Sum. Min: %0.2f Max: %0.2f Mean: %0.2f", min, max, sum/samples))
        rates = {}
    end
end
`
	confFmt := `
[CounterSandbox%d]
type = "SandboxFilter"
message_matcher = "Type == 'hekabench'"
ticker_interval = 1
filename = ""
preserve_data = true
`

	var config SbmgrConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		client.LogError.Printf("Error decoding config file: %s", err)
		return
	}
	var sender *client.NetworkSender
	var err error
	if config.UseTls {
		var goTlsConfig *tls.Config
		goTlsConfig, err = tcp.CreateGoTlsConfig(&config.Tls)
		if err != nil {
			client.LogError.Fatalf("Error creating TLS config: %s\n", err)
		}
		sender, err = client.NewTlsSender("tcp", config.IpAddress, goTlsConfig)
	} else {
		sender, err = client.NewNetworkSender("tcp", config.IpAddress)
	}
	if err != nil {
		client.LogError.Fatalf("Error creating sender: %s\n", err.Error())
	}
	encoder := client.NewProtobufEncoder(&config.Signer)
	manager := client.NewClient(sender, encoder)
	hostname, _ := os.Hostname()

	switch *action {
	case "load":
		for i := 0; i < *numItems; i++ {
			conf := fmt.Sprintf(confFmt, i)
			msg := &message.Message{}
			msg.SetLogger(pipeline.HEKA_DAEMON)
			msg.SetType("heka.control.sandbox")
			msg.SetTimestamp(time.Now().UnixNano())
			msg.SetUuid(uuid.NewRandom())
			msg.SetHostname(hostname)
			msg.SetPayload(code)
			f, _ := message.NewField("config", conf, "toml")
			msg.AddField(f)
			f1, _ := message.NewField("action", *action, "")
			msg.AddField(f1)
			err = manager.SendMessage(msg)
			if err != nil {
				client.LogError.Printf("Error sending message: %s\n", err.Error())
			}
		}
	case "unload":
		for i := 0; i < *numItems; i++ {
			msg := &message.Message{}
			msg.SetLogger(pipeline.HEKA_DAEMON)
			msg.SetType("heka.control.sandbox")
			msg.SetTimestamp(time.Now().UnixNano())
			msg.SetUuid(uuid.NewRandom())
			msg.SetHostname(hostname)
			f, _ := message.NewField("name", fmt.Sprintf("CounterSandbox%d", i), "")
			msg.AddField(f)
			f1, _ := message.NewField("action", *action, "")
			msg.AddField(f1)
			err = manager.SendMessage(msg)
			if err != nil {
				client.LogError.Printf("Error sending message: %s\n", err.Error())
			}
		}

	default:
		client.LogError.Printf("Invalid action: %s\n", *action)
	}

}
Example #16
func QueueBufferSpec(c gs.Context) {
	tmpDir, tmpErr := ioutil.TempDir("", "queuebuffer-tests")

	defer func() {
		tmpErr = os.RemoveAll(tmpDir)
		c.Expect(tmpErr, gs.Equals, nil)
	}()

	t := &ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	c.Specify("QueueBuffer Internals", func() {
		pConfig := NewPipelineConfig(nil)

		err := pConfig.RegisterDefault("HekaFramingSplitter")
		c.Assume(err, gs.IsNil)
		RegisterPlugin("FooOutput", func() interface{} {
			return &FooOutput{}
		})

		outputToml := `[FooOutput]
        message_matcher = "TRUE"
        `

		var configFile ConfigFile
		_, err = toml.Decode(outputToml, &configFile)
		c.Assume(err, gs.IsNil)
		section, ok := configFile["FooOutput"]
		c.Assume(ok, gs.IsTrue)
		maker, err := NewPluginMaker("FooOutput", pConfig, section)
		c.Assume(err, gs.IsNil)

		orTmp, err := maker.MakeRunner("FooOutput")
		c.Assume(err, gs.IsNil)
		or := orTmp.(*foRunner)

		// h := NewMockPluginHelper(ctrl)
		// h.EXPECT().PipelineConfig().Return(pConfig)

		qConfig := &QueueBufferConfig{
			FullAction:        "block",
			CursorUpdateCount: 1,
			MaxFileSize:       66000,
		}
		feeder, reader, err := NewBufferSet(tmpDir, "test", qConfig, or, pConfig)
		// bufferedOutput, err := NewBufferedOutput(tmpDir, "test", or, h, uint64(0))
		c.Assume(err, gs.IsNil)
		msg := ts.GetTestMessage()

		c.Specify("fileExists", func() {
			c.Expect(fileExists(tmpDir), gs.IsTrue)
			c.Expect(fileExists(filepath.Join(tmpDir, "test.log")), gs.IsFalse)
		})

		c.Specify("extractBufferId", func() {
			id, err := extractBufferId("555.log")
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(555))
			id, err = extractBufferId("")
			c.Expect(err, gs.Not(gs.IsNil))
			id, err = extractBufferId("a.log")
			c.Expect(err, gs.Not(gs.IsNil))
		})

		c.Specify("findBufferId", func() {
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(0))
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(0))
			fd, err := os.Create(filepath.Join(tmpDir, "4.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "5.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "6a.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(4))
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(5))
		})

		c.Specify("writeCheckpoint", func() {
			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			err := reader.writeCheckpoint("43 99999")
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(reader.checkpointFilename), gs.IsTrue)

			id, offset, err := readCheckpoint(reader.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(99999))

			err = reader.writeCheckpoint("43 1")
			c.Expect(err, gs.IsNil)
			id, offset, err = readCheckpoint(reader.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(1))
			reader.checkpointFile.Close()
		})

		c.Specify("readCheckpoint", func() {
			cp := filepath.Join(tmpDir, "cp.txt")
			file, err := os.OpenFile(cp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
			c.Expect(err, gs.IsNil)
			id, offset, err := readCheckpoint(cp)
			c.Expect(err, gs.Not(gs.IsNil))

			file.WriteString("22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint format")

			file.Seek(0, 0)
			file.WriteString("aa 22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint id")

			file.Seek(0, 0)
			file.WriteString("43 aa")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint offset")

			file.Seek(0, 0)
			file.WriteString("43 22")
			file.Close()

			id, offset, err = readCheckpoint(cp)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(22))
		})

		c.Specify("RollQueue", func() {
			feeder.queue = tmpDir
			err := feeder.RollQueue()
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(getQueueFilename(feeder.queue,
				feeder.writeId)), gs.IsTrue)
			feeder.writeFile.WriteString("this is a test item")
			feeder.writeFile.Close()

			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			reader.queue = tmpDir
			reader.writeCheckpoint(fmt.Sprintf("%d 10", feeder.writeId))
			reader.checkpointFile.Close()
			err = reader.initReadFile()
			c.Assume(err, gs.IsNil)
			buf := make([]byte, 4)
			n, err := reader.readFile.Read(buf)
			c.Expect(n, gs.Equals, 4)
			c.Expect(string(buf), gs.Equals, "test")
			feeder.writeFile.Close()
			reader.readFile.Close()
		})

		c.Specify("QueueRecord", func() {
			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			reader.queue = tmpDir
			newpack := NewPipelinePack(nil)
			newpack.Message = msg
			payload := "Write me out to the network"
			newpack.Message.SetPayload(payload)
			encoder := client.NewProtobufEncoder(nil)
			protoBytes, err := encoder.EncodeMessage(newpack.Message)
			newpack.MsgBytes = protoBytes
			expectedLen := 115

			c.Specify("adds framing", func() {
				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)
				err = feeder.QueueRecord(newpack)
				fName := getQueueFilename(feeder.queue, feeder.writeId)
				c.Expect(fileExists(fName), gs.IsTrue)
				c.Expect(err, gs.IsNil)
				feeder.writeFile.Close()

				f, err := os.Open(fName)
				c.Expect(err, gs.IsNil)

				n, record, err := reader.sRunner.GetRecordFromStream(f)
				f.Close()
				c.Expect(n, gs.Equals, expectedLen)
				c.Expect(err, gs.IsNil)
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				record = record[headerLen:]
				outMsg := new(message.Message)
				proto.Unmarshal(record, outMsg)
				c.Expect(outMsg.GetPayload(), gs.Equals, payload)
			})

			c.Specify("when queue has limit", func() {
				feeder.Config.MaxBufferSize = uint64(200)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))

				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)

				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(expectedLen))
			})

			c.Specify("when queue has limit and is full", func() {
				feeder.Config.MaxBufferSize = uint64(50)
				feeder.Config.MaxFileSize = uint64(50)

				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))
				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)
				queueFiles, err := ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				numFiles := len(queueFiles)

				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.Equals, QueueIsFull)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))

				// Bump the max queue size so it will accept a record.
				feeder.Config.MaxBufferSize = uint64(120)
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)

				// Queue should have rolled.
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, numFiles+1)

				// Try to queue one last time, it should fail.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.Equals, QueueIsFull)

				// Ensure queue didn't roll twice.
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, numFiles+1)
			})

			c.Specify("rolls when queue file hits max size", func() {
				feeder.Config.MaxFileSize = uint64(300)
				c.Assume(feeder.writeFileSize, gs.Equals, uint64(0))

				// First two shouldn't trigger roll.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen*2))
				queueFiles, err := ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, 1)

				// Third one should.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen))
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, 2)
			})
		})

		c.Specify("getQueueBufferSize", func() {
			c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(0))

			fd, _ := os.Create(filepath.Join(tmpDir, "4.log"))
			fd.WriteString("0123456789")
			fd.Close()

			fd, _ = os.Create(filepath.Join(tmpDir, "5.log"))
			fd.WriteString("0123456789")
			fd.Close()

			// Only size of *.log files should be taken in calculations.
			fd, _ = os.Create(filepath.Join(tmpDir, "random_file"))
			fd.WriteString("0123456789")
			fd.Close()

			c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(20))
		})
	})
}
Example #17
func main() {
	configFile := flag.String("config", "sbmgrload.toml", "Sandbox manager load configuration file")
	action := flag.String("action", "load", "load/unload")
	numItems := flag.Int("num", 1, "Number of sandboxes to load/unload")
	flag.Parse()

	code := `
lastTime = os.time() * 1e9
lastCount = 0
count = 0
rate = 0.0
rates = {}

function process_message ()
    count = count + 1
    return 0
end

function timer_event(ns)
    local msgsSent = count - lastCount
    if msgsSent == 0 then return end

    local elapsedTime = ns - lastTime
    if elapsedTime == 0 then return end

    lastCount = count
    lastTime = ns
    rate = msgsSent / (elapsedTime / 1e9)
    rates[#rates+1] = rate
    output(string.format("Got %d messages. %0.2f msg/sec", count, rate))
    inject_message()

    local samples = #rates
    if samples == 10 then -- generate a summary every 10 samples
        table.sort(rates)
	     local min = rates[1]
	     local max = rates[samples]
	     local sum = 0
        for i, val in ipairs(rates) do
            sum = sum + val
	     end
        output(string.format("AGG Sum. Min: %0.2f Max: %0.2f Mean: %0.2f", min, max, sum/samples))
        inject_message()
	     rates = {}
    end
end
`
	confFmt := `
[CounterSandbox%d]
type = "SandboxFilter"
message_matcher = "Type == 'hekabench'"
ticker_interval = 1.0
	[CounterSandbox%d.settings]
    type = "lua"
    filename = ""
    preserve_data = true
    memory_limit = 32767
    instruction_limit = 1000
    output_limit = 1024
`

	var config SbmgrConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		log.Printf("Error decoding config file: %s", err)
		return
	}
	sender, err := client.NewNetworkSender("tcp", config.IpAddress)
	if err != nil {
		log.Fatalf("Error creating sender: %s\n", err.Error())
	}
	encoder := client.NewProtobufEncoder(&config.Signer)
	manager := client.NewClient(sender, encoder)
	hostname, _ := os.Hostname()

	switch *action {
	case "load":
		for i := 0; i < *numItems; i++ {
			conf := fmt.Sprintf(confFmt, i, i)
			msg := &message.Message{}
			msg.SetType("heka.control.sandbox")
			msg.SetTimestamp(time.Now().UnixNano())
			msg.SetUuid(uuid.NewRandom())
			msg.SetHostname(hostname)
			msg.SetPayload(code)
			f, _ := message.NewField("config", conf, message.Field_RAW)
			msg.AddField(f)
			f1, _ := message.NewField("action", *action, message.Field_RAW)
			msg.AddField(f1)
			err = manager.SendMessage(msg)
			if err != nil {
				log.Printf("Error sending message: %s\n", err.Error())
			}
		}
	case "unload":
		for i := 0; i < *numItems; i++ {
			msg := &message.Message{}
			msg.SetType("heka.control.sandbox")
			msg.SetTimestamp(time.Now().UnixNano())
			msg.SetUuid(uuid.NewRandom())
			msg.SetHostname(hostname)
			msg.SetPayload(string(code))
			f, _ := message.NewField("name", fmt.Sprintf("CounterSandbox%d", i), message.Field_RAW)
			msg.AddField(f)
			f1, _ := message.NewField("action", *action, message.Field_RAW)
			msg.AddField(f1)
			err = manager.SendMessage(msg)
			if err != nil {
				log.Printf("Error sending message: %s\n", err.Error())
			}
		}

	default:
		log.Printf("Invalid action: %s\n", *action)
	}

}
Example #18
func (ao *AMQPOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	conf := ao.config

	var (
		pack    *PipelinePack
		msg     *message.Message
		persist uint8
		ok      bool = true
		amqpMsg amqp.Publishing
		encoder client.Encoder
		msgBody []byte = make([]byte, 0, 500)
	)
	if conf.Persistent {
		persist = uint8(1)
	} else {
		persist = uint8(0)
	}
	encoder = client.NewProtobufEncoder(nil)

	for ok {
		select {
		case <-ao.closeChan:
			ok = false
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			msg = pack.Message
			if conf.Serialize {
				if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
					or.LogError(err)
					err = nil
					pack.Recycle()
					continue
				}
				amqpMsg = amqp.Publishing{
					DeliveryMode: persist,
					Timestamp:    time.Now(),
					ContentType:  "application/hekad",
					Body:         msgBody,
				}
			} else {
				amqpMsg = amqp.Publishing{
					DeliveryMode: persist,
					Timestamp:    time.Now(),
					ContentType:  "text/plain",
					Body:         []byte(msg.GetPayload()),
				}
			}
			err = ao.ch.Publish(conf.Exchange, conf.RoutingKey,
				false, false, amqpMsg)
			if err != nil {
				ok = false
			} else {
				pack.Recycle()
			}
			msgBody = msgBody[:0]
		}
	}
	ao.usageWg.Done()
	amqpHub.Close(conf.URL, ao.connWg)
	ao.connWg.Wait()
	return
}
Example #19
func AMQPPluginSpec(c gs.Context) {
	t := &pipeline_ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	config := NewPipelineConfig(nil)

	// Our two user/conn waitgroups
	ug := new(sync.WaitGroup)
	cg := new(sync.WaitGroup)

	// Setup the mock channel
	mch := NewMockAMQPChannel(ctrl)

	// Setup the mock amqpHub with the mock chan return
	aqh := NewMockAMQPConnectionHub(ctrl)
	aqh.EXPECT().GetChannel("", AMQPDialer{}).Return(mch, ug, cg, nil)

	errChan := make(chan error, 1)

	c.Specify("An amqp input", func() {
		// Setup all the mock calls for Init
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)
		mch.EXPECT().QueueDeclare("", false, true, false, false,
			gomock.Any()).Return(amqp.Queue{}, nil)
		mch.EXPECT().QueueBind("", "test", "", false, gomock.Any()).Return(nil)
		mch.EXPECT().Qos(2, 0, false).Return(nil)

		ith := new(plugins_ts.InputTestHelper)
		ith.Msg = pipeline_ts.GetTestMessage()
		ith.Pack = NewPipelinePack(config.InputRecycleChan())

		// set up mock helper, decoder set, and packSupply channel
		ith.MockHelper = NewMockPluginHelper(ctrl)
		ith.MockInputRunner = NewMockInputRunner(ctrl)
		mockDRunner := NewMockDecoderRunner(ctrl)
		ith.PackSupply = make(chan *PipelinePack, 1)
		ith.DecodeChan = make(chan *PipelinePack)

		ith.MockInputRunner.EXPECT().InChan().Return(ith.PackSupply)

		amqpInput := new(AMQPInput)
		amqpInput.amqpHub = aqh
		config := amqpInput.ConfigStruct().(*AMQPInputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"
		config.QueueTTL = 300000

		c.Specify("with a valid setup and no decoder", func() {
			err := amqpInput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			c.Specify("consumes a message", func() {

				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(err, gs.IsNil)
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
			})
		})

		c.Specify("with a valid setup using a decoder", func() {
			decoderName := "defaultDecoder"
			config.Decoder = decoderName
			err := amqpInput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			// Mock up our default decoder runner and decoder.
			ith.MockInputRunner.EXPECT().Name().Return("AMQPInput")
			decCall := ith.MockHelper.EXPECT().DecoderRunner(decoderName, "AMQPInput-defaultDecoder")
			decCall.Return(mockDRunner, true)
			mockDecoder := NewMockDecoder(ctrl)
			mockDRunner.EXPECT().Decoder().Return(mockDecoder)

			c.Specify("consumes a message", func() {
				packs := []*PipelinePack{ith.Pack}
				mockDecoder.EXPECT().Decode(ith.Pack).Return(packs, nil)

				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
			})

			c.Specify("consumes a serialized message", func() {
				encoder := client.NewProtobufEncoder(nil)
				streamChan := make(chan amqp.Delivery, 1)

				msg := new(message.Message)
				msg.SetUuid(uuid.NewRandom())
				msg.SetTimestamp(time.Now().UnixNano())
				msg.SetType("logfile")
				msg.SetLogger("/a/nice/path")
				msg.SetSeverity(int32(0))
				msg.SetEnvVersion("0.2")
				msg.SetPid(0)
				msg.SetPayload("This is a message")
				msg.SetHostname("TestHost")

				msgBody := make([]byte, 0, 500)
				_ = encoder.EncodeMessageStream(msg, &msgBody)

				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)

				streamChan <- amqp.Delivery{
					ContentType:  "application/hekad",
					Body:         msgBody,
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the decoded pack
				mockDRunner.EXPECT().InChan().Return(ith.DecodeChan)

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				packRef := <-ith.DecodeChan
				c.Expect(ith.Pack, gs.Equals, packRef)
				// Ignore the leading 5 bytes of the encoded message, as that's the header.
				c.Expect(string(packRef.MsgBytes), gs.Equals, string(msgBody[5:]))
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(err, gs.IsNil)
			})
		})
	})

	c.Specify("An amqp output", func() {
		oth := plugins_ts.NewOutputTestHelper(ctrl)
		pConfig := NewPipelineConfig(nil)

		amqpOutput := new(AMQPOutput)
		amqpOutput.amqpHub = aqh
		config := amqpOutput.ConfigStruct().(*AMQPOutputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"

		closeChan := make(chan *amqp.Error)

		inChan := make(chan *PipelinePack, 1)

		mch.EXPECT().NotifyClose(gomock.Any()).Return(closeChan)
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)

		// Increase the usage since Run decrements it on close
		ug.Add(1)

		// Expect the close and the InChan calls
		aqh.EXPECT().Close("", cg)
		oth.MockOutputRunner.EXPECT().InChan().Return(inChan)

		msg := pipeline_ts.GetTestMessage()
		pack := NewPipelinePack(pConfig.InputRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		c.Specify("publishes a plain message", func() {
			encoder := new(plugins.PayloadEncoder)
			econfig := encoder.ConfigStruct().(*plugins.PayloadEncoderConfig)
			econfig.AppendNewlines = false
			encoder.Init(econfig)
			payloadBytes, err := encoder.Encode(pack)

			config.Encoder = "PayloadEncoder"
			config.ContentType = "text/plain"
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(payloadBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()

			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})

		c.Specify("publishes a serialized message", func() {
			encoder := new(ProtobufEncoder)
			encoder.SetPipelineConfig(pConfig)
			encoder.Init(nil)
			protoBytes, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(protoBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()
			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})
	})
}
Example #20
func main() {
	configFile := flag.String("config", "flood.toml", "Flood configuration file")
	configTest := flag.String("test", "default", "Test section to load")

	flag.Parse()

	if flag.NFlag() == 0 {
		flag.PrintDefaults()
		os.Exit(0)
	}

	var config FloodConfig
	if _, err := toml.DecodeFile(*configFile, &config); err != nil {
		log.Printf("Error decoding config file: %s", err)
		return
	}
	var test FloodTest
	var ok bool
	if test, ok = config[*configTest]; !ok {
		log.Printf("Configuration test: '%s' was not found", *configTest)
		return
	}

	if test.PprofFile != "" {
		profFile, err := os.Create(test.PprofFile)
		if err != nil {
			log.Fatalln(err)
		}
		pprof.StartCPUProfile(profFile)
		defer pprof.StopCPUProfile()
	}

	sender, err := client.NewNetworkSender(test.Sender, test.IpAddress)
	if err != nil {
		log.Fatalf("Error creating sender: %s\n", err.Error())
	}

	var unsignedEncoder client.Encoder
	var signedEncoder client.Encoder
	switch test.Encoder {
	case "json":
		unsignedEncoder = client.NewJsonEncoder(nil)
		signedEncoder = client.NewJsonEncoder(&test.Signer)
	case "protobuf":
		unsignedEncoder = client.NewProtobufEncoder(nil)
		signedEncoder = client.NewProtobufEncoder(&test.Signer)
	}

	var numTestMessages = 1
	var unsignedMessages [][]byte
	var signedMessages [][]byte

	rdm := &randomDataMaker{
		src:       rand.NewSource(time.Now().UnixNano()),
		asciiOnly: test.AsciiOnly,
	}

	if test.VariableSizeMessages {
		numTestMessages = 64
		unsignedMessages = makeVariableMessage(unsignedEncoder, numTestMessages, rdm)
		signedMessages = makeVariableMessage(signedEncoder, numTestMessages, rdm)
	} else {
		if test.StaticMessageSize == 0 {
			test.StaticMessageSize = 1000
		}
		unsignedMessages = makeFixedMessage(unsignedEncoder, test.StaticMessageSize,
			rdm)
		signedMessages = makeFixedMessage(signedEncoder, test.StaticMessageSize,
			rdm)
	}
	// wait for sigint
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT)
	var msgsSent, bytesSent uint64
	var corruptPercentage, lastCorruptPercentage, signedPercentage, lastSignedPercentage float64
	var corrupt bool

	// set up counter loop
	ticker := time.NewTicker(time.Duration(time.Second))
	go timerLoop(&msgsSent, &bytesSent, ticker)

	test.CorruptPercentage /= 100.0
	test.SignedPercentage /= 100.0

	var buf []byte
	for gotsigint := false; !gotsigint; {
		runtime.Gosched()
		select {
		case <-sigChan:
			gotsigint = true
			continue
		default:
		}
		msgId := rand.Int() % numTestMessages
		corruptPercentage = math.Floor(float64(msgsSent) * test.CorruptPercentage)
		if corruptPercentage != lastCorruptPercentage {
			lastCorruptPercentage = corruptPercentage
			corrupt = true
		} else {
			corrupt = false
		}
		signedPercentage = math.Floor(float64(msgsSent) * test.SignedPercentage)
		if signedPercentage != lastSignedPercentage {
			lastSignedPercentage = signedPercentage
			buf = signedMessages[msgId]
		} else {
			buf = unsignedMessages[msgId]
		}
		bytesSent += uint64(len(buf))
		err = sendMessage(sender, buf, corrupt)
		if err != nil {
			if !strings.Contains(err.Error(), "connection refused") {
				log.Printf("Error sending message: %s\n",
					err.Error())
			}
		} else {
			msgsSent++
			if test.NumMessages != 0 && msgsSent >= test.NumMessages {
				break
			}
		}
	}
	sender.Close()
	log.Println("Clean shutdown: ", msgsSent, " messages sent")
}