Example #1
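// Init resolves the sandbox script path, applies the optional "tz" timezone
// override from the script's config table, ensures the sandbox data directory
// exists, and rejects any script type other than "lua".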
func (s *SandboxDecoder) Init(config interface{}) (err error) {
	s.sbc = config.(*SandboxConfig)
	s.sbc.ScriptFilename = pipeline.PrependShareDir(s.sbc.ScriptFilename)
	s.sampleDenominator = pipeline.Globals().SampleDenominator

	s.tz = time.UTC
	if tz, ok := s.sbc.Config["tz"]; ok {
		s.tz, err = time.LoadLocation(tz.(string))
		if err != nil {
			return
		}
	}

	data_dir := pipeline.PrependBaseDir(DATA_DIR)
	if !fileExists(data_dir) {
		err = os.MkdirAll(data_dir, 0700)
		if err != nil {
			return
		}
	}

	switch s.sbc.ScriptType {
	case "lua":
	default:
		return fmt.Errorf("unsupported script type: %s", s.sbc.ScriptType)
	}

	s.sample = true
	return
}
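The Init methods above and below rely on a fileExists helper that is not part
of these excerpts. A minimal sketch of what such a helper might look like,
assuming it only needs to report whether the path can be stat'd:

import "os"

// fileExists reports whether path exists on disk. Sketch only; the helper the
// excerpts actually use may treat stat errors differently.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}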
Example #2
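// Init copies the encoder settings into a sandbox.SandboxConfig, creates the
// Lua sandbox, restores preserved state when a preservation file exists, and
// registers the inject_message callback that captures the encoder's output.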
func (s *SandboxEncoder) Init(config interface{}) (err error) {
	conf := config.(*SandboxEncoderConfig)
	s.sbc = &sandbox.SandboxConfig{
		ScriptType:       conf.ScriptType,
		ScriptFilename:   conf.ScriptFilename,
		ModuleDirectory:  conf.ModuleDirectory,
		PreserveData:     conf.PreserveData,
		MemoryLimit:      conf.MemoryLimit,
		InstructionLimit: conf.InstructionLimit,
		OutputLimit:      conf.OutputLimit,
		Profile:          conf.Profile,
		Config:           conf.Config,
	}
	s.sbc.ScriptFilename = pipeline.PrependShareDir(s.sbc.ScriptFilename)
	s.sampleDenominator = pipeline.Globals().SampleDenominator

	s.tz = time.UTC
	if tz, ok := s.sbc.Config["tz"]; ok {
		if s.tz, err = time.LoadLocation(tz.(string)); err != nil {
			return
		}
	}

	dataDir := pipeline.PrependBaseDir(sandbox.DATA_DIR)
	if !fileExists(dataDir) {
		if err = os.MkdirAll(dataDir, 0700); err != nil {
			return
		}
	}

	switch s.sbc.ScriptType {
	case "lua":
		s.sb, err = lua.CreateLuaSandbox(s.sbc)
	default:
		return fmt.Errorf("Unsupported script type: %s", s.sbc.ScriptType)
	}

	if err != nil {
		return fmt.Errorf("Sandbox creation failed: '%s'", err)
	}

	s.preservationFile = filepath.Join(dataDir, s.name+sandbox.DATA_EXT)
	if s.sbc.PreserveData && fileExists(s.preservationFile) {
		err = s.sb.Init(s.preservationFile, "encoder")
	} else {
		err = s.sb.Init("", "encoder")
	}
	if err != nil {
		return fmt.Errorf("Sandbox initialization failed: %s", err)
	}

	s.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		s.injected = true
		s.output = []byte(payload)
		return 0
	})
	s.sample = true
	s.cEncoder = client.NewProtobufEncoder(nil)
	return
}
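How the captured s.injected/s.output state is consumed falls outside this
excerpt. Below is a hypothetical sketch of an Encode-style method built only
from the calls shown above; the real encoder also has a fall-back path through
s.cEncoder when the script injects nothing, omitted here because its API is
not shown. The name encodeSketch is illustrative, not Heka's.

// encodeSketch runs the pack through the sandbox and, if the Lua script
// called inject_message, returns the captured bytes. Illustrative only.
func (s *SandboxEncoder) encodeSketch(pack *pipeline.PipelinePack) ([]byte, error) {
	s.injected = false
	if retval := s.sb.ProcessMessage(pack); retval != 0 {
		return nil, fmt.Errorf("sandbox error: %s", s.sb.LastError())
	}
	if !s.injected {
		return nil, nil // the script emitted nothing for this message
	}
	return s.output, nil
}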
Example #3
// Init determines the script type and creates the interpreter.
func (this *SandboxFilter) Init(config interface{}) (err error) {
	if this.sb != nil {
		return nil // no-op: already initialized
	}
	this.sbc = config.(*SandboxConfig)
	this.sbc.ScriptFilename = pipeline.PrependShareDir(this.sbc.ScriptFilename)
	this.sampleDenominator = pipeline.Globals().SampleDenominator

	data_dir := pipeline.PrependBaseDir(DATA_DIR)
	if !fileExists(data_dir) {
		err = os.MkdirAll(data_dir, 0700)
		if err != nil {
			return
		}
	}

	switch this.sbc.ScriptType {
	case "lua":
		this.sb, err = lua.CreateLuaSandbox(this.sbc)
		if err != nil {
			return
		}
	default:
		return fmt.Errorf("unsupported script type: %s", this.sbc.ScriptType)
	}

	this.preservationFile = filepath.Join(data_dir, this.name+DATA_EXT)
	if this.sbc.PreserveData && fileExists(this.preservationFile) {
		err = this.sb.Init(this.preservationFile, "filter")
	} else {
		err = this.sb.Init("", "filter")
	}

	return
}
Example #4
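// ConfigStruct returns a LogstreamerInputConfig populated with the plugin's
// default settings.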
func (li *LogstreamerInput) ConfigStruct() interface{} {
	return &LogstreamerInputConfig{
		RescanInterval:   "1m",
		ParserType:       "token",
		OldestDuration:   "720h",
		LogDirectory:     "/var/log",
		// p is this source file's import alias for the pipeline package.
		JournalDirectory: filepath.Join(p.Globals().BaseDir, "logstreamer"),
	}
}
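The config struct itself is not included in the excerpt. A hypothetical sketch
of just the fields that receive defaults above, assuming snake_case TOML tags;
the real LogstreamerInputConfig declares additional fields:

type LogstreamerInputConfig struct {
	RescanInterval   string `toml:"rescan_interval"`
	ParserType       string `toml:"parser_type"`
	OldestDuration   string `toml:"oldest_duration"`
	LogDirectory     string `toml:"log_directory"`
	JournalDirectory string `toml:"journal_directory"`
}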
Example #5
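// Decode runs the pack through the Lua sandbox, samples processing time,
// shuts the pipeline down on a fatal script error, and returns either the
// packs the script injected or the original pack when nothing was injected.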
func (s *SandboxDecoder) Decode(pack *pipeline.PipelinePack) (packs []*pipeline.PipelinePack, err error) {
	if s.sb == nil {
		err = fmt.Errorf("SandboxDecoder has been terminated")
		return
	}
	s.pack = pack
	atomic.AddInt64(&s.processMessageCount, 1)

	var startTime time.Time
	if s.sample {
		startTime = time.Now()
	}
	retval := s.sb.ProcessMessage(s.pack)
	if s.sample {
		duration := time.Since(startTime).Nanoseconds()
		s.reportLock.Lock()
		s.processMessageDuration += duration
		s.processMessageSamples++
		s.reportLock.Unlock()
	}
	s.sample = 0 == rand.Intn(s.sampleDenominator)
	if retval > 0 {
		s.err = fmt.Errorf("FATAL: %s", s.sb.LastError())
		s.dRunner.LogError(s.err)
		pipeline.Globals().ShutDown()
	}
	if retval < 0 {
		atomic.AddInt64(&s.processMessageFailures, 1)
		if s.pack != nil {
			s.err = fmt.Errorf("Failed parsing: %s", s.pack.Message.GetPayload())
		} else {
			s.err = fmt.Errorf("Failed after a successful inject_message call")
		}
		if len(s.packs) > 1 {
			for _, p := range s.packs[1:] {
				p.Recycle()
			}
		}
		s.packs = nil
	}
	if retval == 0 && s.pack != nil {
		// InjectMessage was never called, so we pass the original message through.
		packs = append(packs, pack)
		s.pack = nil
	} else {
		packs = s.packs
	}
	s.packs = nil
	err = s.err
	return
}
Example #6
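// Decode, as it appears in an alternate revision: the overall flow matches the
// previous example, but sampling uses the global DURATION_SAMPLE_DENOMINATOR
// and the method always returns whatever packs the script injected.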
func (s *SandboxDecoder) Decode(pack *pipeline.PipelinePack) (packs []*pipeline.PipelinePack, err error) {
	if s.sb == nil {
		err = s.err
		return
	}
	s.pack = pack
	atomic.AddInt64(&s.processMessageCount, 1)

	var startTime time.Time
	if s.sample {
		startTime = time.Now()
	}
	retval := s.sb.ProcessMessage(s.pack)
	if s.sample {
		duration := time.Since(startTime).Nanoseconds()
		s.reportLock.Lock()
		s.processMessageDuration += duration
		s.processMessageSamples++
		s.reportLock.Unlock()
	}
	s.sample = 0 == rand.Intn(pipeline.DURATION_SAMPLE_DENOMINATOR)
	if retval > 0 {
		s.err = errors.New("FATAL: " + s.sb.LastError())
		s.dRunner.LogError(s.err)
		pipeline.Globals().ShutDown()
	}
	if retval < 0 {
		atomic.AddInt64(&s.processMessageFailures, 1)
		s.err = fmt.Errorf("Failed parsing: %s", s.pack.Message.GetPayload())
		if len(s.packs) > 1 {
			for _, p := range s.packs[1:] {
				p.Recycle()
			}
		}
		s.packs = nil
	}
	packs = s.packs
	s.packs = nil
	err = s.err
	return
}
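Neither excerpt shows the caller's side of this contract. A hypothetical
driver loop, assuming only the behavior visible above: on error the input pack
is recycled, otherwise every returned pack is handed on. drainDecoder and
deliver are illustrative names, not Heka APIs.

// drainDecoder is a sketch of a Decode caller; it is not part of Heka.
func drainDecoder(s *SandboxDecoder, dr pipeline.DecoderRunner,
	in <-chan *pipeline.PipelinePack, deliver func(*pipeline.PipelinePack)) {
	for pack := range in {
		packs, err := s.Decode(pack)
		if err != nil {
			dr.LogError(err)
			pack.Recycle()
			continue
		}
		for _, p := range packs {
			deliver(p)
		}
	}
}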
Example #7
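// FilterSpec covers the SandboxFilter inject-count limits and data
// preservation, plus the SandboxManagerFilter's control-message skew checks,
// working-directory creation, and configuration limits.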
func FilterSpec(c gs.Context) {
	t := new(ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	fth := NewFilterTestHelper(ctrl)
	inChan := make(chan *pipeline.PipelinePack, 1)
	pConfig := pipeline.NewPipelineConfig(nil)

	c.Specify("A SandboxFilter", func() {
		sbFilter := new(SandboxFilter)
		config := sbFilter.ConfigStruct().(*sandbox.SandboxConfig)
		config.ScriptType = "lua"
		config.MemoryLimit = 32000
		config.InstructionLimit = 1000
		config.OutputLimit = 1024

		msg := getTestMessage()
		pack := pipeline.NewPipelinePack(pConfig.InjectRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		c.Specify("Over inject messages from ProcessMessage", func() {
			var timer <-chan time.Time
			fth.MockFilterRunner.EXPECT().Ticker().Return(timer)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)
			fth.MockFilterRunner.EXPECT().Name().Return("processinject").Times(3)
			fth.MockFilterRunner.EXPECT().Inject(pack).Return(true).Times(2)
			fth.MockHelper.EXPECT().PipelineConfig().Return(pConfig)
			fth.MockHelper.EXPECT().PipelinePack(uint(0)).Return(pack).Times(2)
			fth.MockFilterRunner.EXPECT().LogError(fmt.Errorf("exceeded InjectMessage count"))

			config.ScriptFilename = "../lua/testsupport/processinject.lua"
			err := sbFilter.Init(config)
			c.Assume(err, gs.IsNil)
			inChan <- pack
			close(inChan)
			sbFilter.Run(fth.MockFilterRunner, fth.MockHelper)
		})

		c.Specify("Over inject messages from TimerEvent", func() {
			var timer <-chan time.Time
			timer = time.Tick(time.Duration(1) * time.Millisecond)
			fth.MockFilterRunner.EXPECT().Ticker().Return(timer)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)
			fth.MockFilterRunner.EXPECT().Name().Return("timerinject").Times(12)
			fth.MockFilterRunner.EXPECT().Inject(pack).Return(true).Times(11)
			fth.MockHelper.EXPECT().PipelineConfig().Return(pConfig)
			fth.MockHelper.EXPECT().PipelinePack(uint(0)).Return(pack).Times(11)
			fth.MockFilterRunner.EXPECT().LogError(fmt.Errorf("exceeded InjectMessage count"))

			config.ScriptFilename = "../lua/testsupport/timerinject.lua"
			err := sbFilter.Init(config)
			c.Assume(err, gs.IsNil)
			go func() {
				time.Sleep(time.Duration(250) * time.Millisecond)
				close(inChan)
			}()
			sbFilter.Run(fth.MockFilterRunner, fth.MockHelper)
		})

		c.Specify("Preserves data", func() {
			var timer <-chan time.Time
			fth.MockFilterRunner.EXPECT().Ticker().Return(timer)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)

			config.ScriptFilename = "../lua/testsupport/serialize.lua"
			config.PreserveData = true
			sbFilter.SetName("serialize")
			err := sbFilter.Init(config)
			c.Assume(err, gs.IsNil)
			close(inChan)
			sbFilter.Run(fth.MockFilterRunner, fth.MockHelper)
			_, err = os.Stat("sandbox_preservation/serialize.data")
			c.Expect(err, gs.IsNil)
			err = os.Remove("sandbox_preservation/serialize.data")
			c.Expect(err, gs.IsNil)
		})
	})

	c.Specify("A SandboxManagerFilter", func() {
		sbmFilter := new(SandboxManagerFilter)
		config := sbmFilter.ConfigStruct().(*SandboxManagerFilterConfig)
		config.MaxFilters = 1

		origBaseDir := pipeline.Globals().BaseDir
		pipeline.Globals().BaseDir = os.TempDir()
		sbxMgrsDir := filepath.Join(pipeline.Globals().BaseDir, "sbxmgrs")
		defer func() {
			pipeline.Globals().BaseDir = origBaseDir
			tmpErr := os.RemoveAll(sbxMgrsDir)
			c.Expect(tmpErr, gs.IsNil)
		}()

		msg := getTestMessage()
		pack := pipeline.NewPipelinePack(pConfig.InputRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		c.Specify("Control message in the past", func() {
			sbmFilter.Init(config)
			pack.Message.SetTimestamp(time.Now().UnixNano() - 5e9)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)
			fth.MockFilterRunner.EXPECT().Name().Return("SandboxManagerFilter")
			fth.MockFilterRunner.EXPECT().LogError(fmt.Errorf("Discarded control message: 5 seconds skew"))
			inChan <- pack
			close(inChan)
			sbmFilter.Run(fth.MockFilterRunner, fth.MockHelper)
		})

		c.Specify("Control message in the future", func() {
			sbmFilter.Init(config)
			pack.Message.SetTimestamp(time.Now().UnixNano() + 5.9e9)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)
			fth.MockFilterRunner.EXPECT().Name().Return("SandboxManagerFilter")
			fth.MockFilterRunner.EXPECT().LogError(fmt.Errorf("Discarded control message: -5 seconds skew"))
			inChan <- pack
			close(inChan)
			sbmFilter.Run(fth.MockFilterRunner, fth.MockHelper)
		})

		c.Specify("Generates the right default working directory", func() {
			sbmFilter.Init(config)
			fth.MockFilterRunner.EXPECT().InChan().Return(inChan)
			name := "SandboxManagerFilter"
			fth.MockFilterRunner.EXPECT().Name().Return(name)
			close(inChan)
			sbmFilter.Run(fth.MockFilterRunner, fth.MockHelper)
			c.Expect(sbmFilter.workingDirectory, gs.Equals, sbxMgrsDir)
			_, err := os.Stat(sbxMgrsDir)
			c.Expect(err, gs.IsNil)
		})

		c.Specify("Sanity check the default sandbox configuration limits", func() {
			sbmFilter.Init(config)
			c.Expect(sbmFilter.memoryLimit, gs.Equals, uint(8*1024*1024))
			c.Expect(sbmFilter.instructionLimit, gs.Equals, uint(1e6))
			c.Expect(sbmFilter.outputLimit, gs.Equals, uint(63*1024))
		})

		c.Specify("Sanity check the user specified sandbox configuration limits", func() {
			config.MemoryLimit = 123456
			config.InstructionLimit = 4321
			config.OutputLimit = 8765
			sbmFilter.Init(config)
			c.Expect(sbmFilter.memoryLimit, gs.Equals, config.MemoryLimit)
			c.Expect(sbmFilter.instructionLimit, gs.Equals, config.InstructionLimit)
			c.Expect(sbmFilter.outputLimit, gs.Equals, config.OutputLimit)
		})
	})
}
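getTestMessage comes from test-support code that is not reproduced here. A
hypothetical stand-in that fills only the standard headers these specs touch;
every value below is illustrative, and the Uuid is left unset because the
excerpts do not show how it is generated.

// getTestMessageSketch is a stand-in for the real test helper.
func getTestMessageSketch() *message.Message {
	msg := new(message.Message)
	msg.SetType("TEST")
	msg.SetTimestamp(time.Now().UnixNano())
	msg.SetLogger("FilterSpec")
	msg.SetSeverity(6)
	msg.SetHostname("example.com")
	msg.SetPayload("Test Payload")
	return msg
}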
Example #8
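// SetDecoderRunner creates the Lua sandbox on first use, initializes it from
// the preservation file when one exists, and registers the inject_message
// callback that rebuilds packs, filling in any missing standard headers from
// the original message.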
func (s *SandboxDecoder) SetDecoderRunner(dr pipeline.DecoderRunner) {
	if s.sb != nil {
		return // no-op: already initialized
	}

	s.dRunner = dr
	var original *message.Message

	switch s.sbc.ScriptType {
	case "lua":
		s.sb, s.err = lua.CreateLuaSandbox(s.sbc)
	default:
		s.err = fmt.Errorf("unsupported script type: %s", s.sbc.ScriptType)
	}

	if s.err == nil {
		s.preservationFile = filepath.Join(pipeline.PrependBaseDir(DATA_DIR), dr.Name()+DATA_EXT)
		if s.sbc.PreserveData && fileExists(s.preservationFile) {
			s.err = s.sb.Init(s.preservationFile, "decoder")
		} else {
			s.err = s.sb.Init("", "decoder")
		}
	}
	if s.err != nil {
		dr.LogError(s.err)
		pipeline.Globals().ShutDown()
		return
	}

	s.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		if s.pack == nil {
			s.pack = dr.NewPack()
			if original == nil && len(s.packs) > 0 {
				original = s.packs[0].Message // payload injections have the original header data in the first pack
			}
		} else {
			original = nil // processing a new message, clear the old message
		}
		if len(payload_type) == 0 { // heka protobuf message
			if original == nil {
				original = new(message.Message)
				copyMessageHeaders(original, s.pack.Message) // save off the header values since unmarshal will wipe them out
			}
			if proto.Unmarshal([]byte(payload), s.pack.Message) != nil {
				return 1
			}
			if s.tz != time.UTC {
				const layout = "2006-01-02T15:04:05.999999999" // remove the incorrect UTC tz info
				t := time.Unix(0, s.pack.Message.GetTimestamp())
				t = t.In(time.UTC)
				ct, _ := time.ParseInLocation(layout, t.Format(layout), s.tz)
				s.pack.Message.SetTimestamp(ct.UnixNano())
			}
		} else {
			s.pack.Message.SetPayload(payload)
			ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
			s.pack.Message.AddField(ptype)
			pname, _ := message.NewField("payload_name", payload_name, "")
			s.pack.Message.AddField(pname)
		}
		if original != nil {
			// if future injections fail to set the standard headers, use the values
			// from the original message.
			if s.pack.Message.Uuid == nil {
				s.pack.Message.SetUuid(original.GetUuid())
			}
			if s.pack.Message.Timestamp == nil {
				s.pack.Message.SetTimestamp(original.GetTimestamp())
			}
			if s.pack.Message.Type == nil {
				s.pack.Message.SetType(original.GetType())
			}
			if s.pack.Message.Hostname == nil {
				s.pack.Message.SetHostname(original.GetHostname())
			}
			if s.pack.Message.Logger == nil {
				s.pack.Message.SetLogger(original.GetLogger())
			}
			if s.pack.Message.Severity == nil {
				s.pack.Message.SetSeverity(original.GetSeverity())
			}
			if s.pack.Message.Pid == nil {
				s.pack.Message.SetPid(original.GetPid())
			}
		}
		s.packs = append(s.packs, s.pack)
		s.pack = nil
		return 0
	})
}
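copyMessageHeaders is referenced above but not included in the excerpt. A
minimal sketch, assuming it only mirrors the standard header fields that the
callback later restores; the real helper may copy more.

// copyMessageHeaders copies the standard header fields from src into dst.
func copyMessageHeaders(dst, src *message.Message) {
	if dst == nil || src == nil {
		return
	}
	dst.SetUuid(src.GetUuid())
	dst.SetTimestamp(src.GetTimestamp())
	dst.SetType(src.GetType())
	dst.SetHostname(src.GetHostname())
	dst.SetLogger(src.GetLogger())
	dst.SetSeverity(src.GetSeverity())
	dst.SetPid(src.GetPid())
}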
Example #9
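// Run drives the filter's main loop: it registers the inject_message
// callback, feeds incoming packs and timer events to the Lua sandbox, tracks
// timing and backpressure, and tears the sandbox down (emitting a
// heka.sandbox-terminated message) when the script errors or runs too slowly.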
func (this *SandboxFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	inChan := fr.InChan()
	ticker := fr.Ticker()

	var (
		ok             = true
		terminated     = false
		sample         = true
		blocking       = false
		backpressure   = false
		pack           *pipeline.PipelinePack
		retval         int
		msgLoopCount   uint
		injectionCount uint
		startTime      time.Time
		slowDuration   int64 = int64(pipeline.Globals().MaxMsgProcessDuration)
		duration       int64
		capacity       = cap(inChan) - 1
	)

	this.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		if injectionCount == 0 {
			fr.LogError(fmt.Errorf("exceeded InjectMessage count"))
			return 1
		}
		injectionCount--
		pack := h.PipelinePack(msgLoopCount)
		if pack == nil {
			fr.LogError(fmt.Errorf("exceeded MaxMsgLoops = %d",
				pipeline.Globals().MaxMsgLoops))
			return 1
		}
		if len(payload_type) == 0 { // heka protobuf message
			hostname := pack.Message.GetHostname()
			err := proto.Unmarshal([]byte(payload), pack.Message)
			if err == nil {
				// do not allow filters to override the following
				pack.Message.SetType("heka.sandbox." + pack.Message.GetType())
				pack.Message.SetLogger(fr.Name())
				pack.Message.SetHostname(hostname)
			} else {
				return 1
			}
		} else {
			pack.Message.SetType("heka.sandbox-output")
			pack.Message.SetLogger(fr.Name())
			pack.Message.SetPayload(payload)
			ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
			pack.Message.AddField(ptype)
			pname, _ := message.NewField("payload_name", payload_name, "")
			pack.Message.AddField(pname)
		}
		if !fr.Inject(pack) {
			return 1
		}
		atomic.AddInt64(&this.injectMessageCount, 1)
		return 0
	})

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			injectionCount = pipeline.Globals().MaxMsgProcessInject
			msgLoopCount = pack.MsgLoopCount

			// Reading a channel's length is cheap (~1ns), but we need to check
			// the entire chain back to the router.
			backpressure = len(inChan) >= capacity ||
				fr.MatchRunner().InChanLen() >= capacity ||
				len(h.PipelineConfig().Router().InChan()) >= capacity

			// Timing a call is relatively expensive (~40ns), but when we are
			// backpressured we need a decent sample set before triggering
			// termination.
			if sample ||
				(backpressure && this.processMessageSamples < int64(capacity)) ||
				this.sbc.Profile {
				startTime = time.Now()
				sample = true
			}
			retval = this.sb.ProcessMessage(pack)
			if sample {
				duration = time.Since(startTime).Nanoseconds()
				this.reportLock.Lock()
				this.processMessageDuration += duration
				this.processMessageSamples++
				if this.sbc.Profile {
					this.profileMessageDuration = this.processMessageDuration
					this.profileMessageSamples = this.processMessageSamples
					if this.profileMessageSamples == int64(capacity)*10 {
						this.sbc.Profile = false
						// Reset the normal sampling so it isn't heavily skewed by the
						// profile values, e.g. a script that processes messages quickly
						// while being profiled and then switches to malicious code.
						this.processMessageDuration = this.profileMessageDuration / this.profileMessageSamples
						this.processMessageSamples = 1
					}
				}
				this.reportLock.Unlock()
			}
			if retval <= 0 {
				if backpressure && this.processMessageSamples >= int64(capacity) {
					if this.processMessageDuration/this.processMessageSamples > slowDuration ||
						fr.MatchRunner().GetAvgDuration() > slowDuration/5 {
						terminated = true
						blocking = true
					}
				}
				if retval < 0 {
					atomic.AddInt64(&this.processMessageFailures, 1)
				}
				sample = 0 == rand.Intn(pipeline.DURATION_SAMPLE_DENOMINATOR)
			} else {
				terminated = true
			}
			pack.Recycle()

		case t := <-ticker:
			injectionCount = pipeline.Globals().MaxMsgTimerInject
			startTime = time.Now()
			if retval = this.sb.TimerEvent(t.UnixNano()); retval != 0 {
				terminated = true
			}
			duration = time.Since(startTime).Nanoseconds()
			this.reportLock.Lock()
			this.timerEventDuration += duration
			this.timerEventSamples++
			this.reportLock.Unlock()
		}

		if terminated {
			pack := h.PipelinePack(0)
			pack.Message.SetType("heka.sandbox-terminated")
			pack.Message.SetLogger(fr.Name())
			if blocking {
				pack.Message.SetPayload("sandbox is running slowly and blocking the router")
				// no lock on the ProcessMessage variables here because there are no active writers
				message.NewInt64Field(pack.Message, "ProcessMessageCount", this.processMessageCount, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageFailures", this.processMessageFailures, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageSamples", this.processMessageSamples, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageAvgDuration",
					this.processMessageDuration/this.processMessageSamples, "ns")
				message.NewInt64Field(pack.Message, "MatchAvgDuration", fr.MatchRunner().GetAvgDuration(), "ns")
				message.NewIntField(pack.Message, "FilterChanLength", len(inChan), "count")
				message.NewIntField(pack.Message, "MatchChanLength", fr.MatchRunner().InChanLen(), "count")
				message.NewIntField(pack.Message, "RouterChanLength", len(h.PipelineConfig().Router().InChan()), "count")
			} else {
				pack.Message.SetPayload(this.sb.LastError())
			}
			fr.Inject(pack)
			break
		}
	}

	if terminated {
		go h.PipelineConfig().RemoveFilterRunner(fr.Name())
		// recycle any messages until the matcher is torn down
		for pack = range inChan {
			pack.Recycle()
		}
	}

	if this.manager != nil {
		this.manager.PluginExited()
	}
	if this.sbc.PreserveData {
		this.sb.Destroy(this.preservationFile)
	} else {
		this.sb.Destroy("")
	}
	this.sb = nil
	return
}