Example #1
func (sip *SCAMPInputPlugin) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) {
	sip.service, err = scamp.NewService(sip.conf.Service, sip.conf.Name)
	if err != nil {
		return
	}

	announcer, err := scamp.NewDiscoveryAnnouncer()
	if err != nil {
		scamp.Error.Printf("failed to create announcer: `%s`", err)
		return
	}
	announcer.Track(sip.service)
	go announcer.AnnounceLoop()

	for _, handlerConfig := range sip.conf.Handlers {
		// Copy the loop variable so each registered closure captures its own config.
		handlerConfig := handlerConfig
		scamp.Trace.Printf("registering handler: `%v`", handlerConfig)

		sip.service.Register(handlerConfig.Action, func(msg *scamp.Message, client *scamp.Client) {
			var pack *pipeline.PipelinePack

			pack = <-ir.InChan()
			pack.Message.SetUuid(uuid.NewRandom())
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetPayload(string(msg.Bytes()))
			pack.Message.SetSeverity(int32(handlerConfig.Severity))
			pack.Message.SetLogger(handlerConfig.Logger) // TODO not sure what this means
			ir.Deliver(pack)

			reply := scamp.NewMessage()
			reply.SetMessageType(scamp.MESSAGE_TYPE_REPLY)
			reply.SetEnvelope(scamp.ENVELOPE_JSON)
			reply.SetRequestId(msg.RequestId)
			reply.Write([]byte("{}"))

			scamp.Trace.Printf("sending msg: {requestId: %d, type: `%s`, envelope: `%s`, body: `%s`}", reply.RequestId, reply.MessageType, reply.Envelope, reply.Bytes())

			// Use a local error so the callback does not write to Run's named return value.
			if _, err := client.Send(reply); err != nil {
				scamp.Error.Printf("could not reply to message: `%s`", err)
				client.Close()
				return
			}
		})
	}

	sip.service.Run()
	return
}
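
For context, a plugin like the one above only becomes visible to Heka once a factory for it is registered. A minimal registration sketch, assuming Heka's standard pipeline.RegisterPlugin API; the "ScampInput" key is an assumption, not necessarily the name the real plugin registers under:

// Registration sketch; the plugin key "ScampInput" is illustrative.
func init() {
	pipeline.RegisterPlugin("ScampInput", func() interface{} {
		return new(SCAMPInputPlugin)
	})
}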
Example #2
func (di *DockerLogInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		pack *pipeline.PipelinePack
		ok   bool
	)

	hostname := h.Hostname()

	go di.attachMgr.Listen(di.logstream, di.closer)

	// Get the InputRunner's chan to receive empty PipelinePacks
	packSupply := ir.InChan()

	ok = true
	var err error
	for ok {
		select {
		case logline := <-di.logstream:
			pack = <-packSupply

			pack.Message.SetType("DockerLog")
			pack.Message.SetLogger(logline.Type) // stderr or stdout
			pack.Message.SetHostname(hostname)   // Use the host's hostname
			pack.Message.SetPayload(logline.Data)
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetUuid(uuid.NewRandom())
			for k, v := range logline.Fields {
				message.NewStringField(pack.Message, k, v)
			}

			ir.Deliver(pack)

		case err, ok = <-di.attachErrors:
			if !ok {
				err = errors.New("Docker event channel closed")
				break
			}
			ir.LogError(fmt.Errorf("Attacher error: %s", err))

		case err = <-di.stopChan:
			ok = false
		}
	}

	di.closer <- struct{}{}
	close(di.logstream)
	return err
}
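
The loop above only ends when something arrives on di.stopChan. A minimal sketch of the matching Stop method, assuming stopChan is a chan error created during plugin initialization:

// Sketch only: closing stopChan unblocks the select in Run, err stays nil,
// and the loop exits cleanly.
func (di *DockerLogInput) Stop() {
	close(di.stopChan)
}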
Example #3
func (dei *DockerEventInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	defer dei.dockerClient.RemoveEventListener(dei.eventStream)
	defer close(dei.eventStream)
	var (
		ok   bool
		err  error
		pack *pipeline.PipelinePack
	)
	hostname := h.Hostname()

	// Provides empty PipelinePacks
	packSupply := ir.InChan()

	ok = true
	for ok {
		select {
		case event := <-dei.eventStream:
			pack = <-packSupply
			pack.Message.SetType("DockerEvent")
			pack.Message.SetLogger(event.ID)
			pack.Message.SetHostname(hostname)

			payload := fmt.Sprintf("id:%s status:%s from:%s time:%d", event.ID, event.Status, event.From, event.Time)
			pack.Message.SetPayload(payload)
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetUuid(uuid.NewRandom())
			message.NewStringField(pack.Message, "ID", event.ID)
			message.NewStringField(pack.Message, "Status", event.Status)
			message.NewStringField(pack.Message, "From", event.From)
			message.NewInt64Field(pack.Message, "Time", event.Time, "ts")
			ir.Deliver(pack)
		case err = <-dei.stopChan:
			ok = false
		}
	}
	return err
}
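
Run assumes dockerClient, eventStream and stopChan have already been wired up. A hedged sketch of that setup, assuming the fsouza/go-dockerclient API; the hardcoded socket endpoint is an assumption, and the real Init would read its settings from the plugin's config:

// Sketch only: the endpoint is hardcoded here as an assumption.
func (dei *DockerEventInput) Init(config interface{}) error {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		return err
	}
	dei.dockerClient = client
	dei.eventStream = make(chan *docker.APIEvents)
	dei.stopChan = make(chan error)
	// Docker events start flowing into eventStream once the listener is added.
	return client.AddEventListener(dei.eventStream)
}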
func (input *DockerStatsInput) Run(runner pipeline.InputRunner,
	helper pipeline.PluginHelper) error {

	var pack *pipeline.PipelinePack

	input.runner = runner
	packSupply := runner.InChan()
	tickChan := runner.Ticker()

	hostname := helper.PipelineConfig().Hostname()

	for {
		select {
		case <-input.stop:
			return nil
		case <-tickChan:
		}
		var (
			previousCPU, previousSystem uint64
			mstats                      *dockerStat
			preCPUStats, stats          *docker.Stats
		)
		endpoint := "unix:///var/run/docker.sock"
		// Surface connection and listing failures instead of silently ignoring them.
		client, err := docker.NewClient(endpoint)
		if err != nil {
			runner.LogError(fmt.Errorf("docker client error: %s", err))
			continue
		}
		containers, err := client.ListContainers(docker.ListContainersOptions{Filters: map[string][]string{"status": {"running"}}})
		if err != nil {
			runner.LogError(fmt.Errorf("cannot list running containers: %s", err))
			continue
		}
		for _, container := range containers {
			if containerName, exists := input.cacheHostnames[container.ID]; !exists {
				containerName = strings.Replace(container.Names[0], "/", "", -1)
				input.cacheHostnames[container.ID] = containerName
				if input.NameFromEnv != "" {
					// Skip the env lookup when the inspect fails, so con is never dereferenced as nil.
					if con, err := client.InspectContainer(container.ID); err == nil {
						for _, value := range con.Config.Env {
							parts := strings.SplitN(value, "=", 2)
							if len(parts) == 2 && input.NameFromEnv == parts[0] {
								containerName = parts[1]
								input.cacheHostnames[container.ID] = containerName
								break
							}
						}
					}
				}
			}
			opts := docker.StatsStaticOptions{ID: container.ID, Stream: false}
			preCPUStats, _ = client.StatsStatic(opts)
			if preCPUStats == nil {
				continue
			}
			previousCPU = preCPUStats.CPUStats.CPUUsage.TotalUsage
			previousSystem = preCPUStats.CPUStats.SystemCPUUsage

			stats, _ = client.StatsStatic(opts)
			if stats == nil {
				continue
			}
			mstats = &dockerStat{}
			mstats.CPUPercent = calculateCPUPercent(previousCPU, previousSystem, stats)
			mstats.MemPercent = calculateMemPercent(stats)
			mstats.MemUsage = stats.MemoryStats.Usage
			mstats.MemLimit = stats.MemoryStats.Limit
			mstats.BlockRead, mstats.BlockWrite = calculateBlockIO(stats)
			for _, networkstat := range stats.Networks {
				// Accumulate across all interfaces rather than keeping only the last one seen.
				mstats.NetworkRx += networkstat.RxBytes
				mstats.NetworkTx += networkstat.TxBytes
			}
			pack = <-packSupply
			pack.Message.SetUuid(uuid.NewRandom())
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetType("DockerStats")
			pack.Message.SetHostname(hostname)

			containerName, _ := message.NewField("ContainerName", strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1), "")
			pack.Message.AddField(containerName)
			cpuPercent, _ := message.NewField("CPUPercent", float64(mstats.CPUPercent), "")
			pack.Message.AddField(cpuPercent)
			memPercent, _ := message.NewField("MemoryPercent", float64(mstats.MemPercent), "")
			pack.Message.AddField(memPercent)
			memLimit, _ := message.NewField("MemoryLimit", int64(mstats.MemLimit), "")
			pack.Message.AddField(memLimit)
			memUsage, _ := message.NewField("MemoryUsage", int64(mstats.MemUsage), "")
			pack.Message.AddField(memUsage)
			netInput, _ := message.NewField("NetworkInput", int64(mstats.NetworkRx), "")
			pack.Message.AddField(netInput)
			netOutput, _ := message.NewField("NetworkOutput", int64(mstats.NetworkTx), "")
			pack.Message.AddField(netOutput)
			blockInput, _ := message.NewField("BlockInput", int64(mstats.BlockRead), "")
			pack.Message.AddField(blockInput)
			blockOutput, _ := message.NewField("BlockOutput", int64(mstats.BlockWrite), "")
			pack.Message.AddField(blockOutput)

			pack.Message.SetPayload(fmt.Sprintf("container_name %s\ncpu %.2f\nmem_usage %d\nmem_limit %d\nmem %.2f\nnet_input %d\nnet_output %d\nblock_input %d\nblock_output %d",
				strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1),
				mstats.CPUPercent,
				mstats.MemUsage,
				mstats.MemLimit,
				mstats.MemPercent,
				mstats.NetworkRx,
				mstats.NetworkTx,
				mstats.BlockRead,
				mstats.BlockWrite))
			runner.Deliver(pack)
		}
	}
}
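
The CPU helper called above is not shown in this example. A hedged sketch of calculateCPUPercent, following the docker CLI's usual delta formula and assuming go-dockerclient's Stats field names; the plugin's actual implementation may differ:

// Sketch of the helper used above; the PercpuUsage-based scaling is an assumption.
func calculateCPUPercent(previousCPU, previousSystem uint64, stats *docker.Stats) float64 {
	cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
	systemDelta := float64(stats.CPUStats.SystemCPUUsage) - float64(previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(len(stats.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}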