func (out *fileOutput) init(beat string, config *outputs.MothershipConfig, topology_expire int) error {
	out.rotator.Path = config.Path
	out.rotator.Name = config.Filename
	if out.rotator.Name == "" {
		out.rotator.Name = beat
	}
	logp.Info("File output base filename set to: %v", out.rotator.Name)

	// disable bulk support
	configDisableInt := -1
	config.FlushInterval = &configDisableInt
	config.BulkMaxSize = &configDisableInt

	rotateeverybytes := uint64(config.RotateEveryKb) * 1024
	if rotateeverybytes == 0 {
		rotateeverybytes = 10 * 1024 * 1024
	}
	logp.Info("Rotate every bytes set to: %v", rotateeverybytes)
	out.rotator.RotateEveryBytes = &rotateeverybytes

	keepfiles := config.NumberOfFiles
	if keepfiles == 0 {
		keepfiles = 7
	}
	logp.Info("Number of files set to: %v", keepfiles)
	out.rotator.KeepFiles = &keepfiles

	err := out.rotator.CreateDirectory()
	if err != nil {
		return err
	}

	err = out.rotator.CheckIfConfigSane()
	if err != nil {
		return err
	}

	return nil
}
// Initialize Elasticsearch as output
func (out *ElasticsearchOutput) Init(config outputs.MothershipConfig, topology_expire int) error {

	if len(config.Protocol) == 0 {
		config.Protocol = "http"
	}

	url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path)
	con := NewElasticsearch(url, config.Username, config.Password)
	out.Conn = con

	if config.Index != "" {
		out.Index = config.Index
	} else {
		out.Index = "packetbeat"
	}

	out.TopologyExpire = 15000
	if topology_expire != 0 {
		out.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec
	}

	out.FlushInterval = 1000 * time.Millisecond
	if config.Flush_interval != nil {
		out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond
	}
	out.BulkMaxSize = 10000
	if config.Bulk_size != nil {
		out.BulkMaxSize = *config.Bulk_size
	}

	err := out.EnableTTL()
	if err != nil {
		logp.Err("Fail to set _ttl mapping: %s", err)
		return err
	}

	out.sendingQueue = make(chan BulkMsg, 1000)
	go out.SendMessagesGoroutine()

	logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", url)
	logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index)
	logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000)
	if out.FlushInterval > 0 {
		logp.Info("[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.",
			out.FlushInterval, out.BulkMaxSize)
	} else {
		logp.Info("[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.")
	}

	return nil
}
func (out *fileOutput) init(beat string, config *outputs.MothershipConfig, topology_expire int) error {
	out.rotator.Path = config.Path
	out.rotator.Name = config.Filename
	if out.rotator.Name == "" {
		out.rotator.Name = beat
	}

	// disable bulk support
	configDisableInt := -1
	config.Flush_interval = &configDisableInt
	config.Bulk_size = &configDisableInt

	rotateeverybytes := uint64(config.Rotate_every_kb) * 1024
	if rotateeverybytes == 0 {
		rotateeverybytes = 10 * 1024 * 1024
	}
	out.rotator.RotateEveryBytes = &rotateeverybytes

	keepfiles := config.Number_of_files
	if keepfiles == 0 {
		keepfiles = 7
	}
	out.rotator.KeepFiles = &keepfiles

	err := out.rotator.CreateDirectory()
	if err != nil {
		return err
	}

	err = out.rotator.CheckIfConfigSane()
	if err != nil {
		return err
	}

	return nil
}
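Both fileOutput variants above disable bulk publishing by pointing the flush-interval and bulk-size fields at a sentinel value of -1. These config fields are pointer-typed so the code can tell "not set in the config file" (nil) apart from an explicitly configured value. A minimal, self-contained sketch of that idiom, using a hypothetical exampleConfig struct rather than the real outputs.MothershipConfig:

package main

import "fmt"

// exampleConfig is a hypothetical stand-in for outputs.MothershipConfig;
// the pointer field distinguishes "unset" (nil) from an explicit value.
type exampleConfig struct {
	BulkMaxSize *int
}

// effectiveBulkSize applies the default only when the field was never set.
func effectiveBulkSize(cfg *exampleConfig, def int) int {
	if cfg.BulkMaxSize == nil {
		return def // field absent from the config file
	}
	return *cfg.BulkMaxSize // explicit value, possibly a sentinel like -1
}

func main() {
	var cfg exampleConfig
	fmt.Println(effectiveBulkSize(&cfg, 10000)) // 10000: unset, use default

	disable := -1
	cfg.BulkMaxSize = &disable
	fmt.Println(effectiveBulkSize(&cfg, 10000)) // -1: bulk support disabled
}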
// NewOutput instantiates a new output plugin instance publishing to elasticsearch.
func (f elasticsearchOutputPlugin) NewOutput(
	beat string,
	config *outputs.MothershipConfig,
	topologyExpire int,
) (outputs.Outputer, error) {

	// configure bulk size in config in case it is not set
	if config.BulkMaxSize == nil {
		bulkSize := defaultBulkSize
		config.BulkMaxSize = &bulkSize
	}

	output := &elasticsearchOutput{}
	err := output.init(beat, *config, topologyExpire)
	if err != nil {
		return nil, err
	}
	return output, nil
}
// Initialize Elasticsearch as output
func (out *elasticsearchOutput) Init(
	beat string,
	config outputs.MothershipConfig,
	topologyExpire int,
) error {

	if len(config.Protocol) == 0 {
		config.Protocol = "http"
	}

	var urls []string
	if len(config.Hosts) > 0 {
		// use hosts setting
		for _, host := range config.Hosts {
			url, err := getURL(config.Protocol, config.Path, host)
			if err != nil {
				logp.Err("Invalid host param set: %s, Error: %v", host, err)
			}
			urls = append(urls, url)
		}
	} else {
		// usage of host and port is deprecated as it is replaced by hosts
		url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path)
		urls = append(urls, url)
	}

	tlsConfig, err := outputs.LoadTLSConfig(config.TLS)
	if err != nil {
		return err
	}

	es := NewElasticsearch(urls, tlsConfig, config.Username, config.Password)
	out.Conn = es

	if config.Index != "" {
		out.Index = config.Index
	} else {
		out.Index = beat
	}

	out.TopologyExpire = 15000
	if topologyExpire != 0 {
		out.TopologyExpire = topologyExpire /*sec*/ * 1000 // millisec
	}

	if config.Max_retries != nil {
		out.Conn.SetMaxRetries(*config.Max_retries)
	}

	logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", urls)
	logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index)
	logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000)

	if config.Save_topology {
		err := out.EnableTTL()
		if err != nil {
			logp.Err("Fail to set _ttl mapping: %s", err)
			// keep trying in the background
			go func() {
				for {
					err := out.EnableTTL()
					if err == nil {
						break
					}
					logp.Err("Fail to set _ttl mapping: %s", err)
					time.Sleep(5 * time.Second)
				}
			}()
		}
	}

	return nil
}
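This variant replaces the plain fmt.Sprintf concatenation of the earlier code with a getURL helper that can reject malformed host entries. The real getURL implementation is not shown here; the sketch below is only a hypothetical illustration of the kind of normalization such a helper might perform (it assumes a default port of 9200 when the host gives none, which is an assumption, and it does not handle IPv6 literals):

package main

import (
	"fmt"
	"net"
	"strings"
)

// buildURL is a hypothetical helper, not the real getURL: it prefixes the
// scheme, appends the configured path, and falls back to an assumed default
// port of 9200 when the host does not specify one.
func buildURL(scheme, path, host string) (string, error) {
	if host == "" {
		return "", fmt.Errorf("empty host")
	}
	if !strings.Contains(host, ":") { // IPv6 literals are not handled in this sketch
		host = net.JoinHostPort(host, "9200")
	}
	return fmt.Sprintf("%s://%s%s", scheme, host, path), nil
}

func main() {
	for _, h := range []string{"localhost", "es1.example.com:9201"} {
		url, err := buildURL("http", "/elasticsearch", h)
		if err != nil {
			fmt.Println("invalid host:", h, err)
			continue
		}
		fmt.Println(url)
	}
}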
// Initialize Elasticsearch as output
func (out *elasticsearchOutput) Init(beat string, config outputs.MothershipConfig, topology_expire int) error {

	if len(config.Protocol) == 0 {
		config.Protocol = "http"
	}

	var urls []string
	if len(config.Hosts) > 0 {
		// use hosts setting
		for _, host := range config.Hosts {
			url := fmt.Sprintf("%s://%s%s", config.Protocol, host, config.Path)
			urls = append(urls, url)
		}
	} else {
		// use host and port settings
		url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path)
		urls = append(urls, url)
	}

	es := NewElasticsearch(urls, config.Username, config.Password)
	out.Conn = es

	if config.Index != "" {
		out.Index = config.Index
	} else {
		out.Index = beat
	}

	out.TopologyExpire = 15000
	if topology_expire != 0 {
		out.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec
	}

	out.FlushInterval = 1000 * time.Millisecond
	if config.Flush_interval != nil {
		out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond
	}
	out.BulkMaxSize = 10000
	if config.Bulk_size != nil {
		out.BulkMaxSize = *config.Bulk_size
	}

	if config.Max_retries != nil {
		out.Conn.SetMaxRetries(*config.Max_retries)
	}

	logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", urls)
	logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index)
	logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000)
	if out.FlushInterval > 0 {
		logp.Info("[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.",
			out.FlushInterval, out.BulkMaxSize)
	} else {
		logp.Info("[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.")
	}

	if config.Save_topology {
		err := out.EnableTTL()
		if err != nil {
			logp.Err("Fail to set _ttl mapping: %s", err)
			// keep trying in the background
			go func() {
				for {
					err := out.EnableTTL()
					if err == nil {
						break
					}
					logp.Err("Fail to set _ttl mapping: %s", err)
					time.Sleep(5 * time.Second)
				}
			}()
		}
	}

	out.sendingQueue = make(chan EventMsg, 1000)
	go out.SendMessagesGoroutine()

	return nil
}
// init configures the Elasticsearch clients, picks a connection mode, and sets
// index and topology options.
func (out *elasticsearchOutput) init(
	beat string,
	config outputs.MothershipConfig,
	topologyExpire int,
) error {
	tlsConfig, err := outputs.LoadTLSConfig(config.TLS)
	if err != nil {
		return err
	}

	// configure bulk size in config in case it is not set
	if config.BulkMaxSize == nil {
		bulkSize := defaultBulkSize
		config.BulkMaxSize = &bulkSize
	}

	clients, err := mode.MakeClients(config, makeClientFactory(beat, tlsConfig, config))
	if err != nil {
		return err
	}

	timeout := elasticsearchDefaultTimeout
	if config.Timeout != 0 {
		timeout = time.Duration(config.Timeout) * time.Second
	}

	maxRetries := defaultMaxRetries
	if config.Max_retries != nil {
		maxRetries = *config.Max_retries
	}

	var waitRetry = time.Duration(1) * time.Second
	var maxWaitRetry = time.Duration(60) * time.Second

	// select a connection mode based on the number of configured clients
	var m mode.ConnectionMode
	out.clients = clients
	if len(clients) == 1 {
		client := clients[0]
		m, err = mode.NewSingleConnectionMode(client, maxRetries,
			waitRetry, timeout, maxWaitRetry)
	} else {
		// load balancing is the default unless explicitly disabled
		loadBalance := config.LoadBalance == nil || *config.LoadBalance
		if loadBalance {
			m, err = mode.NewLoadBalancerMode(clients, maxRetries,
				waitRetry, timeout, maxWaitRetry)
		} else {
			m, err = mode.NewFailOverConnectionMode(clients, maxRetries, waitRetry, timeout)
		}
	}
	if err != nil {
		return err
	}

	if config.Save_topology {
		err := out.EnableTTL()
		if err != nil {
			logp.Err("Fail to set _ttl mapping: %s", err)
			// keep trying in the background
			go func() {
				for {
					err := out.EnableTTL()
					if err == nil {
						break
					}
					logp.Err("Fail to set _ttl mapping: %s", err)
					time.Sleep(5 * time.Second)
				}
			}()
		}
	}

	out.TopologyExpire = 15000
	if topologyExpire != 0 {
		out.TopologyExpire = topologyExpire * 1000 // millisec
	}

	out.mode = m
	if config.Index != "" {
		out.index = config.Index
	} else {
		out.index = beat
	}

	return nil
}
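The Save_topology branch in the last three variants uses the same retry pattern: attempt EnableTTL once synchronously, and if it fails, keep retrying in a background goroutine with a fixed sleep between attempts. A stripped-down, self-contained sketch of that pattern follows; enableTTL here is a hypothetical stand-in for the real out.EnableTTL() call, and the short intervals exist only so the example finishes quickly:

package main

import (
	"errors"
	"fmt"
	"time"
)

// enableTTL is a hypothetical stand-in for out.EnableTTL(); it fails a couple
// of times before succeeding, purely to exercise the retry loop.
var attempts int

func enableTTL() error {
	attempts++
	if attempts < 3 {
		return errors.New("mapping not applied yet")
	}
	return nil
}

// retryInBackground mirrors the pattern above: one synchronous attempt, then a
// goroutine that keeps retrying until the call succeeds.
func retryInBackground(interval time.Duration) {
	if err := enableTTL(); err == nil {
		return
	}
	fmt.Println("initial attempt failed, retrying in background")
	go func() {
		for {
			if err := enableTTL(); err == nil {
				fmt.Println("succeeded after", attempts, "attempts")
				return
			}
			time.Sleep(interval)
		}
	}()
}

func main() {
	retryInBackground(10 * time.Millisecond)
	time.Sleep(100 * time.Millisecond) // give the background goroutine time to finish
}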