// New creates a gelf logger using the configuration passed in on the // context. Supported context configuration variables are // gelf-address, & gelf-tag. func New(ctx logger.Context) (logger.Logger, error) { // parse gelf address address, err := parseAddress(ctx.Config["gelf-address"]) if err != nil { return nil, err } // collect extra data for GELF message hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("gelf: cannot access hostname to set source field") } // remove trailing slash from container name containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") // parse log tag tag, err := loggerutils.ParseLogTag(ctx, "") if err != nil { return nil, err } extra := map[string]interface{}{ "_container_id": ctx.ContainerID, "_container_name": string(containerName), "_image_id": ctx.ContainerImageID, "_image_name": ctx.ContainerImageName, "_command": ctx.Command(), "_tag": tag, "_created": ctx.ContainerCreated, } extraAttrs := ctx.ExtraAttributes(func(key string) string { if key[0] == '_' { return key } return "_" + key }) for k, v := range extraAttrs { extra[k] = v } // create new gelfWriter gelfWriter, err := gelf.NewWriter(address) if err != nil { return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) } return &gelfLogger{ writer: gelfWriter, ctx: ctx, hostname: hostname, extra: extra, }, nil }
// New creates a gelf logger using the configuration passed in on the
// context. Supported context configuration variables are
// gelf-address, & gelf-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	// parse gelf address
	address, err := parseAddress(ctx.Config["gelf-address"])
	if err != nil {
		return nil, err
	}

	// collect extra data for GELF message
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot access hostname to set source field")
	}

	// remove leading slash from container name (Docker reports it as "/name")
	containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/")

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, "")
	if err != nil {
		return nil, err
	}

	// per-container metadata attached to every GELF message
	fields := gelfFields{
		hostname:      hostname,
		containerID:   ctx.ContainerID,
		containerName: string(containerName),
		imageID:       ctx.ContainerImageID,
		imageName:     ctx.ContainerImageName,
		command:       ctx.Command(),
		tag:           tag,
		created:       ctx.ContainerCreated,
	}

	// create new gelfWriter
	gelfWriter, err := gelf.NewWriter(address)
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
	}

	return &gelfLogger{
		writer: gelfWriter,
		ctx:    ctx,
		fields: fields,
	}, nil
}
// New creates a gelf logger using the configuration passed in on the // context. The supported context configuration variable is gelf-address. func New(ctx logger.Context) (logger.Logger, error) { // parse gelf address address, err := parseAddress(ctx.Config["gelf-address"]) if err != nil { return nil, err } // collect extra data for GELF message hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("gelf: cannot access hostname to set source field") } // parse log tag tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) if err != nil { return nil, err } extra := map[string]interface{}{ "_container_id": ctx.ContainerID, "_container_name": ctx.Name(), "_image_id": ctx.ContainerImageID, "_image_name": ctx.ContainerImageName, "_command": ctx.Command(), "_tag": tag, "_created": ctx.ContainerCreated, } extraAttrs := ctx.ExtraAttributes(func(key string) string { if key[0] == '_' { return key } return "_" + key }) for k, v := range extraAttrs { extra[k] = v } rawExtra, err := json.Marshal(extra) if err != nil { return nil, err } // create new gelfWriter gelfWriter, err := gelf.NewWriter(address) if err != nil { return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) } if v, ok := ctx.Config["gelf-compression-type"]; ok { switch v { case "gzip": gelfWriter.CompressionType = gelf.CompressGzip case "zlib": gelfWriter.CompressionType = gelf.CompressZlib case "none": gelfWriter.CompressionType = gelf.CompressNone default: return nil, fmt.Errorf("gelf: invalid compression type %q", v) } } if v, ok := ctx.Config["gelf-compression-level"]; ok { val, err := strconv.Atoi(v) if err != nil { return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) } gelfWriter.CompressionLevel = val } return &gelfLogger{ writer: gelfWriter, ctx: ctx, hostname: hostname, rawExtra: rawExtra, }, nil }
// New creates splunk logger driver using configuration passed in context
func New(ctx logger.Context) (logger.Logger, error) {
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}
	// Parse and validate Splunk URL
	splunkURL, err := parseURL(ctx)
	if err != nil {
		return nil, err
	}
	// Splunk Token is required parameter
	splunkToken, ok := ctx.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}
	tlsConfig := &tls.Config{}
	// Splunk is using autogenerated certificates by default,
	// allow users to trust them with skipping verification
	if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}
	// If path to the root certificate is provided - load it
	if caPath, ok := ctx.Config[splunkCAPathKey]; ok {
		caCert, err := ioutil.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}
	// Optional override of the expected server name on the certificate
	if caName, ok := ctx.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}
	// A single transport/client pair is reused for all requests
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{
		Transport: transport,
	}
	// nullMessage is the template every log message is cloned from
	var nullMessage = &splunkMessage{
		Host: hostname,
	}
	// Optional parameters for messages
	nullMessage.Source = ctx.Config[splunkSourceKey]
	nullMessage.SourceType = ctx.Config[splunkSourceTypeKey]
	nullMessage.Index = ctx.Config[splunkIndexKey]
	tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}")
	if err != nil {
		return nil, err
	}
	nullMessage.Event.Tag = tag
	nullMessage.Event.Attrs = ctx.ExtraAttributes(nil)
	logger := &splunkLogger{
		client:      client,
		transport:   transport,
		url:         splunkURL.String(),
		auth:        "Splunk " + splunkToken,
		nullMessage: nullMessage,
	}
	// Fail fast if the HEC endpoint is unreachable or the token is rejected
	err = verifySplunkConnection(logger)
	if err != nil {
		return nil, err
	}
	return logger, nil
}
// Test default settings func TestDefault(t *testing.T) { hec := NewHTTPEventCollectorMock(t) go hec.Serve() ctx := logger.Context{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, }, ContainerID: "containeriid", ContainerName: "container_name", ContainerImageID: "contaimageid", ContainerImageName: "container_image_name", } hostname, err := ctx.Hostname() if err != nil { t.Fatal(err) } loggerDriver, err := New(ctx) if err != nil { t.Fatal(err) } if loggerDriver.Name() != driverName { t.Fatal("Unexpected logger driver name") } if !hec.connectionVerified { t.Fatal("By default connection should be verified") } splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) if !ok { t.Fatal("Unexpected Splunk Logging Driver type") } if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || splunkLoggerDriver.auth != "Splunk "+hec.token || splunkLoggerDriver.nullMessage.Host != hostname || splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || splunkLoggerDriver.gzipCompression != false || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { t.Fatal("Found not default values setup in Splunk Logging Driver.") } message1Time := time.Now() if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { t.Fatal(err) } message2Time := time.Now() if err := loggerDriver.Log(&logger.Message{[]byte("notajson"), "stdout", message2Time, nil, false}); err != nil { t.Fatal(err) } err = loggerDriver.Close() if err != nil { t.Fatal(err) } if len(hec.messages) != 2 { t.Fatal("Expected two messages") } if *hec.gzipEnabled { t.Fatal("Gzip should not be used") } message1 := hec.messages[0] 
if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || message1.Host != hostname || message1.Source != "" || message1.SourceType != "" || message1.Index != "" { t.Fatalf("Unexpected values of message 1 %v", message1) } if event, err := message1.EventAsMap(); err != nil { t.Fatal(err) } else { if event["line"] != "{\"a\":\"b\"}" || event["source"] != "stdout" || event["tag"] != "containeriid" || len(event) != 3 { t.Fatalf("Unexpected event in message %v", event) } } message2 := hec.messages[1] if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || message2.Host != hostname || message2.Source != "" || message2.SourceType != "" || message2.Index != "" { t.Fatalf("Unexpected values of message 1 %v", message2) } if event, err := message2.EventAsMap(); err != nil { t.Fatal(err) } else { if event["line"] != "notajson" || event["source"] != "stdout" || event["tag"] != "containeriid" || len(event) != 3 { t.Fatalf("Unexpected event in message %v", event) } } err = hec.Close() if err != nil { t.Fatal(err) } }
// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages // in the same way we get them in stdout/stderr func TestRawFormatWithoutTag(t *testing.T) { hec := NewHTTPEventCollectorMock(t) go hec.Serve() ctx := logger.Context{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, splunkFormatKey: splunkFormatRaw, tagKey: "", }, ContainerID: "containeriid", ContainerName: "/container_name", ContainerImageID: "contaimageid", ContainerImageName: "container_image_name", } hostname, err := ctx.Hostname() if err != nil { t.Fatal(err) } loggerDriver, err := New(ctx) if err != nil { t.Fatal(err) } if !hec.connectionVerified { t.Fatal("By default connection should be verified") } splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) if !ok { t.Fatal("Unexpected Splunk Logging Driver type") } if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || splunkLoggerDriver.auth != "Splunk "+hec.token || splunkLoggerDriver.nullMessage.Host != hostname || splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || splunkLoggerDriver.gzipCompression != false || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || string(splunkLoggerDriver.prefix) != "" { t.Log(string(splunkLoggerDriver.prefix) + "a") t.Fatal("Values do not match configuration.") } message1Time := time.Now() if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { t.Fatal(err) } message2Time := time.Now() if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { t.Fatal(err) } err = loggerDriver.Close() if err != nil { 
t.Fatal(err) } if len(hec.messages) != 2 { t.Fatal("Expected two messages") } message1 := hec.messages[0] if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || message1.Host != hostname || message1.Source != "" || message1.SourceType != "" || message1.Index != "" { t.Fatalf("Unexpected values of message 1 %v", message1) } if event, err := message1.EventAsString(); err != nil { t.Fatal(err) } else { if event != "{\"a\":\"b\"}" { t.Fatalf("Unexpected event in message 1 %v", event) } } message2 := hec.messages[1] if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || message2.Host != hostname || message2.Source != "" || message2.SourceType != "" || message2.Index != "" { t.Fatalf("Unexpected values of message 2 %v", message2) } if event, err := message2.EventAsString(); err != nil { t.Fatal(err) } else { if event != "notjson" { t.Fatalf("Unexpected event in message 2 %v", event) } } err = hec.Close() if err != nil { t.Fatal(err) } }
// Verify JSON format
func TestJsonFormat(t *testing.T) {
	hec := NewHTTPEventCollectorMock(t)

	go hec.Serve()

	ctx := logger.Context{
		Config: map[string]string{
			splunkURLKey:                  hec.URL(),
			splunkTokenKey:                hec.token,
			splunkFormatKey:               splunkFormatJSON,
			splunkGzipCompressionKey:      "true",
			splunkGzipCompressionLevelKey: "1",
		},
		ContainerID:        "containeriid",
		ContainerName:      "/container_name",
		ContainerImageID:   "contaimageid",
		ContainerImageName: "container_image_name",
	}

	hostname, err := ctx.Hostname()
	if err != nil {
		t.Fatal(err)
	}

	loggerDriver, err := New(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if !hec.connectionVerified {
		t.Fatal("By default connection should be verified")
	}

	splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON)
	if !ok {
		t.Fatal("Unexpected Splunk Logging Driver type")
	}

	// gzip level "1" must map to gzip.BestSpeed; everything else stays default
	if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" ||
		splunkLoggerDriver.auth != "Splunk "+hec.token ||
		splunkLoggerDriver.nullMessage.Host != hostname ||
		splunkLoggerDriver.nullMessage.Source != "" ||
		splunkLoggerDriver.nullMessage.SourceType != "" ||
		splunkLoggerDriver.nullMessage.Index != "" ||
		splunkLoggerDriver.gzipCompression != true ||
		splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed ||
		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
		cap(splunkLoggerDriver.stream) != defaultStreamChannelSize {
		t.Fatal("Values do not match configuration.")
	}

	message1Time := time.Now()
	if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil {
		t.Fatal(err)
	}
	message2Time := time.Now()
	if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil {
		t.Fatal(err)
	}

	err = loggerDriver.Close()
	if err != nil {
		t.Fatal(err)
	}

	if len(hec.messages) != 2 {
		t.Fatal("Expected two messages")
	}

	message1 := hec.messages[0]
	if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) ||
		message1.Host != hostname ||
		message1.Source != "" ||
		message1.SourceType != "" ||
		message1.Index != "" {
		t.Fatalf("Unexpected values of message 1 %v", message1)
	}

	// valid JSON input should arrive as a nested object under "line"
	if event, err := message1.EventAsMap(); err != nil {
		t.Fatal(err)
	} else {
		if event["line"].(map[string]interface{})["a"] != "b" ||
			event["source"] != "stdout" ||
			event["tag"] != "containeriid" ||
			len(event) != 3 {
			t.Fatalf("Unexpected event in message 1 %v", event)
		}
	}

	message2 := hec.messages[1]
	if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) ||
		message2.Host != hostname ||
		message2.Source != "" ||
		message2.SourceType != "" ||
		message2.Index != "" {
		t.Fatalf("Unexpected values of message 2 %v", message2)
	}

	// If message cannot be parsed as JSON - it should be sent as a line
	if event, err := message2.EventAsMap(); err != nil {
		t.Fatal(err)
	} else {
		if event["line"] != "notjson" ||
			event["source"] != "stdout" ||
			event["tag"] != "containeriid" ||
			len(event) != 3 {
			t.Fatalf("Unexpected event in message 2 %v", event)
		}
	}

	err = hec.Close()
	if err != nil {
		t.Fatal(err)
	}
}
// Verify inline format with a not default settings for most of options
func TestInlineFormatWithNonDefaultOptions(t *testing.T) {
	hec := NewHTTPEventCollectorMock(t)

	go hec.Serve()

	ctx := logger.Context{
		Config: map[string]string{
			splunkURLKey:             hec.URL(),
			splunkTokenKey:           hec.token,
			splunkSourceKey:          "mysource",
			splunkSourceTypeKey:      "mysourcetype",
			splunkIndexKey:           "myindex",
			splunkFormatKey:          splunkFormatInline,
			splunkGzipCompressionKey: "true",
			tagKey:                   "{{.ImageName}}/{{.Name}}",
			labelsKey:                "a",
		},
		ContainerID:        "containeriid",
		ContainerName:      "/container_name",
		ContainerImageID:   "contaimageid",
		ContainerImageName: "container_image_name",
		ContainerLabels: map[string]string{
			"a": "b",
		},
	}

	hostname, err := ctx.Hostname()
	if err != nil {
		t.Fatal(err)
	}

	loggerDriver, err := New(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if !hec.connectionVerified {
		t.Fatal("By default connection should be verified")
	}

	splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline)
	if !ok {
		t.Fatal("Unexpected Splunk Logging Driver type")
	}

	// gzip enabled without an explicit level falls back to DefaultCompression
	if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" ||
		splunkLoggerDriver.auth != "Splunk "+hec.token ||
		splunkLoggerDriver.nullMessage.Host != hostname ||
		splunkLoggerDriver.nullMessage.Source != "mysource" ||
		splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" ||
		splunkLoggerDriver.nullMessage.Index != "myindex" ||
		splunkLoggerDriver.gzipCompression != true ||
		splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression ||
		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
		cap(splunkLoggerDriver.stream) != defaultStreamChannelSize {
		t.Fatal("Values do not match configuration.")
	}

	messageTime := time.Now()
	if err := loggerDriver.Log(&logger.Message{[]byte("1"), "stdout", messageTime, nil, false}); err != nil {
		t.Fatal(err)
	}

	err = loggerDriver.Close()
	if err != nil {
		t.Fatal(err)
	}

	if len(hec.messages) != 1 {
		t.Fatal("Expected one message")
	}

	if !*hec.gzipEnabled {
		t.Fatal("Gzip should be used")
	}

	message := hec.messages[0]
	if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) ||
		message.Host != hostname ||
		message.Source != "mysource" ||
		message.SourceType != "mysourcetype" ||
		message.Index != "myindex" {
		t.Fatalf("Unexpected values of message %v", message)
	}

	// tag is rendered from the template and labels end up under "attrs"
	if event, err := message.EventAsMap(); err != nil {
		t.Fatal(err)
	} else {
		if event["line"] != "1" ||
			event["source"] != "stdout" ||
			event["tag"] != "container_image_name/container_name" ||
			event["attrs"].(map[string]interface{})["a"] != "b" ||
			len(event) != 4 {
			t.Fatalf("Unexpected event in message %v", event)
		}
	}

	err = hec.Close()
	if err != nil {
		t.Fatal(err)
	}
}
// New creates splunk logger driver using configuration passed in context func New(ctx logger.Context) (logger.Logger, error) { hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) } // Parse and validate Splunk URL splunkURL, err := parseURL(ctx) if err != nil { return nil, err } // Splunk Token is required parameter splunkToken, ok := ctx.Config[splunkTokenKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) } tlsConfig := &tls.Config{} // Splunk is using autogenerated certificates by default, // allow users to trust them with skipping verification if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) if err != nil { return nil, err } tlsConfig.InsecureSkipVerify = insecureSkipVerify } // If path to the root certificate is provided - load it if caPath, ok := ctx.Config[splunkCAPathKey]; ok { caCert, err := ioutil.ReadFile(caPath) if err != nil { return nil, err } caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caPool } if caName, ok := ctx.Config[splunkCANameKey]; ok { tlsConfig.ServerName = caName } transport := &http.Transport{ TLSClientConfig: tlsConfig, } client := &http.Client{ Transport: transport, } source := ctx.Config[splunkSourceKey] sourceType := ctx.Config[splunkSourceTypeKey] index := ctx.Config[splunkIndexKey] var nullMessage = &splunkMessage{ Host: hostname, Source: source, SourceType: sourceType, Index: index, } tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) if err != nil { return nil, err } attrs := ctx.ExtraAttributes(nil) logger := &splunkLogger{ client: client, transport: transport, url: splunkURL.String(), auth: "Splunk " + splunkToken, nullMessage: nullMessage, } // By default we verify connection, but we allow use to skip that verifyConnection := true if verifyConnectionStr, ok := 
ctx.Config[splunkVerifyConnectionKey]; ok { var err error verifyConnection, err = strconv.ParseBool(verifyConnectionStr) if err != nil { return nil, err } } if verifyConnection { err = verifySplunkConnection(logger) if err != nil { return nil, err } } var splunkFormat string if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { switch splunkFormatParsed { case splunkFormatInline: case splunkFormatJSON: case splunkFormatRaw: default: return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) } splunkFormat = splunkFormatParsed } else { splunkFormat = splunkFormatInline } switch splunkFormat { case splunkFormatInline: nullEvent := &splunkMessageEvent{ Tag: tag, Attrs: attrs, } return &splunkLoggerInline{logger, nullEvent}, nil case splunkFormatJSON: nullEvent := &splunkMessageEvent{ Tag: tag, Attrs: attrs, } return &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}, nil case splunkFormatRaw: var prefix bytes.Buffer prefix.WriteString(tag) prefix.WriteString(" ") for key, value := range attrs { prefix.WriteString(key) prefix.WriteString("=") prefix.WriteString(value) prefix.WriteString(" ") } return &splunkLoggerRaw{logger, prefix.Bytes()}, nil default: return nil, fmt.Errorf("Unexpected format %s", splunkFormat) } }
// New creates splunk logger driver using configuration passed in context func New(ctx logger.Context) (logger.Logger, error) { hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) } // Parse and validate Splunk URL splunkURL, err := parseURL(ctx) if err != nil { return nil, err } // Splunk Token is required parameter splunkToken, ok := ctx.Config[splunkTokenKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) } tlsConfig := &tls.Config{} // Splunk is using autogenerated certificates by default, // allow users to trust them with skipping verification if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) if err != nil { return nil, err } tlsConfig.InsecureSkipVerify = insecureSkipVerify } // If path to the root certificate is provided - load it if caPath, ok := ctx.Config[splunkCAPathKey]; ok { caCert, err := ioutil.ReadFile(caPath) if err != nil { return nil, err } caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caPool } if caName, ok := ctx.Config[splunkCANameKey]; ok { tlsConfig.ServerName = caName } gzipCompression := false if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok { gzipCompression, err = strconv.ParseBool(gzipCompressionStr) if err != nil { return nil, err } } gzipCompressionLevel := gzip.DefaultCompression if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok { var err error gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) if err != nil { return nil, err } gzipCompressionLevel = int(gzipCompressionLevel64) if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", gzipCompressionLevelStr, 
splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) return nil, err } } transport := &http.Transport{ TLSClientConfig: tlsConfig, } client := &http.Client{ Transport: transport, } source := ctx.Config[splunkSourceKey] sourceType := ctx.Config[splunkSourceTypeKey] index := ctx.Config[splunkIndexKey] var nullMessage = &splunkMessage{ Host: hostname, Source: source, SourceType: sourceType, Index: index, } // Allow user to remove tag from the messages by setting tag to empty string tag := "" if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" { tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) if err != nil { return nil, err } } attrs := ctx.ExtraAttributes(nil) var ( postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) ) logger := &splunkLogger{ client: client, transport: transport, url: splunkURL.String(), auth: "Splunk " + splunkToken, nullMessage: nullMessage, gzipCompression: gzipCompression, gzipCompressionLevel: gzipCompressionLevel, stream: make(chan *splunkMessage, streamChannelSize), postMessagesFrequency: postMessagesFrequency, postMessagesBatchSize: postMessagesBatchSize, bufferMaximum: bufferMaximum, } // By default we verify connection, but we allow use to skip that verifyConnection := true if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok { var err error verifyConnection, err = strconv.ParseBool(verifyConnectionStr) if err != nil { return nil, err } } if verifyConnection { err = verifySplunkConnection(logger) if err != nil { return nil, err } } var splunkFormat string if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { switch 
splunkFormatParsed { case splunkFormatInline: case splunkFormatJSON: case splunkFormatRaw: default: return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) } splunkFormat = splunkFormatParsed } else { splunkFormat = splunkFormatInline } var loggerWrapper splunkLoggerInterface switch splunkFormat { case splunkFormatInline: nullEvent := &splunkMessageEvent{ Tag: tag, Attrs: attrs, } loggerWrapper = &splunkLoggerInline{logger, nullEvent} case splunkFormatJSON: nullEvent := &splunkMessageEvent{ Tag: tag, Attrs: attrs, } loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} case splunkFormatRaw: var prefix bytes.Buffer if tag != "" { prefix.WriteString(tag) prefix.WriteString(" ") } for key, value := range attrs { prefix.WriteString(key) prefix.WriteString("=") prefix.WriteString(value) prefix.WriteString(" ") } loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} default: return nil, fmt.Errorf("Unexpected format %s", splunkFormat) } go loggerWrapper.worker() return loggerWrapper, nil }