// New creates a syslog logger using the configuration passed in on
// the context. Supported context configuration variables are
// syslog-address, syslog-facility, & syslog-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}")
	if err != nil {
		return nil, err
	}

	proto, address, err := parseAddress(ctx.Config["syslog-address"])
	if err != nil {
		return nil, err
	}

	facility, err := parseFacility(ctx.Config["syslog-facility"])
	if err != nil {
		return nil, err
	}

	log, err := syslog.Dial(
		proto,
		address,
		facility,
		path.Base(os.Args[0])+"/"+tag,
	)
	if err != nil {
		return nil, err
	}

	return &syslogger{
		writer: log,
	}, nil
}
func TestCreateTagSuccess(t *testing.T) {
	mockClient := newMockClient()
	ctx := logger.Context{
		ContainerName: "/test-container",
		ContainerID:   "container-abcdefghijklmnopqrstuvwxyz01234567890",
		Config:        map[string]string{"tag": "{{.Name}}/{{.FullID}}"},
	}
	logStreamName, e := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if e != nil {
		t.Errorf("Error generating tag: %q", e)
	}
	stream := &logStream{
		client:        mockClient,
		logGroupName:  groupName,
		logStreamName: logStreamName,
	}
	mockClient.createLogStreamResult <- &createLogStreamResult{}

	err := stream.create()

	if err != nil {
		t.Errorf("Received unexpected err: %v\n", err)
	}
	argument := <-mockClient.createLogStreamArgument
	if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" {
		t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890")
	}
}
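For context, tag templates such as "{{.Name}}/{{.FullID}}", "{{.ID}}", and "docker.{{.ID}}" are ordinary Go text/template strings rendered against fields of the logging context. The snippet below is a minimal, self-contained sketch of that rendering using only the standard library; the tagContext struct, its fields, and the renderTag helper are illustrative stand-ins, not the actual loggerutils.ParseLogTag implementation.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// tagContext is a hypothetical stand-in for the fields a tag template can reference.
type tagContext struct {
	ID     string // truncated container ID
	FullID string // full container ID
	Name   string // container name without the leading slash
}

// renderTag executes a tag template against ctx, falling back to defaultTemplate
// when tmpl is empty, mirroring the ParseLogTag(ctx, defaultTemplate) call pattern above.
func renderTag(tmpl, defaultTemplate string, ctx tagContext) (string, error) {
	if tmpl == "" {
		tmpl = defaultTemplate
	}
	t, err := template.New("log-tag").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, ctx); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	ctx := tagContext{
		ID:     "container-abc",
		FullID: "container-abcdefghijklmnopqrstuvwxyz01234567890",
		Name:   "test-container",
	}
	tag, err := renderTag("{{.Name}}/{{.FullID}}", "{{.ID}}", ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(tag) // test-container/container-abcdefghijklmnopqrstuvwxyz01234567890
}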
// New creates an awslogs logger using the configuration passed in on the
// context. Supported context configuration variables are awslogs-region,
// awslogs-group, and awslogs-stream. When available, configuration is
// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID,
// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and
// the EC2 Instance Metadata Service.
func New(ctx logger.Context) (logger.Logger, error) {
	logGroupName := ctx.Config[logGroupKey]
	logStreamName, err := loggerutils.ParseLogTag(ctx, "{{.FullID}}")
	if err != nil {
		return nil, err
	}

	if ctx.Config[logStreamKey] != "" {
		logStreamName = ctx.Config[logStreamKey]
	}
	client, err := newAWSLogsClient(ctx)
	if err != nil {
		return nil, err
	}
	containerStream := &logStream{
		logStreamName: logStreamName,
		logGroupName:  logGroupName,
		client:        client,
		messages:      make(chan *logger.Message, 4096),
	}
	err = containerStream.create()
	if err != nil {
		return nil, err
	}
	go containerStream.collectBatch()

	return containerStream, nil
}
func parseConfig(ctx logger.Context) (string, int, string, error) {
	host := defaultHostName
	port := defaultPort

	config := ctx.Config

	tag, err := loggerutils.ParseLogTag(ctx, "docker.{{.ID}}")
	if err != nil {
		return "", 0, "", err
	}

	if address := config["fluentd-address"]; address != "" {
		if h, p, err := net.SplitHostPort(address); err != nil {
			if !strings.Contains(err.Error(), "missing port in address") {
				return "", 0, "", err
			}
			// The address has no port; SplitHostPort returns an empty host here,
			// so use the raw address as the host and keep the default port.
			host = address
		} else {
			portnum, err := strconv.Atoi(p)
			if err != nil {
				return "", 0, "", err
			}
			host = h
			port = portnum
		}
	}

	return host, port, tag, nil
}
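The error-string check above is the usual way to detect an address that lacks a port. The standard-library sketch below shows why the fallback matters: net.SplitHostPort returns an empty host together with the "missing port in address" error, so the caller must substitute the raw address (or a default) itself. splitWithDefault is an illustrative helper, not part of the driver.

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitWithDefault splits "host:port", falling back to the raw address and a
// default port when no port is present.
func splitWithDefault(address string, defaultPort int) (string, int, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		if !strings.Contains(err.Error(), "missing port in address") {
			return "", 0, err
		}
		// No port given: host is "" at this point, so keep the address itself.
		return address, defaultPort, nil
	}
	portnum, err := strconv.Atoi(port)
	if err != nil {
		return "", 0, err
	}
	return host, portnum, nil
}

func main() {
	fmt.Println(splitWithDefault("fluentdhost:24224", 24224)) // fluentdhost 24224 <nil>
	fmt.Println(splitWithDefault("fluentdhost", 24224))       // fluentdhost 24224 <nil>
}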
// New creates a fluentd logger using the configuration passed in on
// the context. Supported context configuration variables are
// fluentd-address & fluentd-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	host, port, err := parseAddress(ctx.Config["fluentd-address"])
	if err != nil {
		return nil, err
	}

	tag, err := loggerutils.ParseLogTag(ctx, "docker.{{.ID}}")
	if err != nil {
		return nil, err
	}

	extra := ctx.ExtraAttributes(nil)
	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
	// The logger tries to reconnect 2**32 - 1 times and only fails (and panics)
	// after 204 years [1.5 ** (2**32 - 1) - 1 seconds].
	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
	if err != nil {
		return nil, err
	}
	return &fluentd{
		tag:           tag,
		containerID:   ctx.ContainerID,
		containerName: ctx.ContainerName,
		writer:        log,
		extra:         extra,
	}, nil
}
// New creates a journald logger using the configuration passed in on
// the context.
func New(ctx logger.Context) (logger.Logger, error) {
	if !journal.Enabled() {
		return nil, fmt.Errorf("journald is not enabled on this host")
	}
	// Strip a leading slash so that people can search for
	// CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo.
	name := ctx.ContainerName
	if name[0] == '/' {
		name = name[1:]
	}

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	vars := map[string]string{
		"CONTAINER_ID":      ctx.ContainerID[:12],
		"CONTAINER_ID_FULL": ctx.ContainerID,
		"CONTAINER_NAME":    name,
		"CONTAINER_TAG":     tag,
	}
	extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod)
	for k, v := range extraAttrs {
		vars[k] = v
	}
	return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil
}
// New creates a gelf logger using the configuration passed in on the
// context. Supported context configuration variables are
// gelf-address, & gelf-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	// parse gelf address
	address, err := parseAddress(ctx.Config["gelf-address"])
	if err != nil {
		return nil, err
	}

	// collect extra data for GELF message
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot access hostname to set source field")
	}

	// remove leading slash from container name
	containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/")

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, "")
	if err != nil {
		return nil, err
	}

	extra := map[string]interface{}{
		"_container_id":   ctx.ContainerID,
		"_container_name": string(containerName),
		"_image_id":       ctx.ContainerImageID,
		"_image_name":     ctx.ContainerImageName,
		"_command":        ctx.Command(),
		"_tag":            tag,
		"_created":        ctx.ContainerCreated,
	}

	extraAttrs := ctx.ExtraAttributes(func(key string) string {
		if key[0] == '_' {
			return key
		}
		return "_" + key
	})
	for k, v := range extraAttrs {
		extra[k] = v
	}

	// create new gelfWriter
	gelfWriter, err := gelf.NewWriter(address)
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
	}

	return &gelfLogger{
		writer:   gelfWriter,
		ctx:      ctx,
		hostname: hostname,
		extra:    extra,
	}, nil
}
// New creates a syslog logger using the configuration passed in on
// the context. Supported context configuration variables are
// syslog-address, syslog-facility, syslog-format, syslog-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}")
	if err != nil {
		return nil, err
	}

	proto, address, err := parseAddress(ctx.Config["syslog-address"])
	if err != nil {
		return nil, err
	}

	facility, err := parseFacility(ctx.Config["syslog-facility"])
	if err != nil {
		return nil, err
	}

	syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"])
	if err != nil {
		return nil, err
	}

	logTag := path.Base(os.Args[0]) + "/" + tag

	var log *syslog.Writer
	if proto == secureProto {
		tlsConfig, tlsErr := parseTLSConfig(ctx.Config)
		if tlsErr != nil {
			return nil, tlsErr
		}
		log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig)
	} else {
		log, err = syslog.Dial(proto, address, facility, logTag)
	}

	if err != nil {
		return nil, err
	}

	log.SetFormatter(syslogFormatter)
	log.SetFramer(syslogFramer)

	return &syslogger{
		writer: log,
	}, nil
}
// New creates a gelf logger using the configuration passed in on the
// context. Supported context configuration variables are
// gelf-address, & gelf-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	// parse gelf address
	address, err := parseAddress(ctx.Config["gelf-address"])
	if err != nil {
		return nil, err
	}

	// collect extra data for GELF message
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot access hostname to set source field")
	}

	// remove leading slash from container name
	containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/")

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, "")
	if err != nil {
		return nil, err
	}

	fields := gelfFields{
		hostname:      hostname,
		containerID:   ctx.ContainerID,
		containerName: string(containerName),
		imageID:       ctx.ContainerImageID,
		imageName:     ctx.ContainerImageName,
		command:       ctx.Command(),
		tag:           tag,
		created:       ctx.ContainerCreated,
	}

	// create new gelfWriter
	gelfWriter, err := gelf.NewWriter(address)
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
	}

	return &gelfLogger{
		writer: gelfWriter,
		ctx:    ctx,
		fields: fields,
	}, nil
}
// New creates a syslog logger using the configuration passed in on
// the context. Supported context configuration variables are
// syslog-address, syslog-facility, syslog-format.
func New(info logger.Info) (logger.Logger, error) {
	tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	proto, address, err := parseAddress(info.Config["syslog-address"])
	if err != nil {
		return nil, err
	}

	facility, err := parseFacility(info.Config["syslog-facility"])
	if err != nil {
		return nil, err
	}

	syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto)
	if err != nil {
		return nil, err
	}

	var log *syslog.Writer
	if proto == secureProto {
		tlsConfig, tlsErr := parseTLSConfig(info.Config)
		if tlsErr != nil {
			return nil, tlsErr
		}
		log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig)
	} else {
		log, err = syslog.Dial(proto, address, facility, tag)
	}

	if err != nil {
		return nil, err
	}

	log.SetFormatter(syslogFormatter)
	log.SetFramer(syslogFramer)

	return &syslogger{
		writer: log,
	}, nil
}
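For orientation, the syslog.Writer used above comes from a vendored, RFC 5424-capable syslog package (hence DialWithTLSConfig, SetFormatter, and SetFramer); the standard library's log/syslog only offers the plain Dial variant. The sketch below uses the standard library to show the same proto/address/facility/tag shape. The UDP endpoint and tag are hypothetical values, and this is not the driver's actual code path.

package main

import (
	"fmt"
	"log/syslog"
)

func main() {
	// A minimal standard-library analogue of syslog.Dial(proto, address, facility, tag)
	// as used above. The address here is an assumed local endpoint for illustration.
	w, err := syslog.Dial("udp", "127.0.0.1:514", syslog.LOG_DAEMON|syslog.LOG_INFO, "my-container-tag")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer w.Close()

	// Info writes a message with severity INFO and the facility chosen at Dial time.
	if err := w.Info("hello from the syslog sketch"); err != nil {
		fmt.Println("write failed:", err)
	}
}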
// New creates a journald logger using the configuration passed in on
// the context.
func New(ctx logger.Context) (logger.Logger, error) {
	if !journal.Enabled() {
		return nil, fmt.Errorf("journald is not enabled on this host")
	}

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	vars := map[string]string{
		"CONTAINER_ID":      ctx.ContainerID[:12],
		"CONTAINER_ID_FULL": ctx.ContainerID,
		"CONTAINER_NAME":    ctx.Name(),
		"CONTAINER_TAG":     tag,
	}
	extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod)
	for k, v := range extraAttrs {
		vars[k] = v
	}
	return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil
}
// New creates a fluentd logger using the configuration passed in on
// the context. Supported context configuration variables are
// fluentd-address & fluentd-tag.
func New(ctx logger.Context) (logger.Logger, error) {
	host, port, err := parseAddress(ctx.Config["fluentd-address"])
	if err != nil {
		return nil, err
	}

	tag, err := loggerutils.ParseLogTag(ctx, "docker.{{.ID}}")
	if err != nil {
		return nil, err
	}

	failOnStartupError, err := loggerutils.ParseFailOnStartupErrorFlag(ctx)
	if err != nil {
		return nil, err
	}

	bufferLimit, err := parseBufferLimit(ctx.Config["buffer-limit"])
	if err != nil {
		return nil, err
	}

	extra := ctx.ExtraAttributes(nil)
	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
	// The logger tries to reconnect 2**32 - 1 times and only fails (and panics)
	// after 204 years [1.5 ** (2**32 - 1) - 1 seconds].
	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32, BufferLimit: bufferLimit})
	if err != nil {
		if failOnStartupError {
			return nil, err
		}
		logrus.Warnf("fluentd cannot connect to configured endpoint. Ignoring as instructed. Error: %q", err)
	}
	return &fluentd{
		tag:           tag,
		containerID:   ctx.ContainerID,
		containerName: ctx.ContainerName,
		writer:        log,
		extra:         extra,
	}, nil
}
// New creates a gelf logger using the configuration passed in on the
// context. The supported context configuration variable is gelf-address.
func New(ctx logger.Context) (logger.Logger, error) {
	// parse gelf address
	address, err := parseAddress(ctx.Config["gelf-address"])
	if err != nil {
		return nil, err
	}

	// collect extra data for GELF message
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot access hostname to set source field")
	}

	// parse log tag
	tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	extra := map[string]interface{}{
		"_container_id":   ctx.ContainerID,
		"_container_name": ctx.Name(),
		"_image_id":       ctx.ContainerImageID,
		"_image_name":     ctx.ContainerImageName,
		"_command":        ctx.Command(),
		"_tag":            tag,
		"_created":        ctx.ContainerCreated,
	}

	extraAttrs := ctx.ExtraAttributes(func(key string) string {
		if key[0] == '_' {
			return key
		}
		return "_" + key
	})
	for k, v := range extraAttrs {
		extra[k] = v
	}

	rawExtra, err := json.Marshal(extra)
	if err != nil {
		return nil, err
	}

	// create new gelfWriter
	gelfWriter, err := gelf.NewWriter(address)
	if err != nil {
		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
	}

	if v, ok := ctx.Config["gelf-compression-type"]; ok {
		switch v {
		case "gzip":
			gelfWriter.CompressionType = gelf.CompressGzip
		case "zlib":
			gelfWriter.CompressionType = gelf.CompressZlib
		case "none":
			gelfWriter.CompressionType = gelf.CompressNone
		default:
			return nil, fmt.Errorf("gelf: invalid compression type %q", v)
		}
	}

	if v, ok := ctx.Config["gelf-compression-level"]; ok {
		val, err := strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err)
		}
		gelfWriter.CompressionLevel = val
	}

	return &gelfLogger{
		writer:   gelfWriter,
		ctx:      ctx,
		hostname: hostname,
		rawExtra: rawExtra,
	}, nil
}
// New creates splunk logger driver using configuration passed in context
func New(ctx logger.Context) (logger.Logger, error) {
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}

	// Parse and validate Splunk URL
	splunkURL, err := parseURL(ctx)
	if err != nil {
		return nil, err
	}

	// Splunk Token is a required parameter
	splunkToken, ok := ctx.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}

	tlsConfig := &tls.Config{}

	// Splunk uses autogenerated certificates by default;
	// allow users to trust them by skipping verification.
	if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}

	// If a path to the root certificate is provided, load it.
	if caPath, ok := ctx.Config[splunkCAPathKey]; ok {
		caCert, err := ioutil.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}

	if caName, ok := ctx.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{
		Transport: transport,
	}

	source := ctx.Config[splunkSourceKey]
	sourceType := ctx.Config[splunkSourceTypeKey]
	index := ctx.Config[splunkIndexKey]

	var nullMessage = &splunkMessage{
		Host:       hostname,
		Source:     source,
		SourceType: sourceType,
		Index:      index,
	}

	tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	attrs := ctx.ExtraAttributes(nil)

	logger := &splunkLogger{
		client:      client,
		transport:   transport,
		url:         splunkURL.String(),
		auth:        "Splunk " + splunkToken,
		nullMessage: nullMessage,
	}

	// By default we verify the connection, but we allow the user to skip that.
	verifyConnection := true
	if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok {
		var err error
		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
		if err != nil {
			return nil, err
		}
	}
	if verifyConnection {
		err = verifySplunkConnection(logger)
		if err != nil {
			return nil, err
		}
	}

	var splunkFormat string
	if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok {
		switch splunkFormatParsed {
		case splunkFormatInline:
		case splunkFormatJSON:
		case splunkFormatRaw:
		default:
			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
		}
		splunkFormat = splunkFormatParsed
	} else {
		splunkFormat = splunkFormatInline
	}

	switch splunkFormat {
	case splunkFormatInline:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}
		return &splunkLoggerInline{logger, nullEvent}, nil
	case splunkFormatJSON:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}
		return &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}, nil
	case splunkFormatRaw:
		var prefix bytes.Buffer
		prefix.WriteString(tag)
		prefix.WriteString(" ")
		for key, value := range attrs {
			prefix.WriteString(key)
			prefix.WriteString("=")
			prefix.WriteString(value)
			prefix.WriteString(" ")
		}
		return &splunkLoggerRaw{logger, prefix.Bytes()}, nil
	default:
		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
	}
}
// New creates splunk logger driver using configuration passed in context
func New(ctx logger.Context) (logger.Logger, error) {
	hostname, err := ctx.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}

	// Parse and validate Splunk URL
	splunkURL, err := parseURL(ctx)
	if err != nil {
		return nil, err
	}

	// Splunk Token is a required parameter
	splunkToken, ok := ctx.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}

	tlsConfig := &tls.Config{}

	// Splunk uses autogenerated certificates by default;
	// allow users to trust them by skipping verification.
	if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}

	// If a path to the root certificate is provided, load it.
	if caPath, ok := ctx.Config[splunkCAPathKey]; ok {
		caCert, err := ioutil.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}

	if caName, ok := ctx.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{
		Transport: transport,
	}

	var nullMessage = &splunkMessage{
		Host: hostname,
	}

	// Optional parameters for messages
	nullMessage.Source = ctx.Config[splunkSourceKey]
	nullMessage.SourceType = ctx.Config[splunkSourceTypeKey]
	nullMessage.Index = ctx.Config[splunkIndexKey]

	tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}")
	if err != nil {
		return nil, err
	}
	nullMessage.Event.Tag = tag
	nullMessage.Event.Attrs = ctx.ExtraAttributes(nil)

	logger := &splunkLogger{
		client:      client,
		transport:   transport,
		url:         splunkURL.String(),
		auth:        "Splunk " + splunkToken,
		nullMessage: nullMessage,
	}

	err = verifySplunkConnection(logger)
	if err != nil {
		return nil, err
	}

	return logger, nil
}
// New creates a fluentd logger using the configuration passed in on
// the context. The supported context configuration variable is
// fluentd-address.
func New(ctx logger.Context) (logger.Logger, error) {
	loc, err := parseAddress(ctx.Config[addressKey])
	if err != nil {
		return nil, err
	}

	tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}

	extra := ctx.ExtraAttributes(nil)

	bufferLimit := defaultBufferLimit
	if ctx.Config[bufferLimitKey] != "" {
		bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey])
		if err != nil {
			return nil, err
		}
		bufferLimit = int(bl64)
	}

	retryWait := defaultRetryWait
	if ctx.Config[retryWaitKey] != "" {
		rwd, err := time.ParseDuration(ctx.Config[retryWaitKey])
		if err != nil {
			return nil, err
		}
		retryWait = int(rwd.Seconds() * 1000)
	}

	maxRetries := defaultMaxRetries
	if ctx.Config[maxRetriesKey] != "" {
		mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize)
		if err != nil {
			return nil, err
		}
		maxRetries = int(mr64)
	}

	asyncConnect := false
	if ctx.Config[asyncConnectKey] != "" {
		if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil {
			return nil, err
		}
	}

	fluentConfig := fluent.Config{
		FluentPort:       loc.port,
		FluentHost:       loc.host,
		FluentNetwork:    loc.protocol,
		FluentSocketPath: loc.path,
		BufferLimit:      bufferLimit,
		RetryWait:        retryWait,
		MaxRetry:         maxRetries,
		AsyncConnect:     asyncConnect,
	}

	logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig).
		Debug("logging driver fluentd configured")

	log, err := fluent.New(fluentConfig)
	if err != nil {
		return nil, err
	}
	return &fluentd{
		tag:           tag,
		containerID:   ctx.ContainerID,
		containerName: ctx.ContainerName,
		writer:        log,
		extra:         extra,
	}, nil
}
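The retry-wait and max-retries handling above is plain standard-library parsing: empty config values keep the defaults, durations are converted to the millisecond integer the fluent client expects, and counts are parsed as unsigned integers of the platform's int size. The self-contained sketch below repeats that pattern; the option names, defaults, and parseRetryOptions helper are illustrative assumptions, not the driver's real keys.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseRetryOptions falls back to defaults for missing values, converts a
// duration string to milliseconds, and parses a retry count as an unsigned int.
// Option names and defaults are hypothetical.
func parseRetryOptions(opts map[string]string) (int, int, error) {
	retryWaitMS := 1000 // assumed default retry wait in milliseconds
	maxRetries := 10    // assumed default retry count

	if v := opts["retry-wait"]; v != "" {
		d, err := time.ParseDuration(v)
		if err != nil {
			return 0, 0, err
		}
		retryWaitMS = int(d.Seconds() * 1000)
	}
	if v := opts["max-retries"]; v != "" {
		n, err := strconv.ParseUint(v, 10, strconv.IntSize)
		if err != nil {
			return 0, 0, err
		}
		maxRetries = int(n)
	}
	return retryWaitMS, maxRetries, nil
}

func main() {
	wait, retries, err := parseRetryOptions(map[string]string{"retry-wait": "1500ms", "max-retries": "5"})
	fmt.Println(wait, retries, err) // 1500 5 <nil>
}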
// New creates splunk logger driver using configuration passed in context
func New(info logger.Info) (logger.Logger, error) {
	hostname, err := info.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}

	// Parse and validate Splunk URL
	splunkURL, err := parseURL(info)
	if err != nil {
		return nil, err
	}

	// Splunk Token is a required parameter
	splunkToken, ok := info.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}

	tlsConfig := &tls.Config{}

	// Splunk uses autogenerated certificates by default;
	// allow users to trust them by skipping verification.
	if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}

	// If a path to the root certificate is provided, load it.
	if caPath, ok := info.Config[splunkCAPathKey]; ok {
		caCert, err := ioutil.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}

	if caName, ok := info.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}

	gzipCompression := false
	if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
		if err != nil {
			return nil, err
		}
	}

	gzipCompressionLevel := gzip.DefaultCompression
	if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
		var err error
		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
		if err != nil {
			return nil, err
		}
		gzipCompressionLevel = int(gzipCompressionLevel64)
		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
			err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
			return nil, err
		}
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{
		Transport: transport,
	}

	source := info.Config[splunkSourceKey]
	sourceType := info.Config[splunkSourceTypeKey]
	index := info.Config[splunkIndexKey]

	var nullMessage = &splunkMessage{
		Host:       hostname,
		Source:     source,
		SourceType: sourceType,
		Index:      index,
	}

	// Allow the user to remove the tag from messages by setting tag to an empty string.
	tag := ""
	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
		if err != nil {
			return nil, err
		}
	}

	attrs := info.ExtraAttributes(nil)

	var (
		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
	)

	logger := &splunkLogger{
		client:                client,
		transport:             transport,
		url:                   splunkURL.String(),
		auth:                  "Splunk " + splunkToken,
		nullMessage:           nullMessage,
		gzipCompression:       gzipCompression,
		gzipCompressionLevel:  gzipCompressionLevel,
		stream:                make(chan *splunkMessage, streamChannelSize),
		postMessagesFrequency: postMessagesFrequency,
		postMessagesBatchSize: postMessagesBatchSize,
		bufferMaximum:         bufferMaximum,
	}

	// By default we verify the connection, but we allow the user to skip that.
	verifyConnection := true
	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
		var err error
		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
		if err != nil {
			return nil, err
		}
	}
	if verifyConnection {
		err = verifySplunkConnection(logger)
		if err != nil {
			return nil, err
		}
	}

	var splunkFormat string
	if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
		switch splunkFormatParsed {
		case splunkFormatInline:
		case splunkFormatJSON:
		case splunkFormatRaw:
		default:
			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
		}
		splunkFormat = splunkFormatParsed
	} else {
		splunkFormat = splunkFormatInline
	}

	var loggerWrapper splunkLoggerInterface

	switch splunkFormat {
	case splunkFormatInline:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}
		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
	case splunkFormatJSON:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}
		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
	case splunkFormatRaw:
		var prefix bytes.Buffer
		if tag != "" {
			prefix.WriteString(tag)
			prefix.WriteString(" ")
		}
		for key, value := range attrs {
			prefix.WriteString(key)
			prefix.WriteString("=")
			prefix.WriteString(value)
			prefix.WriteString(" ")
		}
		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
	default:
		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
	}

	go loggerWrapper.worker()

	return loggerWrapper, nil
}