// Package splunk provides the log driver for forwarding server logs to
// Splunk HTTP Event Collector endpoint.
package splunk // import "github.com/docker/docker/daemon/logger/splunk"

import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/loggerutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/google/uuid"
	"github.com/sirupsen/logrus"
)

const (
	driverName                    = "splunk"
	splunkURLKey                  = "splunk-url"
	splunkTokenKey                = "splunk-token"
	splunkSourceKey               = "splunk-source"
	splunkSourceTypeKey           = "splunk-sourcetype"
	splunkIndexKey                = "splunk-index"
	splunkCAPathKey               = "splunk-capath"
	splunkCANameKey               = "splunk-caname"
	splunkInsecureSkipVerifyKey   = "splunk-insecureskipverify"
	splunkFormatKey               = "splunk-format"
	splunkVerifyConnectionKey     = "splunk-verify-connection"
	splunkGzipCompressionKey      = "splunk-gzip"
	splunkGzipCompressionLevelKey = "splunk-gzip-level"
	splunkIndexAcknowledgment     = "splunk-index-acknowledgment"
	envKey                        = "env"
	envRegexKey                   = "env-regex"
	labelsKey                     = "labels"
	labelsRegexKey                = "labels-regex"
	tagKey                        = "tag"
)
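
// These keys correspond one-to-one to --log-opt flags on the docker CLI. As
// an illustrative (not normative) example, a container could be configured
// with:
//
//	docker run --log-driver=splunk \
//	    --log-opt splunk-url=https://splunkhost:8088 \
//	    --log-opt splunk-token=<HEC token> \
//	    --log-opt splunk-format=json \
//	    alpine echo hello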

const (
	// How often to send messages when the batch size has not been reached
	defaultPostMessagesFrequency = 5 * time.Second
	// Maximum size of a message batch
	defaultPostMessagesBatchSize = 1000
	// Maximum number of messages we can store in buffer
	defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
	// Number of messages allowed to be queued in the channel
	defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
	// maxResponseSize is the max amount that will be read from an http response
	maxResponseSize = 1024
)

const (
	envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
	envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
	envVarBufferMaximum         = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
	envVarStreamChannelSize     = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
)
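
// These variables are read from the daemon's environment by the
// getAdvancedOption helpers below, so the batching behaviour can be tuned
// without a rebuild. A hypothetical override when launching the daemon:
//
//	SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY=10s \
//	SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE=200 \
//	dockerd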

var batchSendTimeout = 30 * time.Second

type splunkLoggerInterface interface {
	logger.Logger
	worker()
}

type splunkLogger struct {
	client    *http.Client
	transport *http.Transport

	url         string
	auth        string
	nullMessage *splunkMessage

	// http compression
	gzipCompression      bool
	gzipCompressionLevel int

	// Advanced options
	postMessagesFrequency time.Duration
	postMessagesBatchSize int
	bufferMaximum         int
	indexAck              bool

	// For synchronization between the background worker and the logger.
	// The stream channel is used to send messages to the worker goroutine;
	// the remaining fields block the Close call until all messages have
	// been flushed to HEC.
	stream     chan *splunkMessage
	lock       sync.RWMutex
	closed     bool
	closedCond *sync.Cond
}

type splunkLoggerInline struct {
	*splunkLogger

	nullEvent *splunkMessageEvent
}

type splunkLoggerJSON struct {
	*splunkLoggerInline
}

type splunkLoggerRaw struct {
	*splunkLogger

	prefix []byte
}

type splunkMessage struct {
	Event      interface{} `json:"event"`
	Time       string      `json:"time"`
	Host       string      `json:"host"`
	Source     string      `json:"source,omitempty"`
	SourceType string      `json:"sourcetype,omitempty"`
	Index      string      `json:"index,omitempty"`
}

type splunkMessageEvent struct {
	Line   interface{}       `json:"line"`
	Source string            `json:"source"`
	Tag    string            `json:"tag,omitempty"`
	Attrs  map[string]string `json:"attrs,omitempty"`
}
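
// Together these two structs describe the JSON body posted to HEC. A single
// inline-format message marshals to something like (values illustrative):
//
//	{"event":{"line":"hello world","source":"stdout","tag":"c8b6dd67"},
//	 "time":"1471522626.000000","host":"myhost"}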

const (
	splunkFormatRaw    = "raw"
	splunkFormatJSON   = "json"
	splunkFormatInline = "inline"
)

func init() {
	if err := logger.RegisterLogDriver(driverName, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}

// New creates a splunk logger driver using the configuration passed in the context
func New(info logger.Info) (logger.Logger, error) {
	hostname, err := info.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}

	// Parse and validate Splunk URL
	splunkURL, err := parseURL(info)
	if err != nil {
		return nil, err
	}

	// The Splunk token is a required parameter
	splunkToken, ok := info.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}

	// FIXME set minimum TLS version for splunk (see https://github.com/moby/moby/issues/42443)
	tlsConfig := &tls.Config{} //nolint: gosec // G402: TLS MinVersion too low.

	// Splunk uses autogenerated certificates by default;
	// allow users to trust them by skipping verification
	if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}

	// If a path to the root certificate is provided, load it
	if caPath, ok := info.Config[splunkCAPathKey]; ok {
		caCert, err := os.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}

	if caName, ok := info.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}

	gzipCompression := false
	if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
		if err != nil {
			return nil, err
		}
	}

	gzipCompressionLevel := gzip.DefaultCompression
	if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
		var err error
		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
		if err != nil {
			return nil, err
		}
		gzipCompressionLevel = int(gzipCompressionLevel64)
		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
			err := fmt.Errorf("not supported level '%s' for %s (supported values between %d and %d)",
				gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
			return nil, err
		}
	}

	indexAck := false
	if indexAckStr, ok := info.Config[splunkIndexAcknowledgment]; ok {
		indexAck, err = strconv.ParseBool(indexAckStr)
		if err != nil {
			return nil, err
		}
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
		Proxy:           http.ProxyFromEnvironment,
	}
	client := &http.Client{
		Transport: transport,
	}

	source := info.Config[splunkSourceKey]
	sourceType := info.Config[splunkSourceTypeKey]
	index := info.Config[splunkIndexKey]

	var nullMessage = &splunkMessage{
		Host:       hostname,
		Source:     source,
		SourceType: sourceType,
		Index:      index,
	}

	// Allow the user to remove the tag from messages by setting it to the empty string
	tag := ""
	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
		if err != nil {
			return nil, err
		}
	}

	attrs, err := info.ExtraAttributes(nil)
	if err != nil {
		return nil, err
	}

	var (
		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
	)

	logger := &splunkLogger{
		client:                client,
		transport:             transport,
		url:                   splunkURL.String(),
		auth:                  "Splunk " + splunkToken,
		nullMessage:           nullMessage,
		gzipCompression:       gzipCompression,
		gzipCompressionLevel:  gzipCompressionLevel,
		stream:                make(chan *splunkMessage, streamChannelSize),
		postMessagesFrequency: postMessagesFrequency,
		postMessagesBatchSize: postMessagesBatchSize,
		bufferMaximum:         bufferMaximum,
		indexAck:              indexAck,
	}

	// By default we verify the connection, but we allow users to skip that
	verifyConnection := true
	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
		var err error
		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
		if err != nil {
			return nil, err
		}
	}
	if verifyConnection {
		err = verifySplunkConnection(logger)
		if err != nil {
			return nil, err
		}
	}

	var splunkFormat string
	if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
		switch splunkFormatParsed {
		case splunkFormatInline:
		case splunkFormatJSON:
		case splunkFormatRaw:
		default:
			return nil, fmt.Errorf("unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
		}
		splunkFormat = splunkFormatParsed
	} else {
		splunkFormat = splunkFormatInline
	}

	var loggerWrapper splunkLoggerInterface

	switch splunkFormat {
	case splunkFormatInline:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}

		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
	case splunkFormatJSON:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: attrs,
		}

		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
	case splunkFormatRaw:
		var prefix bytes.Buffer
		if tag != "" {
			prefix.WriteString(tag)
			prefix.WriteString(" ")
		}
		for key, value := range attrs {
			prefix.WriteString(key)
			prefix.WriteString("=")
			prefix.WriteString(value)
			prefix.WriteString(" ")
		}

		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
	default:
		return nil, fmt.Errorf("unexpected format %s", splunkFormat)
	}

	go loggerWrapper.worker()

	return loggerWrapper, nil
}

func (l *splunkLoggerInline) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)

	event := *l.nullEvent
	event.Line = string(msg.Line)
	event.Source = msg.Source

	message.Event = &event
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)
	event := *l.nullEvent

	// If the line is valid JSON, embed it as-is so Splunk can index its
	// fields; otherwise fall back to sending it as a plain string.
	var rawJSONMessage json.RawMessage
	if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
		event.Line = &rawJSONMessage
	} else {
		event.Line = string(msg.Line)
	}

	event.Source = msg.Source

	message.Event = &event
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
	// empty or whitespace-only messages are not accepted by HEC
	if strings.TrimSpace(string(msg.Line)) == "" {
		return nil
	}

	message := l.createSplunkMessage(msg)

	message.Event = string(append(l.prefix, msg.Line...))
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

// queueMessageAsync hands the message off to the background worker. It fails
// if the driver has already been closed.
func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
	l.lock.RLock()
	defer l.lock.RUnlock()
	if l.closedCond != nil {
		return fmt.Errorf("%s: driver is closed", driverName)
	}
	l.stream <- message
	return nil
}

// worker drains the stream channel, batching messages and posting them to
// HEC either when a full batch has accumulated or on each tick of the timer.
func (l *splunkLogger) worker() {
	timer := time.NewTicker(l.postMessagesFrequency)
	var messages []*splunkMessage
	for {
		select {
		case message, open := <-l.stream:
			if !open {
				l.postMessages(messages, true)
				l.lock.Lock()
				defer l.lock.Unlock()
				l.transport.CloseIdleConnections()
				l.closed = true
				l.closedCond.Signal()
				return
			}
			messages = append(messages, message)
			// Only send when we reach exactly the batch size; this also
			// avoids firing postMessages on every new message after a
			// previous attempt has failed.
			if len(messages)%l.postMessagesBatchSize == 0 {
				messages = l.postMessages(messages, false)
			}
		case <-timer.C:
			messages = l.postMessages(messages, false)
		}
	}
}
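
// With the default settings, for example, a batch is posted as soon as 1000
// messages have accumulated, and the five-second ticker flushes whatever is
// buffered in between, so logs from a quiet container are delayed by at most
// roughly postMessagesFrequency.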

// postMessages sends the buffered messages in batches and returns whatever
// could not be sent. If lastChance is set, unsendable messages are written
// to the daemon log instead of being kept.
func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
	messagesLen := len(messages)

	ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout)
	defer cancel()

	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
		upperBound := i + l.postMessagesBatchSize
		if upperBound > messagesLen {
			upperBound = messagesLen
		}

		if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
			logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")
			if messagesLen-i >= l.bufferMaximum || lastChance {
				// If this is the last chance, print them all to the daemon log
				if lastChance {
					upperBound = messagesLen
				}
				// Not all messages were sent, but the buffer has reached its
				// maximum: log the messages we could not send and return the
				// buffer minus one batch size
				for j := i; j < upperBound; j++ {
					if jsonEvent, err := json.Marshal(messages[j]); err != nil {
						logrus.Error(err)
					} else {
						logrus.Error(fmt.Errorf("failed to send a message '%s'", string(jsonEvent)))
					}
				}
				return messages[upperBound:messagesLen]
			}
			// Not all messages were sent; return the buffer starting from the
			// first message we could not send
			return messages[i:messagesLen]
		}
	}
	// All sent, return empty buffer
	return messages[:0]
}

// tryPostMessages marshals the given messages into a single request body
// (optionally gzip-compressed) and posts it to the HEC endpoint.
func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled, create a gzip writer with the specified
	// compression level. If gzip compression is disabled, use the plain
	// buffer as the writer
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, close the writer to flush the
	// remaining compressed data
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest(http.MethodPost, l.url, bytes.NewBuffer(buffer.Bytes()))
	if err != nil {
		return err
	}
	req = req.WithContext(ctx)
	req.Header.Set("Authorization", l.auth)
	// Tell the server that we are sending a gzip-compressed body
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	// Set the correct header if index acknowledgment is enabled
	if l.indexAck {
		requestChannel, err := uuid.NewRandom()
		if err != nil {
			return err
		}
		req.Header.Set("X-Splunk-Request-Channel", requestChannel.String())
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		pools.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}

// Close shuts down the driver: it closes the stream channel and blocks until
// the worker has flushed all buffered messages to HEC.
func (l *splunkLogger) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.closedCond == nil {
		l.closedCond = sync.NewCond(&l.lock)
		close(l.stream)
		for !l.closed {
			l.closedCond.Wait()
		}
	}
	return nil
}

// Name returns the name of the log driver.
func (l *splunkLogger) Name() string {
	return driverName
}

// createSplunkMessage copies the preconfigured null message and stamps it
// with the message timestamp in epoch seconds with fractional precision.
func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
	message := *l.nullMessage
	message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
	return &message
}
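
// For example, a message stamped 2016-08-18T12:17:06Z is rendered as
// "1471522626.000000", the epoch-seconds format HEC accepts in the time
// field.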

// ValidateLogOpt looks for all options supported by the splunk driver
func ValidateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		switch key {
		case splunkURLKey:
		case splunkTokenKey:
		case splunkSourceKey:
		case splunkSourceTypeKey:
		case splunkIndexKey:
		case splunkCAPathKey:
		case splunkCANameKey:
		case splunkInsecureSkipVerifyKey:
		case splunkFormatKey:
		case splunkVerifyConnectionKey:
		case splunkGzipCompressionKey:
		case splunkGzipCompressionLevelKey:
		case splunkIndexAcknowledgment:
		case envKey:
		case envRegexKey:
		case labelsKey:
		case labelsRegexKey:
		case tagKey:
		default:
			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
		}
	}
	return nil
}

// parseURL validates the configured splunk-url and rewrites its path to
// point at the HEC event endpoint.
func parseURL(info logger.Info) (*url.URL, error) {
	splunkURLStr, ok := info.Config[splunkURLKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
	}

	splunkURL, err := url.Parse(splunkURLStr)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
	}

	if !urlutil.IsURL(splunkURLStr) ||
		!splunkURL.IsAbs() ||
		(splunkURL.Path != "" && splunkURL.Path != "/") ||
		splunkURL.RawQuery != "" ||
		splunkURL.Fragment != "" {
		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
	}

	splunkURL.Path = "/services/collector/event/1.0"

	return splunkURL, nil
}
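
// For example, "https://splunkhost:8088" is accepted and rewritten to
// "https://splunkhost:8088/services/collector/event/1.0", while a value
// carrying a path, query, or fragment, such as
// "https://splunkhost:8088/foo?x=1", is rejected.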

// verifySplunkConnection sends an OPTIONS request to the HEC endpoint to
// check that it is reachable and accepts requests.
func verifySplunkConnection(l *splunkLogger) error {
	req, err := http.NewRequest(http.MethodOptions, l.url, nil)
	if err != nil {
		return err
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		pools.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}

// getAdvancedOptionDuration reads a duration from the named environment
// variable, falling back to defaultValue when unset or unparsable.
func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
	valueStr := os.Getenv(envName)
	if valueStr == "" {
		return defaultValue
	}
	parsedValue, err := time.ParseDuration(valueStr)
	if err != nil {
		logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err))
		return defaultValue
	}
	return parsedValue
}

// getAdvancedOptionInt reads an integer from the named environment variable,
// falling back to defaultValue when unset or unparsable.
func getAdvancedOptionInt(envName string, defaultValue int) int {
	valueStr := os.Getenv(envName)
	if valueStr == "" {
		return defaultValue
	}
	parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
	if err != nil {
		logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err))
		return defaultValue
	}
	return int(parsedValue)
}