
Merge pull request #16961 from vdemeester/pr-15975-carry-for-docs

Carry #15975 - Add extra fields based on label and env for gelf/fluentd/json-file/journald log drivers
Sebastiaan van Stijn committed on 2015-10-13 09:30:32 -07:00
Commit 3856c5efa6
12 changed files with 254 additions and 41 deletions

View file

@@ -721,6 +721,8 @@ func (container *Container) getLogger() (logger.Logger, error) {
 		ContainerImageID:   container.ImageID,
 		ContainerImageName: container.Config.Image,
 		ContainerCreated:   container.Created,
+		ContainerEnv:       container.Config.Env,
+		ContainerLabels:    container.Config.Labels,
 	}
 	// Set logging file for "json-logger"

View file

@@ -17,9 +17,49 @@ type Context struct {
 	ContainerImageID   string
 	ContainerImageName string
 	ContainerCreated   time.Time
+	ContainerEnv       []string
+	ContainerLabels    map[string]string
 	LogPath            string
 }
 
+// ExtraAttributes returns the user-defined extra attributes (labels,
+// environment variables) in key-value format. This can be used by log drivers
+// that support metadata to add more context to a log.
+func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string {
+	extra := make(map[string]string)
+	labels, ok := ctx.Config["labels"]
+	if ok && len(labels) > 0 {
+		for _, l := range strings.Split(labels, ",") {
+			if v, ok := ctx.ContainerLabels[l]; ok {
+				if keyMod != nil {
+					l = keyMod(l)
+				}
+				extra[l] = v
+			}
+		}
+	}
+
+	env, ok := ctx.Config["env"]
+	if ok && len(env) > 0 {
+		envMapping := make(map[string]string)
+		for _, e := range ctx.ContainerEnv {
+			if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
+				envMapping[kv[0]] = kv[1]
+			}
+		}
+
+		for _, l := range strings.Split(env, ",") {
+			if v, ok := envMapping[l]; ok {
+				if keyMod != nil {
+					l = keyMod(l)
+				}
+				extra[l] = v
+			}
+		}
+	}
+
+	return extra
+}
+
 // Hostname returns the hostname from the underlying OS.
 func (ctx *Context) Hostname() (string, error) {
 	hostname, err := os.Hostname()
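To make the `keyMod` parameter concrete, here is a small standalone sketch (not part of the commit) that mimics the label-filtering half of `ExtraAttributes` and exercises the three modifier styles used by the drivers further down in this diff: `nil` for fluentd/json-file, an underscore prefix for gelf, and `strings.ToTitle` for journald. The `extraAttributes` helper name is illustrative only.

```
package main

import (
	"fmt"
	"strings"
)

// extraAttributes approximates the label half of Context.ExtraAttributes:
// it keeps only the container labels listed in the comma-separated option
// and applies an optional key modifier (env handling omitted for brevity).
func extraAttributes(labelsOpt string, containerLabels map[string]string, keyMod func(string) string) map[string]string {
	extra := make(map[string]string)
	if len(labelsOpt) == 0 {
		return extra
	}
	for _, l := range strings.Split(labelsOpt, ",") {
		if v, ok := containerLabels[l]; ok {
			if keyMod != nil {
				l = keyMod(l)
			}
			extra[l] = v
		}
	}
	return extra
}

func main() {
	labels := map[string]string{"rack": "101", "dc": "lhr", "unrelated": "x"}

	// fluentd/json-file style: keys pass through unchanged (keyMod == nil).
	fmt.Println(extraAttributes("rack,dc", labels, nil))

	// gelf style: prefix keys with "_" unless already prefixed.
	fmt.Println(extraAttributes("rack,dc", labels, func(key string) string {
		if key[0] == '_' {
			return key
		}
		return "_" + key
	}))

	// journald style: upper-case the keys.
	fmt.Println(extraAttributes("rack,dc", labels, strings.ToTitle))
}
```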

View file

@@ -20,6 +20,7 @@ type fluentd struct {
 	containerID   string
 	containerName string
 	writer        *fluent.Fluent
+	extra         map[string]string
 }
 
 const (

@@ -51,9 +52,8 @@ func New(ctx logger.Context) (logger.Logger, error) {
 	if err != nil {
 		return nil, err
 	}
-	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s.", ctx.ContainerID, host, port, tag)
+	extra := ctx.ExtraAttributes(nil)
+	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
 	// logger tries to recoonect 2**32 - 1 times
 	// failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
 	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})

@@ -65,6 +65,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
 		containerID:   ctx.ContainerID,
 		containerName: ctx.ContainerName,
 		writer:        log,
+		extra:         extra,
 	}, nil
 }

@@ -75,6 +76,9 @@ func (f *fluentd) Log(msg *logger.Message) error {
 		"source": msg.Source,
 		"log":    string(msg.Line),
 	}
+	for k, v := range f.extra {
+		data[k] = v
+	}
 	// fluent-logger-golang buffers logs from failures and disconnections,
 	// and these are transferred again automatically.
 	return f.writer.PostWithTime(f.tag, msg.Timestamp, data)

@@ -95,6 +99,8 @@ func ValidateLogOpt(cfg map[string]string) error {
 		case "fluentd-address":
 		case "fluentd-tag":
 		case "tag":
+		case "labels":
+		case "env":
 		default:
 			return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key)
 		}

View file

@@ -21,20 +21,10 @@ import (
 const name = "gelf"
 
 type gelfLogger struct {
 	writer *gelf.Writer
 	ctx    logger.Context
-	fields gelfFields
-}
-
-type gelfFields struct {
-	hostname      string
-	containerID   string
-	containerName string
-	imageID       string
-	imageName     string
-	command       string
-	tag           string
-	created       time.Time
+	hostname string
+	extra    map[string]interface{}
 }
 
 func init() {

@@ -71,15 +61,24 @@ func New(ctx logger.Context) (logger.Logger, error) {
 		return nil, err
 	}
 
-	fields := gelfFields{
-		hostname:      hostname,
-		containerID:   ctx.ContainerID,
-		containerName: string(containerName),
-		imageID:       ctx.ContainerImageID,
-		imageName:     ctx.ContainerImageName,
-		command:       ctx.Command(),
-		tag:           tag,
-		created:       ctx.ContainerCreated,
+	extra := map[string]interface{}{
+		"_container_id":   ctx.ContainerID,
+		"_container_name": string(containerName),
+		"_image_id":       ctx.ContainerImageID,
+		"_image_name":     ctx.ContainerImageName,
+		"_command":        ctx.Command(),
+		"_tag":            tag,
+		"_created":        ctx.ContainerCreated,
+	}
+	extraAttrs := ctx.ExtraAttributes(func(key string) string {
+		if key[0] == '_' {
+			return key
+		}
+		return "_" + key
+	})
+	for k, v := range extraAttrs {
+		extra[k] = v
 	}
 
 	// create new gelfWriter

@@ -89,9 +88,10 @@ func New(ctx logger.Context) (logger.Logger, error) {
 	}
 
 	return &gelfLogger{
 		writer:   gelfWriter,
 		ctx:      ctx,
-		fields:   fields,
+		hostname: hostname,
+		extra:    extra,
 	}, nil
 }

@@ -106,19 +106,11 @@ func (s *gelfLogger) Log(msg *logger.Message) error {
 	m := gelf.Message{
 		Version:  "1.1",
-		Host:     s.fields.hostname,
+		Host:     s.hostname,
 		Short:    string(short),
 		TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0,
 		Level:    level,
-		Extra: map[string]interface{}{
-			"_container_id":   s.fields.containerID,
-			"_container_name": s.fields.containerName,
-			"_image_id":       s.fields.imageID,
-			"_image_name":     s.fields.imageName,
-			"_command":        s.fields.command,
-			"_tag":            s.fields.tag,
-			"_created":        s.fields.created,
-		},
+		Extra:    s.extra,
 	}
 
 	if err := s.writer.WriteMessage(&m); err != nil {

@@ -143,6 +135,8 @@ func ValidateLogOpt(cfg map[string]string) error {
 		case "gelf-address":
 		case "gelf-tag":
 		case "tag":
+		case "labels":
+		case "env":
 		default:
 			return fmt.Errorf("unknown log opt '%s' for gelf log driver", key)
 		}

View file

@@ -6,6 +6,7 @@ package journald
 import (
 	"fmt"
+	"strings"
 	"sync"
 
 	"github.com/Sirupsen/logrus"

@@ -46,10 +47,16 @@ func New(ctx logger.Context) (logger.Logger, error) {
 	if name[0] == '/' {
 		name = name[1:]
 	}
 	vars := map[string]string{
 		"CONTAINER_ID":      ctx.ContainerID[:12],
 		"CONTAINER_ID_FULL": ctx.ContainerID,
-		"CONTAINER_NAME":    name}
+		"CONTAINER_NAME":    name,
+	}
+	extraAttrs := ctx.ExtraAttributes(strings.ToTitle)
+	for k, v := range extraAttrs {
+		vars[k] = v
+	}
 	return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil
 }

@@ -58,6 +65,8 @@ func New(ctx logger.Context) (logger.Logger, error) {
 func validateLogOpt(cfg map[string]string) error {
 	for key := range cfg {
 		switch key {
+		case "labels":
+		case "env":
 		default:
 			return fmt.Errorf("unknown log opt '%s' for journald log driver", key)
 		}

View file

@@ -41,6 +41,7 @@ type JSONFileLogger struct {
 	ctx          logger.Context
 	readers      map[*logger.LogWatcher]struct{} // stores the active log followers
 	notifyRotate *pubsub.Publisher
+	extra        []byte // json-encoded extra attributes
 }
 
 func init() {

@@ -77,6 +78,16 @@ func New(ctx logger.Context) (logger.Logger, error) {
 			return nil, fmt.Errorf("max-file cannot be less than 1")
 		}
 	}
+	var extra []byte
+	if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 {
+		var err error
+		extra, err = json.Marshal(attrs)
+		if err != nil {
+			return nil, err
+		}
+	}
 	return &JSONFileLogger{
 		f:   log,
 		buf: bytes.NewBuffer(nil),

@@ -85,6 +96,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
 		n:            maxFiles,
 		readers:      make(map[*logger.LogWatcher]struct{}),
 		notifyRotate: pubsub.NewPublisher(0, 1),
+		extra:        extra,
 	}, nil
 }

@@ -97,7 +109,12 @@ func (l *JSONFileLogger) Log(msg *logger.Message) error {
 	if err != nil {
 		return err
 	}
-	err = (&jsonlog.JSONLogs{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf)
+	err = (&jsonlog.JSONLogs{
+		Log:      append(msg.Line, '\n'),
+		Stream:   msg.Source,
+		Created:  timestamp,
+		RawAttrs: l.extra,
+	}).MarshalJSONBuf(l.buf)
 	if err != nil {
 		return err
 	}

@@ -181,6 +198,8 @@ func ValidateLogOpt(cfg map[string]string) error {
 		switch key {
 		case "max-file":
 		case "max-size":
+		case "labels":
+		case "env":
 		default:
 			return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
 		}

View file

@@ -1,9 +1,11 @@
 package jsonfilelog
 
 import (
+	"encoding/json"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"reflect"
 	"strconv"
 	"testing"
 	"time"

@@ -149,3 +151,51 @@ func TestJSONFileLoggerWithOpts(t *testing.T) {
 	}
 }
 
+func TestJSONFileLoggerWithLabelsEnv(t *testing.T) {
+	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
+	tmp, err := ioutil.TempDir("", "docker-logger-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	filename := filepath.Join(tmp, "container.log")
+	config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"}
+	l, err := New(logger.Context{
+		ContainerID:     cid,
+		LogPath:         filename,
+		Config:          config,
+		ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"},
+		ContainerEnv:    []string{"environ=production", "debug=false", "port=10001", "ssl=true"},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer l.Close()
+	if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line"), Source: "src1"}); err != nil {
+		t.Fatal(err)
+	}
+	res, err := ioutil.ReadFile(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var jsonLog jsonlog.JSONLogs
+	if err := json.Unmarshal(res, &jsonLog); err != nil {
+		t.Fatal(err)
+	}
+	extra := make(map[string]string)
+	if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil {
+		t.Fatal(err)
+	}
+	expected := map[string]string{
+		"rack":    "101",
+		"dc":      "lhr",
+		"environ": "production",
+		"debug":   "false",
+		"ssl":     "true",
+	}
+	if !reflect.DeepEqual(extra, expected) {
+		t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected)
+	}
+}

View file

@@ -73,6 +73,24 @@ Refer to the [log tag option documentation](log_tags.md) for customizing
the log tag format.

### labels and env

The `labels` and `env` options each take a comma-separated list of keys. If there is a collision between a `label` and an `env` key, the value of the `env` key takes precedence.

To use these attributes, specify them when you start the Docker daemon:

```
docker daemon --log-driver=fluentd --log-opt labels=foo --log-opt env=foo,fizz
```

Then, run a container and set values for the `labels` or `env` keys. For example:

```
docker run --label foo=bar -e fizz=buzz -d -P training/webapp python app.py
```

This adds the selected keys as extra attributes on every log message.
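With the commands above, the `foo=bar` label matches the `labels` option and the `fizz=buzz` environment variable matches the `env` option, so each record sent to Fluentd carries those keys next to the usual `log` and `source` fields, roughly like this (illustrative sketch, not verbatim driver output):

    {"source": "stdout", "log": "...", "foo": "bar", "fizz": "buzz"}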
## Fluentd daemon management with Docker

About `Fluentd` itself, see [the project webpage](http://www.fluentd.org)

View file

@@ -36,6 +36,31 @@ You can set the logging driver for a specific container by using the
    docker run --log-driver=journald ...

## Options

Users can use the `--log-opt NAME=VALUE` flag to specify additional
journald logging driver options.

### labels and env

The `labels` and `env` options each take a comma-separated list of keys. If there is a collision between a `label` and an `env` key, the value of the `env` key takes precedence.

To use these attributes, specify them when you start the Docker daemon:

```
docker daemon --log-driver=journald --log-opt labels=foo --log-opt env=foo,fizz
```

Then, run a container and set values for the `labels` or `env` keys. For example:

```
docker run --label foo=bar -e fizz=buzz -d -P training/webapp python app.py
```

This adds additional metadata to the journal with every message, one field
for each matching key.
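For instance, with the `foo` label and `fizz` environment variable set above, each journal entry for the container also carries fields along these lines (the driver upper-cases the keys; illustrative only):

    FOO=bar
    FIZZ=buzz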
## Note regarding container names

The value logged in the `CONTAINER_NAME` field is the container name

View file

@@ -27,12 +27,15 @@ container's logging driver. The following options are supported:
The `docker logs` command is available only for the `json-file` logging driver.

## json-file options

The following logging options are supported for the `json-file` logging driver:

    --log-opt max-size=[0-9+][k|m|g]
    --log-opt max-file=[0-9+]
    --log-opt labels=label1,label2
    --log-opt env=env1,env2

Logs that reach `max-size` are rolled over. You can set the size in kilobytes (k), megabytes (m), or gigabytes (g), e.g. `--log-opt max-size=50m`. If `max-size` is not set, then logs are not rolled over.

@@ -41,6 +44,26 @@ Logs that reach `max-size` are rolled over. You can set the size in kilobytes(k)

If `max-size` and `max-file` are set, `docker logs` only returns the log lines from the newest log file.

The `labels` and `env` options add additional attributes for use with logging drivers that accept them. Each of these options takes a comma-separated list of keys. If there is a collision between a `label` and an `env` key, the value of the `env` key takes precedence.

To use these attributes, specify them when you start the Docker daemon:

```
docker daemon --log-driver=json-file --log-opt labels=foo --log-opt env=foo,fizz
```

Then, run a container and set values for the `labels` or `env` keys. For example:

```
docker run --label foo=bar -e fizz=buzz -d -P training/webapp python app.py
```

This adds additional fields to the log, depending on the driver. For the
`json-file` driver the extra attributes look like this:

    "attrs":{"fizz":"buzz","foo":"bar"}

## syslog options

The following logging options are supported for the `syslog` logging driver:

@@ -100,6 +123,8 @@ The GELF logging driver supports the following options:

    --log-opt gelf-address=udp://host:port
    --log-opt tag="database"
    --log-opt labels=label1,label2
    --log-opt env=env1,env2

The `gelf-address` option specifies the remote GELF server address that the
driver connects to. Currently, only `udp` is supported as the transport and you must

@@ -112,6 +137,15 @@ By default, Docker uses the first 12 characters of the container ID to tag log m

Refer to the [log tag option documentation](log_tags.md) for customizing
the log tag format.

The `labels` and `env` options are also supported by the `gelf` logging
driver. They add extra keys to the GELF `extra` fields, prefixed with an
underscore (`_`):

    // […]
    "_foo": "bar",
    "_fizz": "buzz",
    // […]

## fluentd options

@@ -128,6 +162,7 @@ If container cannot connect to the Fluentd daemon on the specified address,
the container stops immediately. For detailed information on working with this
logging driver, see [the fluentd logging driver](fluentd.md)

## Specify Amazon CloudWatch Logs options

The Amazon CloudWatch Logs logging driver supports the following options:

View file

@@ -2,6 +2,7 @@ package jsonlog
 import (
 	"bytes"
+	"encoding/json"
 	"unicode/utf8"
 )

@@ -12,6 +13,9 @@ type JSONLogs struct {
 	Log     []byte `json:"log,omitempty"`
 	Stream  string `json:"stream,omitempty"`
 	Created string `json:"time"`
+
+	// json-encoded bytes
+	RawAttrs json.RawMessage `json:"attrs,omitempty"`
 }
 
 // MarshalJSONBuf is based on the same method from JSONLog

@@ -34,6 +38,15 @@ func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
 		buf.WriteString(`"stream":`)
 		ffjsonWriteJSONString(buf, mj.Stream)
 	}
+	if len(mj.RawAttrs) > 0 {
+		if first == true {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"attrs":`)
+		buf.Write(mj.RawAttrs)
+	}
 	if first == true {
 		first = false
 	} else {

View file

@@ -21,6 +21,8 @@ func TestJSONLogsMarshalJSONBuf(t *testing.T) {
 		&JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`,
 		&JSONLogs{Log: []byte{0xaF}}:            `^{\"log\":\"\\ufffd\",\"time\":}$`,
 		&JSONLogs{Log: []byte{0x7F}}:            `^{\"log\":\"\x7f\",\"time\":}$`,
+		// with raw attributes
+		&JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`,
 	}
 	for jsonLog, expression := range logs {
 		var buf bytes.Buffer