mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Add log reading to the journald log driver
If a logdriver doesn't register a callback function to validate log options, it won't be usable. Fix the journald driver by adding a dummy validator.

Teach the client and the daemon's "logs" logic that the server can also supply "logs" data via the "journald" driver. Update documentation and tests that depend on error messages.

Add support for reading log data from the systemd journal to the journald log driver. The internal logic uses a goroutine to scan the journal for matching entries after any specified cutoff time, formats the messages from those entries as JSONLog messages, and stuffs the results down a pipe whose reading end we hand back to the caller. If we are missing any of the 'linux', 'cgo', or 'journald' build tags, however, we don't implement a reader, so the 'logs' endpoint will still return an error.

Make the necessary changes to the build setup to ensure that support for reading container logs from the systemd journal is built.

Rename the Jmap member of the journald logdriver's struct to "vars" to make it non-public, and to make it easier to tell that it's just there to hold additional variable values that we want journald to record along with log data that we're sending to it.

In the client, don't assume that we know which logdrivers the server implements, and remove the check that looks at the server. It's redundant because the server already knows, and the check also makes using older clients with newer servers (which may have new logdrivers in them) unnecessarily hard.

When we try to "logs" and have to report that the container's logdriver doesn't support reading, send the error message through the might-be-a-multiplexer so that clients which are expecting multiplexed data will be able to properly display the error, instead of tripping over the data and printing a less helpful "Unrecognized input header" error.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com> (github: nalind)
parent 6f15546f6b
commit e611a189cb
27 changed files with 381 additions and 38 deletions
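The reading end handed back to the caller, as described above, is a pair of channels rather than a file. A minimal, self-contained sketch of that consumption pattern; the types here are illustrative stand-ins that only mirror the Msg/Err channels of the daemon's LogWatcher used in the diff below:

package main

import (
    "fmt"
    "time"
)

// logMessage and logWatcher are illustrative stand-ins for the daemon's
// logger.Message and logger.LogWatcher; only the fields used here are mirrored.
type logMessage struct {
    Line      []byte
    Source    string // "stdout" or "stderr"
    Timestamp time.Time
}

type logWatcher struct {
    Msg chan *logMessage
    Err chan error
}

// drain consumes messages until the producer closes the Msg channel, which is
// how the journald reader in this commit signals that it has finished.
func drain(w *logWatcher) {
    for {
        select {
        case msg, ok := <-w.Msg:
            if !ok {
                return
            }
            fmt.Printf("%s %s: %s", msg.Timestamp.Format(time.RFC3339), msg.Source, msg.Line)
        case err := <-w.Err:
            fmt.Println("error reading logs:", err)
            return
        }
    }
}

func main() {
    w := &logWatcher{Msg: make(chan *logMessage, 1), Err: make(chan error, 1)}
    go func() {
        w.Msg <- &logMessage{Line: []byte("hello from the journal\n"), Source: "stdout", Timestamp: time.Now()}
        close(w.Msg)
    }()
    drain(w)
}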
@@ -46,8 +46,10 @@ RUN apt-get update && apt-get install -y \
     libapparmor-dev \
     libcap-dev \
     libsqlite3-dev \
+    libsystemd-journal-dev \
     mercurial \
     parallel \
+    pkg-config \
     python-mock \
     python-pip \
     python-websocket \
@@ -2,7 +2,6 @@ package client
 
 import (
     "encoding/json"
-    "fmt"
     "net/url"
     "time"
 
@@ -37,10 +36,6 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
         return err
     }
 
-    if logType := c.HostConfig.LogConfig.Type; logType != "json-file" {
-        return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType)
-    }
-
     v := url.Values{}
     v.Set("stdout", "1")
     v.Set("stderr", "1")
@@ -141,7 +141,10 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
     }
 
     if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
-        fmt.Fprintf(w, "Error running logs job: %s\n", err)
+        // The client may be expecting all of the data we're sending to
+        // be multiplexed, so send it through OutStream, which will
+        // have been set up to handle that if needed.
+        fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %s\n", err)
     }
 
     return nil
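Clients that expect multiplexed output split the framed stream back apart rather than printing an "Unrecognized input header" error. A short sketch of that round trip, assuming the pkg/stdcopy helpers (NewStdWriter, as used in daemon/logs.go further down, and StdCopy as its reading counterpart in the same package):

package main

import (
    "bytes"
    "fmt"
    "os"

    "github.com/docker/docker/pkg/stdcopy"
)

func main() {
    // Frame an error message the way the daemon frames OutStream when the
    // container has no TTY attached.
    var framed bytes.Buffer
    out := stdcopy.NewStdWriter(&framed, stdcopy.Stdout)
    fmt.Fprintf(out, "Error running logs job: %s\n", "example failure")

    // A client that expects multiplexed data splits the stream back into
    // stdout and stderr instead of tripping over the frame header.
    if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &framed); err != nil {
        fmt.Fprintln(os.Stderr, "demultiplexing failed:", err)
    }
}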
@@ -4,7 +4,7 @@
 
 FROM debian:jessie
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -4,7 +4,7 @@
 
 FROM debian:stretch
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -4,7 +4,7 @@
 
 FROM debian:wheezy-backports
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools/wheezy-backports build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools/wheezy-backports build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -56,6 +56,12 @@ for version in "${versions[@]}"; do
         libdevmapper-dev # for "libdevmapper.h"
         libsqlite3-dev # for "sqlite3.h"
     )
+    # packaging for "sd-journal.h" and libraries varies
+    case "$suite" in
+        precise) ;;
+        sid|stretch|wily) packages+=( libsystemd-dev );;
+        *) packages+=( libsystemd-journal-dev );;
+    esac
 
     if [ "$suite" = 'precise' ]; then
         # precise has a few package issues
@@ -4,7 +4,7 @@
 
 FROM ubuntu:trusty
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -4,7 +4,7 @@
 
 FROM ubuntu:vivid
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -4,7 +4,7 @@
 
 FROM ubuntu:wily
 
-RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
 ENV GO_VERSION 1.4.2
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
@@ -6,6 +6,7 @@ package journald
 
 import (
     "fmt"
+    "sync"
 
     "github.com/Sirupsen/logrus"
     "github.com/coreos/go-systemd/journal"
@@ -15,13 +16,22 @@ import (
 const name = "journald"
 
 type journald struct {
-    Jmap map[string]string
+    vars    map[string]string // additional variables and values to send to the journal along with the log message
+    readers readerList
 }
 
+type readerList struct {
+    mu      sync.Mutex
+    readers map[*logger.LogWatcher]*logger.LogWatcher
+}
+
 func init() {
     if err := logger.RegisterLogDriver(name, New); err != nil {
         logrus.Fatal(err)
     }
+    if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil {
+        logrus.Fatal(err)
+    }
 }
 
 // New creates a journald logger using the configuration passed in on
@@ -36,22 +46,30 @@ func New(ctx logger.Context) (logger.Logger, error) {
     if name[0] == '/' {
         name = name[1:]
     }
-    jmap := map[string]string{
+    vars := map[string]string{
         "CONTAINER_ID":      ctx.ContainerID[:12],
         "CONTAINER_ID_FULL": ctx.ContainerID,
         "CONTAINER_NAME":    name}
-    return &journald{Jmap: jmap}, nil
+    return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil
 }
 
+// We don't actually accept any options, but we have to supply a callback for
+// the factory to pass the (probably empty) configuration map to.
+func validateLogOpt(cfg map[string]string) error {
+    for key := range cfg {
+        switch key {
+        default:
+            return fmt.Errorf("unknown log opt '%s' for journald log driver", key)
+        }
+    }
+    return nil
+}
+
 func (s *journald) Log(msg *logger.Message) error {
     if msg.Source == "stderr" {
-        return journal.Send(string(msg.Line), journal.PriErr, s.Jmap)
+        return journal.Send(string(msg.Line), journal.PriErr, s.vars)
     }
-    return journal.Send(string(msg.Line), journal.PriInfo, s.Jmap)
-}
-
-func (s *journald) Close() error {
-    return nil
+    return journal.Send(string(msg.Line), journal.PriInfo, s.vars)
 }
 
 func (s *journald) Name() string {
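The Log method above forwards each message with journal.Send from github.com/coreos/go-systemd/journal. A standalone sketch of that call with extra journal fields attached; the container values are placeholders for illustration, not taken from this commit:

package main

import (
    "fmt"

    "github.com/coreos/go-systemd/journal"
)

func main() {
    if !journal.Enabled() {
        fmt.Println("journald is not available on this host")
        return
    }
    // Extra key/value pairs ride along with the message, the same way the
    // driver's vars map attaches CONTAINER_ID and friends to every entry.
    // These values are made up for the example.
    vars := map[string]string{
        "CONTAINER_ID":   "0123456789ab",
        "CONTAINER_NAME": "example",
    }
    if err := journal.Send("hello from a journald logging sketch", journal.PriInfo, vars); err != nil {
        fmt.Println("send failed:", err)
    }
}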
303  daemon/logger/journald/read.go  Normal file
@@ -0,0 +1,303 @@
+// +build linux,cgo,!static_build,journald
+
+package journald
+
+// #cgo pkg-config: libsystemd-journal
+// #include <sys/types.h>
+// #include <sys/poll.h>
+// #include <systemd/sd-journal.h>
+// #include <errno.h>
+// #include <stdio.h>
+// #include <stdlib.h>
+// #include <string.h>
+// #include <time.h>
+// #include <unistd.h>
+//
+//static int get_message(sd_journal *j, const char **msg, size_t *length)
+//{
+//    int rc;
+//    *msg = NULL;
+//    *length = 0;
+//    rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
+//    if (rc == 0) {
+//        if (*length > 8) {
+//            (*msg) += 8;
+//            *length -= 8;
+//        } else {
+//            *msg = NULL;
+//            *length = 0;
+//            rc = -ENOENT;
+//        }
+//    }
+//    return rc;
+//}
+//static int get_priority(sd_journal *j, int *priority)
+//{
+//    const void *data;
+//    size_t i, length;
+//    int rc;
+//    *priority = -1;
+//    rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
+//    if (rc == 0) {
+//        if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
+//            *priority = 0;
+//            for (i = 9; i < length; i++) {
+//                *priority = *priority * 10 + ((const char *)data)[i] - '0';
+//            }
+//            if (length > 9) {
+//                rc = 0;
+//            }
+//        }
+//    }
+//    return rc;
+//}
+//static int wait_for_data_or_close(sd_journal *j, int pipefd)
+//{
+//    struct pollfd fds[2];
+//    uint64_t when = 0;
+//    int timeout, jevents, i;
+//    struct timespec ts;
+//    uint64_t now;
+//    do {
+//        memset(&fds, 0, sizeof(fds));
+//        fds[0].fd = pipefd;
+//        fds[0].events = POLLHUP;
+//        fds[1].fd = sd_journal_get_fd(j);
+//        if (fds[1].fd < 0) {
+//            return -1;
+//        }
+//        jevents = sd_journal_get_events(j);
+//        if (jevents < 0) {
+//            return -1;
+//        }
+//        fds[1].events = jevents;
+//        sd_journal_get_timeout(j, &when);
+//        if (when == -1) {
+//            timeout = -1;
+//        } else {
+//            clock_gettime(CLOCK_MONOTONIC, &ts);
+//            now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
+//            timeout = when > now ? (int) ((when - now + 999) / 1000) : 0;
+//        }
+//        i = poll(fds, 2, timeout);
+//        if ((i == -1) && (errno != EINTR)) {
+//            /* An unexpected error. */
+//            return -1;
+//        }
+//        if (fds[0].revents & POLLHUP) {
+//            /* The close notification pipe was closed. */
+//            return 0;
+//        }
+//        if (sd_journal_process(j) == SD_JOURNAL_APPEND) {
+//            /* Data, which we might care about, was appended. */
+//            return 1;
+//        }
+//    } while ((fds[0].revents & POLLHUP) == 0);
+//    return 0;
+//}
+import "C"
+
+import (
+    "fmt"
+    "time"
+    "unsafe"
+
+    "github.com/coreos/go-systemd/journal"
+    "github.com/docker/docker/daemon/logger"
+)
+
+func (s *journald) Close() error {
+    s.readers.mu.Lock()
+    for reader := range s.readers.readers {
+        reader.Close()
+    }
+    s.readers.mu.Unlock()
+    return nil
+}
+
+func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string {
+    var msg, cursor *C.char
+    var length C.size_t
+    var stamp C.uint64_t
+    var priority C.int
+
+    // Walk the journal from here forward until we run out of new entries.
+drain:
+    for {
+        // Try not to send a given entry twice.
+        if oldCursor != "" {
+            ccursor := C.CString(oldCursor)
+            defer C.free(unsafe.Pointer(ccursor))
+            for C.sd_journal_test_cursor(j, ccursor) > 0 {
+                if C.sd_journal_next(j) <= 0 {
+                    break drain
+                }
+            }
+        }
+        // Read and send the logged message, if there is one to read.
+        i := C.get_message(j, &msg, &length)
+        if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
+            // Read the entry's timestamp.
+            if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
+                break
+            }
+            // Set up the time and text of the entry.
+            timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
+            line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...)
+            // Recover the stream name by mapping
+            // from the journal priority back to
+            // the stream that we would have
+            // assigned that value.
+            source := ""
+            if C.get_priority(j, &priority) != 0 {
+                source = ""
+            } else if priority == C.int(journal.PriErr) {
+                source = "stderr"
+            } else if priority == C.int(journal.PriInfo) {
+                source = "stdout"
+            }
+            // Send the log message.
+            cid := s.vars["CONTAINER_ID_FULL"]
+            logWatcher.Msg <- &logger.Message{ContainerID: cid, Line: line, Source: source, Timestamp: timestamp}
+        }
+        // If we're at the end of the journal, we're done (for now).
+        if C.sd_journal_next(j) <= 0 {
+            break
+        }
+    }
+    retCursor := ""
+    if C.sd_journal_get_cursor(j, &cursor) == 0 {
+        retCursor = C.GoString(cursor)
+        C.free(unsafe.Pointer(cursor))
+    }
+    return retCursor
+}
+
+func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) {
+    go func() {
+        // Keep copying journal data out until we're notified to stop.
+        for C.wait_for_data_or_close(j, pfd[0]) == 1 {
+            cursor = s.drainJournal(logWatcher, config, j, cursor)
+        }
+        // Clean up.
+        C.close(pfd[0])
+        s.readers.mu.Lock()
+        delete(s.readers.readers, logWatcher)
+        s.readers.mu.Unlock()
+    }()
+    s.readers.mu.Lock()
+    s.readers.readers[logWatcher] = logWatcher
+    s.readers.mu.Unlock()
+    // Wait until we're told to stop.
+    select {
+    case <-logWatcher.WatchClose():
+        // Notify the other goroutine that its work is done.
+        C.close(pfd[1])
+    }
+}
+
+func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
+    var j *C.sd_journal
+    var cmatch *C.char
+    var stamp C.uint64_t
+    var sinceUnixMicro uint64
+    var pipes [2]C.int
+    cursor := ""
+
+    defer close(logWatcher.Msg)
+    // Get a handle to the journal.
+    rc := C.sd_journal_open(&j, C.int(0))
+    if rc != 0 {
+        logWatcher.Err <- fmt.Errorf("error opening journal")
+        return
+    }
+    defer C.sd_journal_close(j)
+    // Remove limits on the size of data items that we'll retrieve.
+    rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
+    if rc != 0 {
+        logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
+        return
+    }
+    // Add a match to have the library do the searching for us.
+    cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
+    if cmatch == nil {
+        logWatcher.Err <- fmt.Errorf("error reading container ID")
+        return
+    }
+    defer C.free(unsafe.Pointer(cmatch))
+    rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
+    if rc != 0 {
+        logWatcher.Err <- fmt.Errorf("error setting journal match")
+        return
+    }
+    // If we have a cutoff time, convert it to Unix time once.
+    if !config.Since.IsZero() {
+        nano := config.Since.UnixNano()
+        sinceUnixMicro = uint64(nano / 1000)
+    }
+    if config.Tail > 0 {
+        lines := config.Tail
+        // Start at the end of the journal.
+        if C.sd_journal_seek_tail(j) < 0 {
+            logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
+            return
+        }
+        if C.sd_journal_previous(j) < 0 {
+            logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
+            return
+        }
+        // Walk backward.
+        for lines > 0 {
+            // Stop if the entry time is before our cutoff.
+            // We'll need the entry time if it isn't, so go
+            // ahead and parse it now.
+            if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
+                break
+            } else {
+                // Compare the timestamp on the entry
+                // to our threshold value.
+                if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
+                    break
+                }
+            }
+            lines--
+            // If we're at the start of the journal, or
+            // don't need to back up past any more entries,
+            // stop.
+            if lines == 0 || C.sd_journal_previous(j) <= 0 {
+                break
+            }
+        }
+    } else {
+        // Start at the beginning of the journal.
+        if C.sd_journal_seek_head(j) < 0 {
+            logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
+            return
+        }
+        // If we have a cutoff date, fast-forward to it.
+        if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
+            logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
+            return
+        }
+        if C.sd_journal_next(j) < 0 {
+            logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
+            return
+        }
+    }
+    cursor = s.drainJournal(logWatcher, config, j, "")
+    if config.Follow {
+        // Create a pipe that we can poll at the same time as the journald descriptor.
+        if C.pipe(&pipes[0]) == C.int(-1) {
+            logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
+        } else {
+            s.followJournal(logWatcher, config, j, pipes, cursor)
+        }
+    }
+    return
+}
+
+func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
+    logWatcher := logger.NewLogWatcher()
+    go s.readLogs(logWatcher, config)
+    return logWatcher
+}
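The reader above converts between Go time.Time values and the journal's microsecond realtime clock in two places: the --since cutoff in readLogs and each entry's timestamp in drainJournal. A small standalone sketch of those two conversions, using the same arithmetic:

package main

import (
    "fmt"
    "time"
)

// toUsec mirrors readLogs: a time.Time cutoff becomes microseconds since the epoch.
func toUsec(t time.Time) uint64 {
    return uint64(t.UnixNano() / 1000)
}

// fromUsec mirrors drainJournal: a journal realtime stamp becomes a time.Time.
func fromUsec(stamp uint64) time.Time {
    return time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
}

func main() {
    since := time.Date(2015, time.July, 1, 12, 0, 0, 500000000, time.UTC)
    usec := toUsec(since)
    fmt.Println("cutoff in journal microseconds:", usec)
    fmt.Println("round-tripped:", fromUsec(usec).UTC())
}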
7  daemon/logger/journald/read_unsupported.go  Normal file
@@ -0,0 +1,7 @@
+// +build !linux !cgo static_build !journald
+
+package journald
+
+func (s *journald) Close() error {
+    return nil
+}
@@ -41,6 +41,7 @@ func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsC
         errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
         outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
     }
+    config.OutStream = outStream
 
     cLog, err := container.getLogger()
     if err != nil {
@@ -279,7 +279,7 @@ Json Parameters:
       systems, such as SELinux.
 -   **LogConfig** - Log configuration for the container, specified as
       `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
-      Available types: `json-file`, `syslog`, `none`.
+      Available types: `json-file`, `syslog`, `journald`, `none`.
       `json-file` logging driver.
 -   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
@@ -480,7 +480,7 @@ Status Codes:
 
 Get stdout and stderr logs from the container ``id``
 
 > **Note**:
-> This endpoint works only for containers with `json-file` logging driver.
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
 
 **Example request**:
@@ -493,7 +493,7 @@ Status Codes:
 
 Get `stdout` and `stderr` logs from the container ``id``
 
 > **Note**:
-> This endpoint works only for containers with `json-file` logging driver.
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
 
 **Example request**:
@@ -504,7 +504,7 @@ Status Codes:
 
 Get `stdout` and `stderr` logs from the container ``id``
 
 > **Note**:
-> This endpoint works only for containers with `json-file` logging driver.
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
 
 **Example request**:
@@ -20,8 +20,8 @@ weight=1
   -t, --timestamps=false    Show timestamps
       --tail="all"          Number of lines to show from the end of the logs
 
-NOTE: this command is available only for containers with `json-file` logging
-driver.
+NOTE: this command is available only for containers with `json-file` and
+`journald` logging drivers.
 
 The `docker logs` command batch-retrieves logs present at the time of execution.
 
@@ -12,7 +12,7 @@ parent = "smn_logging"
 
 The `journald` logging driver sends container logs to the [systemd
 journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html). Log entries can be retrieved using the `journalctl`
-command or through use of the journal API.
+command, through use of the journal API, or using the `docker logs` command.
 
 In addition to the text of the log message itself, the `journald` log
 driver stores the following metadata in the journal with each message:
@@ -1013,8 +1013,8 @@ container's logging driver. The following options are supported:
 | `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). |
 | `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs |
 
-The `docker logs`command is available only for the `json-file` logging
-driver. For detailed information on working with logging drivers, see
+The `docker logs` command is available only for the `json-file` and `journald`
+logging drivers. For detailed information on working with logging drivers, see
 [Configure a logging driver](/reference/logging/overview/).
 
@@ -104,6 +104,9 @@ fi
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then
     DOCKER_BUILDTAGS+=" daemon"
+    if pkg-config libsystemd-journal 2> /dev/null ; then
+        DOCKER_BUILDTAGS+=" journald"
+    fi
 fi
 
 if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then
@@ -26,6 +26,7 @@ Packager: Docker <support@docker.com>
 # only require systemd on those systems
 %if 0%{?is_systemd}
 BuildRequires: pkgconfig(systemd)
+BuildRequires: pkgconfig(libsystemd-journal)
 Requires: systemd-units
 %else
 Requires(post): chkconfig
@@ -1196,11 +1196,11 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
     }
     id := strings.TrimSpace(out)
     out, err = s.d.Cmd("logs", id)
-    if err == nil {
-        c.Fatalf("Logs should fail with \"none\" driver")
+    if err != nil {
+        c.Fatalf("Logs request should be sent and then fail with \"none\" driver")
     }
-    if !strings.Contains(out, `"logs" command is supported only for "json-file" logging driver`) {
-        c.Fatalf("There should be error about non-json-file driver, got: %s", out)
+    if !strings.Contains(out, `Error running logs job: Failed to get logging factory: logger: no log driver named 'none' is registered`) {
+        c.Fatalf("There should be an error about none not being a recognized log driver, got: %s", out)
     }
 }
 
@@ -171,7 +171,8 @@ millions of trillions.
 
 **--log-driver**="|*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
-  **Warning**: `docker logs` command works only for `json-file` logging driver.
+  **Warning**: the `docker logs` command works only for the `json-file` and
+  `journald` logging drivers.
 
 **--log-opt**=[]
   Logging driver specific options.
@@ -23,7 +23,8 @@ The **docker logs --follow** command combines commands **docker logs** and
 **docker attach**. It will first return all logs from the beginning and
 then continue streaming new output from the container’s stdout and stderr.
 
-**Warning**: This command works only for **json-file** logging driver.
+**Warning**: This command works only for the **json-file** or **journald**
+logging drivers.
 
 # OPTIONS
 **--help**
@@ -271,7 +271,8 @@ which interface and port to use.
 
 **--log-driver**="|*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
-  **Warning**: `docker logs` command works only for `json-file` logging driver.
+  **Warning**: the `docker logs` command works only for the `json-file` and
+  `journald` logging drivers.
 
 **--log-opt**=[]
   Logging driver specific options.
@@ -121,7 +121,8 @@ unix://[/path/to/socket] to use.
 
 **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*none*"
   Default driver for container logs. Default is `json-file`.
-  **Warning**: `docker logs` command works only for `json-file` logging driver.
+  **Warning**: the `docker logs` command works only for the `json-file` and
+  `journald` logging drivers.
 
 **--log-opt**=[]
   Logging driver specific options.