
refactor logs and support service logs w/ TTY

Refactor the container logs system to make communicating log messages
internally much simpler. Move responsibility for marshalling log
messages into the REST server. Support TTY logs, and pave the way for
fixing the ambiguous bytestream format and the handling of details.

Signed-off-by: Drew Erny <drew.erny@docker.com>
Drew Erny 2017-03-20 10:07:04 -07:00
parent f6b7dc9837
commit 1044093bb0
14 changed files with 454 additions and 315 deletions
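
The heart of the refactor is visible in the first file below: the backend now hands the API layer a read-only channel of log messages instead of writing an encoded byte stream itself, and errors are delivered in band on that same channel. A minimal sketch of that contract, using a stand-in message type rather than the real backend.LogMessage:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// stand-in for backend.LogMessage; the real struct also carries
	// Source, Attrs, and Partial
	type logMessage struct {
		Line      []byte
		Timestamp time.Time
		Err       error // errors are delivered in band on the same channel
	}

	// consume drains the channel until the producer closes it or ctx is
	// canceled, which is the same loop shape WriteLogStream uses
	func consume(ctx context.Context, msgs <-chan *logMessage) {
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok := <-msgs:
				if !ok {
					return // channel closed: no more logs
				}
				if msg.Err != nil {
					fmt.Printf("Error grabbing logs: %v\n", msg.Err)
					continue
				}
				fmt.Printf("%s %s", msg.Timestamp.Format(time.RFC3339Nano), msg.Line)
			}
		}
	}

	func main() {
		msgs := make(chan *logMessage, 1)
		go func() {
			defer close(msgs) // closing signals end-of-logs to the consumer
			msgs <- &logMessage{Line: []byte("hello\n"), Timestamp: time.Now()}
		}()
		consume(context.Background(), msgs)
	}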

View file

@@ -0,0 +1,92 @@
package httputils

import (
	"fmt"
	"io"
	"sort"
	"strings"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/stdcopy"
)

// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

	wf.Flush()

	// it might seem cleaner to just write:
	//   var outStream io.Writer = wf
	// however, this GREATLY DISPLEASES golint, and if you do that, it will
	// fail CI. we need outStream to be typed io.Writer because if we mux
	// streams, we will need to reassign all of the streams to be stdwriters,
	// which only conform to the io.Writer interface.
	var outStream io.Writer
	outStream = wf
	errStream := outStream
	sysErrStream := errStream
	if mux {
		sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr)
		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	for {
		msg, ok := <-msgs
		if !ok {
			return
		}
		// check if the message contains an error. if so, write that error
		// out and keep going
		if msg.Err != nil {
			fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err)
			continue
		}
		logLine := msg.Line
		if config.Details {
			logLine = append([]byte(stringAttrs(msg.Attrs)+" "), logLine...)
		}
		if config.Timestamps {
			// TODO(dperny) the format is defined in
			// daemon/logger/logger.go as logger.TimeFormat. importing
			// logger is verboten (not part of backend) so idk if just
			// importing the same thing from jsonlog is good enough
			logLine = append([]byte(msg.Timestamp.Format(jsonlog.RFC3339NanoFixed)+" "), logLine...)
		}
		if msg.Source == "stdout" && config.ShowStdout {
			outStream.Write(logLine)
		}
		if msg.Source == "stderr" && config.ShowStderr {
			errStream.Write(logLine)
		}
	}
}

type byKey []string

func (s byKey) Len() int { return len(s) }
func (s byKey) Less(i, j int) bool {
	keyI := strings.Split(s[i], "=")
	keyJ := strings.Split(s[j], "=")
	return keyI[0] < keyJ[0]
}
func (s byKey) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func stringAttrs(a backend.LogAttributes) string {
	var ss byKey
	for k, v := range a {
		ss = append(ss, k+"="+v)
	}
	sort.Sort(ss)
	return strings.Join(ss, ",")
}
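
For reference, a freestanding sketch (not part of the commit) of how the muxed bytestream round-trips: the writer side frames each payload with stdcopy headers exactly as above, and the client side splits the frames back apart with stdcopy.StdCopy.

	package main

	import (
		"bytes"
		"os"

		"github.com/docker/docker/pkg/stdcopy"
	)

	func main() {
		// mux two logical streams into one buffer, as WriteLogStream
		// does when mux is true
		var muxed bytes.Buffer
		outw := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
		errw := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
		outw.Write([]byte("out line\n"))
		errw.Write([]byte("err line\n"))

		// demux on the client side: stdout frames go to the first
		// writer, stderr frames to the second
		if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
			panic(err)
		}
	}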

View file

@@ -51,7 +51,7 @@ type stateBackend interface {
 type monitorBackend interface {
 	ContainerChanges(name string) ([]archive.Change, error)
 	ContainerInspect(name string, size bool, version string) (interface{}, error)
-	ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error
+	ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
 	ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
 	ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)

View file

@@ -10,6 +10,7 @@ import (
 	"time"

 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -18,7 +19,6 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/stdcopy"
 	"golang.org/x/net/context"
 	"golang.org/x/net/websocket"
 )
@@ -91,33 +91,38 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
 	}
 	containerName := vars["name"]

-	logsConfig := &backend.ContainerLogsConfig{
-		ContainerLogsOptions: types.ContainerLogsOptions{
-			Follow:     httputils.BoolValue(r, "follow"),
-			Timestamps: httputils.BoolValue(r, "timestamps"),
-			Since:      r.Form.Get("since"),
-			Tail:       r.Form.Get("tail"),
-			ShowStdout: stdout,
-			ShowStderr: stderr,
-			Details:    httputils.BoolValue(r, "details"),
-		},
-		OutStream: w,
+	logsConfig := &types.ContainerLogsOptions{
+		Follow:     httputils.BoolValue(r, "follow"),
+		Timestamps: httputils.BoolValue(r, "timestamps"),
+		Since:      r.Form.Get("since"),
+		Tail:       r.Form.Get("tail"),
+		ShowStdout: stdout,
+		ShowStderr: stderr,
+		Details:    httputils.BoolValue(r, "details"),
 	}

-	chStarted := make(chan struct{})
-	if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil {
-		select {
-		case <-chStarted:
-			// The client may be expecting all of the data we're sending to
-			// be multiplexed, so mux it through the Systemerr stream, which
-			// will cause the client to throw an error when demuxing
-			stdwriter := stdcopy.NewStdWriter(logsConfig.OutStream, stdcopy.Systemerr)
-			fmt.Fprintf(stdwriter, "Error running logs job: %v\n", err)
-		default:
-			return err
-		}
+	// doesn't matter what version the client is on, we're using this internally only
+	// also do we need size? i'm thinkin no we don't
+	raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion)
+	if err != nil {
+		return err
+	}
+	container, ok := raw.(*types.ContainerJSON)
+	if !ok {
+		// %T prints the type. handy!
+		return fmt.Errorf("expected container to be *types.ContainerJSON but got %T", raw)
 	}

+	msgs, err := s.backend.ContainerLogs(ctx, containerName, logsConfig)
+	if err != nil {
+		return err
+	}
+
+	// if it has a tty, we're not muxing streams. if it doesn't, we are. simple.
+	// this is the point of no return for writing a response. once we call
+	// WriteLogStream, the response has been started and errors will be
+	// returned in band by WriteLogStream
+	httputils.WriteLogStream(ctx, w, msgs, logsConfig, !container.Config.Tty)
 	return nil
 }

View file

@@ -21,7 +21,7 @@ type Backend interface {
 	CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error)
 	UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions) (*basictypes.ServiceUpdateResponse, error)
 	RemoveService(string) error
-	ServiceLogs(context.Context, *backend.LogSelector, *backend.ContainerLogsConfig, chan struct{}) error
+	ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
 	GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
 	GetNode(string) (types.Node, error)
 	UpdateNode(string, uint64, types.NodeSpec) error

View file

@@ -7,7 +7,6 @@ import (
 	"github.com/docker/docker/api/server/httputils"
 	basictypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
-	"github.com/docker/docker/pkg/stdcopy"
 	"golang.org/x/net/context"
 )
@@ -24,32 +23,43 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *
 		return fmt.Errorf("Bad parameters: you must choose at least one stream")
 	}

-	logsConfig := &backend.ContainerLogsConfig{
-		ContainerLogsOptions: basictypes.ContainerLogsOptions{
-			Follow:     httputils.BoolValue(r, "follow"),
-			Timestamps: httputils.BoolValue(r, "timestamps"),
-			Since:      r.Form.Get("since"),
-			Tail:       r.Form.Get("tail"),
-			ShowStdout: stdout,
-			ShowStderr: stderr,
-			Details:    httputils.BoolValue(r, "details"),
-		},
-		OutStream: w,
+	// there is probably a neater way to manufacture the ContainerLogsOptions
+	// struct, probably in the caller, to eliminate the dependency on net/http
+	logsConfig := &basictypes.ContainerLogsOptions{
+		Follow:     httputils.BoolValue(r, "follow"),
+		Timestamps: httputils.BoolValue(r, "timestamps"),
+		Since:      r.Form.Get("since"),
+		Tail:       r.Form.Get("tail"),
+		ShowStdout: stdout,
+		ShowStderr: stderr,
+		Details:    httputils.BoolValue(r, "details"),
 	}

-	chStarted := make(chan struct{})
-	if err := sr.backend.ServiceLogs(ctx, selector, logsConfig, chStarted); err != nil {
-		select {
-		case <-chStarted:
-			// The client may be expecting all of the data we're sending to
-			// be multiplexed, so send it through OutStream, which will
-			// have been set up to handle that if needed.
-			stdwriter := stdcopy.NewStdWriter(w, stdcopy.Systemerr)
-			fmt.Fprintf(stdwriter, "Error grabbing service logs: %v\n", err)
-		default:
+	tty := false
+	// checking for whether logs are TTY involves iterating over every service
+	// and task. idk if there is a better way
+	for _, service := range selector.Services {
+		s, err := sr.backend.GetService(service)
+		if err != nil {
+			// maybe should return some context with this error?
+			return err
+		}
+		tty = s.Spec.TaskTemplate.ContainerSpec.TTY || tty
+	}
+	for _, task := range selector.Tasks {
+		t, err := sr.backend.GetTask(task)
+		if err != nil {
+			// as above
 			return err
 		}
+		tty = t.Spec.ContainerSpec.TTY || tty
 	}

+	msgs, err := sr.backend.ServiceLogs(ctx, selector, logsConfig)
+	if err != nil {
+		return err
+	}
+
+	httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
 	return nil
 }

View file

@@ -3,6 +3,7 @@ package backend
 import (
 	"io"
+	"time"

 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/pkg/streamformatter"
@@ -25,13 +26,28 @@ type ContainerAttachConfig struct {
 	MuxStreams bool
 }

-// ContainerLogsConfig holds configs for logging operations. Exists
-// for users of the backend to to pass it a logging configuration.
-type ContainerLogsConfig struct {
-	types.ContainerLogsOptions
-	OutStream io.Writer
+// LogMessage is a data structure that represents a piece of output produced
+// by some container. The Line member is a slice of an array whose contents
+// can be changed after a log driver's Log() method returns.
+// changes to this struct need to be reflected in the reset method in
+// daemon/logger/logger.go
+type LogMessage struct {
+	Line      []byte
+	Source    string
+	Timestamp time.Time
+	Attrs     LogAttributes
+	Partial   bool
+
+	// Err is an error associated with a message. Completeness of a message
+	// with Err is not expected, though it may be partially complete (fields
+	// may be missing, gibberish, or nil)
+	Err error
 }

+// LogAttributes is used to hold the extra attributes available in the log message
+// Primarily used for converting the map type to string and sorting.
+type LogAttributes map[string]string
+
 // LogSelector is a list of services and tasks that should be returned as part
 // of a log stream. It is similar to swarmapi.LogSelector, with the difference
 // that the names don't have to be resolved to IDs; this is mostly to avoid

View file

@@ -73,6 +73,7 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
 		Timestamps: opts.timestamps,
 		Follow:     opts.follow,
 		Tail:       opts.tail,
+		Details:    true,
 	}

 	cli := dockerCli.Client()
@@ -80,6 +81,7 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
 	var (
 		maxLength    = 1
 		responseBody io.ReadCloser
+		tty          bool
 	)

 	service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target)
@@ -89,6 +91,14 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
 			return err
 		}
 		task, _, err := cli.TaskInspectWithRaw(ctx, opts.target)
+		tty = task.Spec.ContainerSpec.TTY
+		// TODO(dperny) hot fix until we get a nice details system squared away,
+		// ignores details (including task context) if we have a TTY log
+		if tty {
+			options.Details = false
+		}
+
+		responseBody, err = cli.TaskLogs(ctx, opts.target, options)
 		if err != nil {
 			if client.IsErrTaskNotFound(err) {
 				// if the task ALSO isn't found, rewrite the error to be clear
@@ -100,6 +110,13 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
 		maxLength = getMaxLength(task.Slot)
 		responseBody, err = cli.TaskLogs(ctx, opts.target, options)
 	} else {
+		tty = service.Spec.TaskTemplate.ContainerSpec.TTY
+		// TODO(dperny) hot fix until we get a nice details system squared away,
+		// ignores details (including task context) if we have a TTY log
+		if tty {
+			options.Details = false
+		}
+
 		responseBody, err = cli.ServiceLogs(ctx, opts.target, options)
 		if err != nil {
 			return err
@@ -112,6 +129,11 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
 	}
 	defer responseBody.Close()

+	if tty {
+		_, err = io.Copy(dockerCli.Out(), responseBody)
+		return err
+	}
+
 	taskFormatter := newTaskFormatter(cli, opts, maxLength)

 	stdout := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Out()}
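
The CLI's read side now mirrors the daemon's muxing decision: a TTY log arrives as one raw stream and is copied straight through, while a non-TTY log is stdcopy-muxed and must be demuxed. A condensed sketch of that branch, with copyLogs being a hypothetical helper that is not in the diff:

	package main

	import (
		"io"

		"github.com/docker/docker/pkg/stdcopy"
	)

	// copyLogs condenses the choice above: raw copy for TTY logs,
	// stdcopy demux for everything else
	func copyLogs(dst, dstErr io.Writer, body io.Reader, tty bool) error {
		if tty {
			_, err := io.Copy(dst, body)
			return err
		}
		_, err := stdcopy.StdCopy(dst, dstErr, body)
		return err
	}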

View file

@@ -33,7 +33,7 @@ type Backend interface {
 	CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
 	ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
 	ContainerStop(name string, seconds *int) error
-	ContainerLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error
+	ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
 	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
 	ActivateContainerServiceBinding(containerName string) error
 	DeactivateContainerServiceBinding(containerName string) error

View file

@@ -396,26 +396,15 @@ func (c *containerAdapter) deactivateServiceBinding() error {
 	return c.backend.DeactivateContainerServiceBinding(c.container.name())
 }

-func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) {
-	// we can't handle the peculiarities of a TTY-attached container yet
-	conf := c.container.config()
-	if conf != nil && conf.Tty {
-		return nil, errors.New("logs not supported on containers with a TTY attached")
-	}
-
-	reader, writer := io.Pipe()
-
-	apiOptions := &backend.ContainerLogsConfig{
-		ContainerLogsOptions: types.ContainerLogsOptions{
-			Follow: options.Follow,
-
-			// TODO(stevvooe): Parse timestamp out of message. This
-			// absolutely needs to be done before going to production with
-			// this, at it is completely redundant.
-			Timestamps: true,
-			Details:    false, // no clue what to do with this, let's just deprecate it.
-		},
-		OutStream: writer,
+func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
+	apiOptions := &types.ContainerLogsOptions{
+		Follow: options.Follow,
+
+		// TODO(stevvooe): Parse timestamp out of message. This
+		// absolutely needs to be done before going to production with
+		// this, at it is completely redundant.
+		Timestamps: true,
+		Details:    false, // no clue what to do with this, let's just deprecate it.
 	}

 	if options.Since != nil {
@@ -449,14 +438,11 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription
 		}
 	}

-	chStarted := make(chan struct{})
-	go func() {
-		defer writer.Close()
-		c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted)
-	}()
-
-	return reader, nil
+	msgs, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
+	if err != nil {
+		return nil, err
+	}
+	return msgs, nil
 }

 // todo: typed/wrapped errors

View file

@@ -1,11 +1,7 @@
 package container

 import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
 	"fmt"
-	"io"
 	"os"
 	"strconv"
 	"strings"
@@ -445,11 +441,12 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti
 		return errors.Wrap(err, "container not ready for logs")
 	}

-	rc, err := r.adapter.logs(ctx, options)
+	logsContext, cancel := context.WithCancel(ctx)
+	msgs, err := r.adapter.logs(logsContext, options)
+	defer cancel()
 	if err != nil {
 		return errors.Wrap(err, "failed getting container logs")
 	}
-	defer rc.Close()

 	var (
 		// use a rate limiter to keep things under control but also provides some
@@ -462,53 +459,38 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti
 		}
 	)

-	brd := bufio.NewReader(rc)
 	for {
-		// so, message header is 8 bytes, treat as uint64, pull stream off MSB
-		var header uint64
-		if err := binary.Read(brd, binary.BigEndian, &header); err != nil {
-			if err == io.EOF {
-				return nil
-			}
-			return errors.Wrap(err, "failed reading log header")
+		msg, ok := <-msgs
+		if !ok {
+			// we're done here, no more messages
+			return nil
 		}

-		stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3))
+		if msg.Err != nil {
+			// the deferred cancel closes the adapter's log stream
+			return msg.Err
+		}

-		// limit here to decrease allocation back pressure.
-		if err := limiter.WaitN(ctx, int(size)); err != nil {
+		// wait here for the limiter to catch up
+		if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
 			return errors.Wrap(err, "failed rate limiter")
 		}

-		buf := make([]byte, size)
-		_, err := io.ReadFull(brd, buf)
-		if err != nil {
-			return errors.Wrap(err, "failed reading buffer")
-		}
-
-		// Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish
-		parts := bytes.SplitN(buf, []byte(" "), 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("invalid timestamp in log message: %v", buf)
-		}
-
-		ts, err := time.Parse(time.RFC3339Nano, string(parts[0]))
-		if err != nil {
-			return errors.Wrap(err, "failed to parse timestamp")
-		}
-
-		tsp, err := gogotypes.TimestampProto(ts)
+		tsp, err := gogotypes.TimestampProto(msg.Timestamp)
 		if err != nil {
 			return errors.Wrap(err, "failed to convert timestamp")
 		}

+		var stream api.LogStream
+		if msg.Source == "stdout" {
+			stream = api.LogStreamStdout
+		} else if msg.Source == "stderr" {
+			stream = api.LogStreamStderr
+		}
+
 		if err := publisher.Publish(ctx, api.LogMessage{
 			Context:   msgctx,
 			Timestamp: tsp,
-			Stream:    api.LogStream(stream),
-
-			Data: parts[1],
+			Stream:    stream,
+			Data:      msg.Line,
 		}); err != nil {
 			return errors.Wrap(err, "failed to publish log message")
 		}
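
The rate limiter in the surrounding code is golang.org/x/time/rate, and the loop now charges it per message with WaitN(ctx, len(msg.Line)) instead of per framed chunk. A freestanding sketch of that pattern, with made-up limits:

	package main

	import (
		"context"
		"fmt"
		"time"

		"golang.org/x/time/rate"
	)

	func main() {
		// allow ~1KB/s with bursts up to 4KB; the controller does the
		// same per log line via limiter.WaitN(ctx, len(msg.Line))
		limiter := rate.NewLimiter(rate.Limit(1024), 4096)
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()

		for _, line := range []string{"short\n", "a somewhat longer log line\n"} {
			// WaitN blocks until the tokens are available or ctx expires
			if err := limiter.WaitN(ctx, len(line)); err != nil {
				fmt.Println("rate limiter:", err)
				return
			}
			fmt.Print(line)
		}
	}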

View file

@@ -18,9 +18,6 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	timetypes "github.com/docker/docker/api/types/time"
 	"github.com/docker/docker/daemon/cluster/convert"
-	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/stdcopy"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
 	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
@@ -303,56 +300,44 @@ func (c *Cluster) RemoveService(input string) error {
 }

 // ServiceLogs collects service logs and writes them back to `config.OutStream`
-func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *backend.ContainerLogsConfig, started chan struct{}) error {
+func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
 	c.mu.RLock()
-	defer func() {
-		select {
-		case <-started:
-			// if we've started streaming logs, we are no longer holding the
-			// lock and do not have to release it
-			return
-		default:
-			// before we start, though, we're holding this lock and it needs to
-			// be released
-			c.mu.RUnlock()
-		}
-	}()
+	defer c.mu.RUnlock()

 	state := c.currentNodeState()
 	if !state.IsActiveManager() {
-		return c.errNoManager(state)
+		return nil, c.errNoManager(state)
 	}

-	swarmSelector, tty, err := convertSelector(ctx, state.controlClient, selector)
+	swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
 	if err != nil {
-		return errors.Wrap(err, "error making log selector")
-	}
-
-	// TODO(dperny) this goes away when we support TTY logs, which is in the works
-	if tty {
-		return errors.New("service logs not supported on tasks with a TTY attached")
+		return nil, errors.Wrap(err, "error making log selector")
 	}

 	// set the streams we'll use
 	stdStreams := []swarmapi.LogStream{}
-	if config.ContainerLogsOptions.ShowStdout {
+	if config.ShowStdout {
 		stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
 	}
-	if config.ContainerLogsOptions.ShowStderr {
+	if config.ShowStderr {
 		stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
 	}

 	// Get tail value squared away - the number of previous log lines we look at
 	var tail int64
+	// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
+	// it to -1 (all). i don't agree with that, but i also think no tail value
+	// should be legitimate. if you don't pass tail, we assume you want "all"
 	if config.Tail == "all" || config.Tail == "" {
 		// tail of 0 means send all logs on the swarmkit side
 		tail = 0
 	} else {
 		t, err := strconv.Atoi(config.Tail)
 		if err != nil {
-			return errors.New("tail value must be a positive integer or \"all\"")
+			return nil, errors.New("tail value must be a positive integer or \"all\"")
 		}
 		if t < 0 {
-			return errors.New("negative tail values not supported")
+			return nil, errors.New("negative tail values not supported")
 		}
 		// we actually use negative tail in swarmkit to represent messages
 		// backwards starting from the beginning. also, -1 means no logs. so,
@@ -370,12 +355,12 @@ func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector
 	if config.Since != "" {
 		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
 		if err != nil {
-			return errors.Wrap(err, "could not parse since timestamp")
+			return nil, errors.Wrap(err, "could not parse since timestamp")
 		}
 		since := time.Unix(s, n)
 		sinceProto, err = gogotypes.TimestampProto(since)
 		if err != nil {
-			return errors.Wrap(err, "could not parse timestamp to proto")
+			return nil, errors.Wrap(err, "could not parse timestamp to proto")
 		}
 	}
@@ -389,106 +374,96 @@ func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector
 		},
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}

-	wf := ioutils.NewWriteFlusher(config.OutStream)
-	defer wf.Close()
-
-	// Release the lock before starting the stream.
-	//
-	// this feels like it could be racy because we would double unlock if we
-	// somehow returned right after we unlocked but before we closed, but I do
-	// not think such a thing is possible. i wish it were possible to atomically
-	// close and unlock but that might be overkill. programming is hard.
-	c.mu.RUnlock()
-	close(started)
-
-	wf.Flush()
-
-	outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
-	errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)
-
-	for {
-		// Check the context before doing anything.
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-		}
-
-		subscribeMsg, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-
-		for _, msg := range subscribeMsg.Messages {
-			data := []byte{}
-
-			if config.Timestamps {
-				ts, err := gogotypes.TimestampFromProto(msg.Timestamp)
-				if err != nil {
-					return err
-				}
-				data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
-			}
-
-			data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
-				contextPrefix, msg.Context.NodeID,
-				contextPrefix, msg.Context.ServiceID,
-				contextPrefix, msg.Context.TaskID,
-			))...)
-
-			data = append(data, msg.Data...)
-
-			switch msg.Stream {
-			case swarmapi.LogStreamStdout:
-				outStream.Write(data)
-			case swarmapi.LogStreamStderr:
-				errStream.Write(data)
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		defer close(messageChan)
+		for {
+			// Check the context before doing anything.
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+
+			subscribeMsg, err := stream.Recv()
+			if err == io.EOF {
+				return
+			}
+			// if we're not io.EOF, push the message in and return
+			if err != nil {
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
+				}
+				return
+			}
+
+			for _, msg := range subscribeMsg.Messages {
+				// make a new message
+				m := new(backend.LogMessage)
+				m.Attrs = make(backend.LogAttributes)
+				// add the timestamp, adding the error if it fails
+				m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
+				if err != nil {
+					m.Err = err
+				}
+				m.Attrs[contextPrefix+".node.id"] = msg.Context.NodeID
+				m.Attrs[contextPrefix+".service.id"] = msg.Context.ServiceID
+				m.Attrs[contextPrefix+".task.id"] = msg.Context.TaskID
+				switch msg.Stream {
+				case swarmapi.LogStreamStdout:
+					m.Source = "stdout"
+				case swarmapi.LogStreamStderr:
+					m.Source = "stderr"
+				}
+				m.Line = msg.Data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. we need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
+				select {
+				case <-ctx.Done():
+					return
+				case messageChan <- m:
+				}
 			}
 		}
-	}
+	}()
+	return messageChan, nil
 }

 // convertSelector takes a backend.LogSelector, which contains raw names that
 // may or may not be valid, and converts them to an api.LogSelector proto. It
-// also returns a boolean, true if any of the services use a TTY (false
-// otherwise) and an error if something fails
-func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, bool, error) {
-	// if ANY tasks use a TTY, don't mux streams
-	var tty bool
+// returns an error if something fails
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
 	// don't rely on swarmkit to resolve IDs, do it ourselves
 	swarmSelector := &swarmapi.LogSelector{}
 	for _, s := range selector.Services {
 		service, err := getService(ctx, cc, s)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		c := service.Spec.Task.GetContainer()
 		if c == nil {
-			return nil, false, errors.New("logs only supported on container tasks")
+			return nil, errors.New("logs only supported on container tasks")
 		}
-		// set TTY true if we have a TTY service, or if it's already true
-		tty = tty || c.TTY
 		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
 	}
 	for _, t := range selector.Tasks {
 		task, err := getTask(ctx, cc, t)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		c := task.Spec.GetContainer()
 		if c == nil {
-			return nil, false, errors.New("logs only supported on container tasks")
+			return nil, errors.New("logs only supported on container tasks")
 		}
-		tty = tty || c.TTY
 		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
 	}
-	return swarmSelector, tty, nil
+	return swarmSelector, nil
 }

 // imageWithDigestString takes an image such as name or name:tag
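
This producer and the daemon's shim further down follow the same channel discipline: the goroutine that creates the channel is the only one that closes it, and every send races against ctx.Done() so a departed reader can never strand it. A distilled sketch of the pattern, with a stand-in message type:

	package main

	import (
		"context"
		"errors"
		"fmt"
	)

	type logMessage struct {
		Line []byte
		Err  error
	}

	// produce owns the channel: it alone closes it, and it never performs
	// a bare send that could block forever after the consumer gives up
	func produce(ctx context.Context, next func() ([]byte, error)) <-chan *logMessage {
		out := make(chan *logMessage, 1)
		go func() {
			defer close(out)
			for {
				line, err := next()
				if err != nil {
					// deliver the error in band, unless the reader is gone
					select {
					case <-ctx.Done():
					case out <- &logMessage{Err: err}:
					}
					return
				}
				select {
				case <-ctx.Done():
					return
				case out <- &logMessage{Line: line}:
				}
			}
		}()
		return out
	}

	func main() {
		n := 0
		next := func() ([]byte, error) {
			if n++; n > 3 {
				return nil, errors.New("stream ended")
			}
			return []byte(fmt.Sprintf("line %d\n", n)), nil
		}
		for msg := range produce(context.Background(), next) {
			if msg.Err != nil {
				fmt.Println("stream error:", msg.Err)
				return
			}
			fmt.Print(string(msg.Line))
		}
	}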

View file

@@ -9,11 +9,10 @@ package logger
 import (
 	"errors"
-	"sort"
-	"strings"
 	"sync"
 	"time"

+	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/pkg/jsonlog"
 )
@@ -43,14 +42,13 @@ func PutMessage(msg *Message) {
 // Message is datastructure that represents piece of output produced by some
 // container. The Line member is a slice of an array whose contents can be
 // changed after a log driver's Log() method returns.
+//
+// Message is subtyped from backend.LogMessage because there is a lot of
+// internal complexity around the Message type that should not be exposed
+// to any package not explicitly importing the logger type.
+//
 // Any changes made to this struct must also be updated in the `reset` function
-type Message struct {
-	Line      []byte
-	Source    string
-	Timestamp time.Time
-	Attrs     LogAttributes
-	Partial   bool
-}
+type Message backend.LogMessage

 // reset sets the message back to default values
 // This is used when putting a message back into the message pool.
@@ -60,31 +58,20 @@ func (m *Message) reset() {
 	m.Source = ""
 	m.Attrs = nil
 	m.Partial = false
+
+	m.Err = nil
 }

+// AsLogMessage returns a pointer to the message as a pointer to
+// backend.LogMessage, which is an identical type with a different purpose
+func (m *Message) AsLogMessage() *backend.LogMessage {
+	return (*backend.LogMessage)(m)
+}
+
 // LogAttributes is used to hold the extra attributes available in the log message
 // Primarily used for converting the map type to string and sorting.
-type LogAttributes map[string]string
-
-type byKey []string
-
-func (s byKey) Len() int { return len(s) }
-func (s byKey) Less(i, j int) bool {
-	keyI := strings.Split(s[i], "=")
-	keyJ := strings.Split(s[j], "=")
-	return keyI[0] < keyJ[0]
-}
-func (s byKey) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (a LogAttributes) String() string {
-	var ss byKey
-	for k, v := range a {
-		ss = append(ss, k+"="+v)
-	}
-	sort.Sort(ss)
-	return strings.Join(ss, ",")
-}
+// Imported here so it can be used internally with less refactoring
+type LogAttributes backend.LogAttributes

 // Logger is the interface for docker logging drivers.
 type Logger interface {
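
The `type Message backend.LogMessage` declaration works because the two types share an identical underlying struct, so the pointer conversion in AsLogMessage is legal Go and copies nothing. A self-contained illustration of the same trick, with simplified stand-in types:

	package main

	import "fmt"

	type LogMessage struct {
		Line []byte
	}

	// Message has the same underlying type as LogMessage, so Go permits
	// direct conversion between them (and between their pointer types)
	type Message LogMessage

	func (m *Message) AsLogMessage() *LogMessage {
		return (*LogMessage)(m) // a pointer conversion: no data is copied
	}

	func main() {
		m := &Message{Line: []byte("hi")}
		lm := m.AsLogMessage()
		lm.Line[0] = 'H'            // both views share the same backing array
		fmt.Println(string(m.Line)) // prints "Hi"
	}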

View file

@@ -2,48 +2,57 @@ package daemon
 import (
 	"errors"
-	"io"
 	"strconv"
 	"time"

 	"golang.org/x/net/context"

 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	containertypes "github.com/docker/docker/api/types/container"
 	timetypes "github.com/docker/docker/api/types/time"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/stdcopy"
 )

-// ContainerLogs hooks up a container's stdout and stderr streams
-// configured with the given struct.
-func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error {
+// ContainerLogs copies the container's log channel to the channel provided in
+// the config. If ContainerLogs returns an error, no messages have been copied,
+// and the channel will be closed without data.
+//
+// if it returns nil, the config channel will be active and return log
+// messages until it runs out or the context is canceled.
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
+	lg := logrus.WithFields(logrus.Fields{
+		"module":    "daemon",
+		"method":    "(*Daemon).ContainerLogs",
+		"container": containerName,
+	})
+
 	if !(config.ShowStdout || config.ShowStderr) {
-		return errors.New("You must choose at least one stream")
+		return nil, errors.New("You must choose at least one stream")
 	}
 	container, err := daemon.GetContainer(containerName)
 	if err != nil {
-		return err
+		return nil, err
 	}

 	if container.RemovalInProgress || container.Dead {
-		return errors.New("can not get logs from container which is dead or marked for removal")
+		return nil, errors.New("can not get logs from container which is dead or marked for removal")
 	}

 	if container.HostConfig.LogConfig.Type == "none" {
-		return logger.ErrReadLogsNotSupported
+		return nil, logger.ErrReadLogsNotSupported
 	}

 	cLog, err := daemon.getLogger(container)
 	if err != nil {
-		return err
+		return nil, err
 	}

 	logReader, ok := cLog.(logger.LogReader)
 	if !ok {
-		return logger.ErrReadLogsNotSupported
+		return nil, logger.ErrReadLogsNotSupported
 	}

 	follow := config.Follow && container.IsRunning()
@@ -52,76 +61,91 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 		tailLines = -1
 	}

-	logrus.Debug("logs: begin stream")
-
 	var since time.Time
 	if config.Since != "" {
 		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		since = time.Unix(s, n)
 	}

 	readConfig := logger.ReadConfig{
 		Since:  since,
 		Tail:   tailLines,
 		Follow: follow,
 	}

 	logs := logReader.ReadLogs(readConfig)
-	// Close logWatcher on exit
-	defer func() {
-		logs.Close()
-		if cLog != container.LogDriver {
-			// Since the logger isn't cached in the container, which
-			// occurs if it is running, it must get explicitly closed
-			// here to avoid leaking it and any file handles it has.
-			if err := cLog.Close(); err != nil {
-				logrus.Errorf("Error closing logger: %v", err)
-			}
-		}
-	}()
-
-	wf := ioutils.NewWriteFlusher(config.OutStream)
-	defer wf.Close()
-	close(started)
-	wf.Flush()
-
-	var outStream io.Writer
-	outStream = wf
-	errStream := outStream
-	if !container.Config.Tty {
-		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
-		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
-	}
-
-	for {
-		select {
-		case err := <-logs.Err:
-			logrus.Errorf("Error streaming logs: %v", err)
-			return nil
-		case <-ctx.Done():
-			logrus.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
-			return nil
-		case msg, ok := <-logs.Msg:
-			if !ok {
-				logrus.Debug("logs: end stream")
-				return nil
-			}
-			logLine := msg.Line
-			if config.Details {
-				logLine = append([]byte(msg.Attrs.String()+" "), logLine...)
-			}
-			if config.Timestamps {
-				logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...)
-			}
-			if msg.Source == "stdout" && config.ShowStdout {
-				outStream.Write(logLine)
-			}
-			if msg.Source == "stderr" && config.ShowStderr {
-				errStream.Write(logLine)
-			}
-		}
-	}
+
+	// past this point, we can't possibly return any errors, so we can just
+	// start a goroutine and return to tell the caller not to expect errors
+	// (if the caller wants to give up on logs, they have to cancel the context)
+	// this goroutine functions as a shim between the logger and the caller.
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		// set up some defers
+		defer func() {
+			// ok so this function, originally, was placed right after that
+			// logger.ReadLogs call above. I THINK that means it sets off the
+			// chain of events that results in the logger needing to be closed.
+			// i do not know if an error in time parsing above causing an early
+			// return will result in leaking the logger. if that is the case,
+			// it would also have been a bug in the original code
+			logs.Close()
+			if cLog != container.LogDriver {
+				// Since the logger isn't cached in the container, which
+				// occurs if it is running, it must get explicitly closed
+				// here to avoid leaking it and any file handles it has.
+				if err := cLog.Close(); err != nil {
+					logrus.Errorf("Error closing logger: %v", err)
+				}
+			}
+		}()
+		// close the messages channel. closing is the only way to signal above
+		// that we're done with logs (other than context cancel i guess).
+		defer close(messageChan)

+		lg.Debug("begin logs")
+		for {
+			select {
+			// i do not believe as the system is currently designed any error
+			// is possible, but we should be prepared to handle it anyway. if
+			// we do get an error, copy only the error field to a new object so
+			// we don't end up with partial data in the other fields
+			case err := <-logs.Err:
+				lg.Errorf("Error streaming logs: %v", err)
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
+				}
+				return
+			case <-ctx.Done():
+				lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
+				return
+			case msg, ok := <-logs.Msg:
+				// there is some kind of pool or ring buffer in the logger that
+				// produces these messages, and a possible future optimization
+				// might be to use that pool and reuse message objects
+				if !ok {
+					lg.Debug("end logs")
+					return
+				}
+				m := msg.AsLogMessage() // just a pointer conversion, does not copy data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. we need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
+				select {
+				case <-ctx.Done():
+					return
+				case messageChan <- m:
+				}
+			}
+		}
+	}()
+	return messageChan, nil
 }

 func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {

View file

@@ -254,3 +254,43 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) {
 		}
 	}
 }
+
+func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsTTY"
+
+	result := icmd.RunCmd(d.Command(
+		// create a service
+		"service", "create",
+		// name it $name
+		"--name", name,
+		// use a TTY
+		"-t",
+		// busybox image, shell string
+		"busybox", "sh", "-c",
+		// echo to stdout and stderr
+		"echo out; (echo err 1>&2); sleep 10000",
+	))
+	result.Assert(c, icmd.Expected{})
+	id := strings.TrimSpace(result.Stdout())
+	c.Assert(id, checker.Not(checker.Equals), "")
+	// so, right here, we're basically inspecting by id and returning only
+	// the ID. if they don't match, the service doesn't exist.
+	result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id))
+	result.Assert(c, icmd.Expected{Out: id})
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// and make sure we have all the log lines
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2)
+
+	cmd := d.Command("service", "logs", name)
+	result = icmd.RunCmd(cmd)
+	// the TTY's line discipline turns "\n" into "\r\n", so carriage
+	// returns in the output are expected
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: "out\r\nerr\r\n"})
+}
}