package broadcastwriter
import (
	"bytes"
	"io"
	"sync"
	"time"

	log "github.com/Sirupsen/logrus"

	"github.com/docker/docker/pkg/jsonlog"
)
// BroadcastWriter accumulates multiple io.WriteCloser writers, grouped by stream.
type BroadcastWriter struct {
	sync.Mutex
	buf      *bytes.Buffer                             // buffers incoming data until a complete line is available
	jsLogBuf *bytes.Buffer                             // scratch buffer reused when serializing JSON log lines
	streams  map[string](map[io.WriteCloser]struct{})  // registered writers, keyed by stream name ("" means raw passthrough)
}
// AddWriter adds a new io.WriteCloser for the given stream.
// If stream is "", all writes are passed through unchanged. Otherwise every
// complete line of input is serialized as a jsonlog.JSONLog entry before
// being written.
func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
	w.Lock()
	if _, ok := w.streams[stream]; !ok {
		w.streams[stream] = make(map[io.WriteCloser]struct{})
	}
	w.streams[stream][writer] = struct{}{}
	w.Unlock()
}
// Write writes bytes to all writers. Failed writers will be evicted during
// this call.
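//
// Writers registered for the "" stream receive p unchanged. For every other
// stream, each complete line is wrapped in a jsonlog.JSONLog entry and written
// as one JSON document per line (roughly {"log":...,"stream":...,"time":...},
// assuming jsonlog's default field names). Data not yet terminated by a
// newline is buffered until a later Write completes the line.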
func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
	created := time.Now().UTC()
	w.Lock()
	if writers, ok := w.streams[""]; ok {
		for sw := range writers {
			if n, err := sw.Write(p); err != nil || n != len(p) {
				// On error or short write, evict the writer
				delete(writers, sw)
			}
		}
	}
	if w.jsLogBuf == nil {
		// Lazily allocate the JSON scratch buffer with a reasonable initial size.
		w.jsLogBuf = new(bytes.Buffer)
		w.jsLogBuf.Grow(1024)
	}
	w.buf.Write(p)
	for {
		line, err := w.buf.ReadString('\n')
		if err != nil {
			// No complete line yet: put the partial line back and wait for more data.
			w.buf.WriteString(line)
			break
		}
		for stream, writers := range w.streams {
			if stream == "" {
				continue
			}
			jsonLog := jsonlog.JSONLog{Log: line, Stream: stream, Created: created}
			err = jsonLog.MarshalJSONBuf(w.jsLogBuf)
			if err != nil {
				log.Errorf("Error making JSON log line: %s", err)
				continue
			}
			w.jsLogBuf.WriteByte('\n')
			b := w.jsLogBuf.Bytes()
			for sw := range writers {
				if _, err := sw.Write(b); err != nil {
					delete(writers, sw)
				}
			}
			// Reset per stream so one stream's entry never leaks into another's writers.
			w.jsLogBuf.Reset()
		}
	}
	w.jsLogBuf.Reset()
	w.Unlock()
	return len(p), nil
}
// Clean closes and removes all registered writers. Any buffered data that is
// not yet terminated by a newline is preserved.
func (w *BroadcastWriter) Clean() error {
	w.Lock()
	for _, writers := range w.streams {
		for sw := range writers {
			sw.Close()
		}
	}
	w.streams = make(map[string](map[io.WriteCloser]struct{}))
	w.Unlock()
	return nil
}
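// New returns an empty BroadcastWriter with no writers registered.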
func New() *BroadcastWriter {
	return &BroadcastWriter{
		streams: make(map[string](map[io.WriteCloser]struct{})),
		buf:     bytes.NewBuffer(nil),
	}
}
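
// exampleUsage is a small illustrative sketch, not called anywhere in this
// package: it shows how a BroadcastWriter is typically wired up, assuming two
// hypothetical sinks (rawSink and jsonSink) supplied by the caller.
func exampleUsage(rawSink, jsonSink io.WriteCloser) {
	bw := New()
	bw.AddWriter(rawSink, "")        // receives every Write unchanged
	bw.AddWriter(jsonSink, "stdout") // receives newline-delimited jsonlog.JSONLog entries
	bw.Write([]byte("hello\n"))      // fans the line out to both sinks
	bw.Clean()                       // closes and drops all registered writers
}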