
Merge pull request #16803 from tiborvass/pkg-broadcaster

Move types from progressreader and broadcastwriter to broadcaster
Committed by Jess Frazelle on 2015-10-08 13:51:08 -07:00 · commit bea2257f92
9 changed files with 62 additions and 66 deletions
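
For orientation, the renames in this PR boil down to the mapping below. This is an illustrative sketch written against the new package as it appears in the diff, not code taken from the PR itself:

```go
package main

import "github.com/docker/docker/pkg/broadcaster"

func main() {
	// pkg/broadcastwriter            -> pkg/broadcaster
	//   broadcastwriter.New()        -> new(broadcaster.Unbuffered)
	//   (*BroadcastWriter).AddWriter -> (*Unbuffered).Add
	//
	// pkg/progressreader                -> pkg/broadcaster
	//   progressreader.NewBroadcaster() -> broadcaster.NewBuffered()
	//   *progressreader.Broadcaster     -> *broadcaster.Buffered
	_ = new(broadcaster.Unbuffered)
	_ = broadcaster.NewBuffered()
}
```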

View file

@@ -22,7 +22,7 @@ import (
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/mount"
@@ -41,8 +41,8 @@ var (
 )

 type streamConfig struct {
-	stdout    *broadcastwriter.BroadcastWriter
-	stderr    *broadcastwriter.BroadcastWriter
+	stdout    *broadcaster.Unbuffered
+	stderr    *broadcaster.Unbuffered
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 }
@@ -318,13 +318,13 @@ func (streamConfig *streamConfig) StdinPipe() io.WriteCloser {
 func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	streamConfig.stdout.AddWriter(writer)
+	streamConfig.stdout.Add(writer)
 	return ioutils.NewBufReader(reader)
 }

 func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	streamConfig.stderr.AddWriter(writer)
+	streamConfig.stderr.Add(writer)
 	return ioutils.NewBufReader(reader)
 }
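
The StdoutPipe/StderrPipe change above is mechanical: the pipe's write end is now registered with broadcaster.Unbuffered via Add instead of BroadcastWriter.AddWriter. Below is a minimal standalone sketch of the same pattern, using plain bufio instead of Docker's ioutils helper and with illustrative names; it is not code from the PR.

```go
package main

import (
	"bufio"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/broadcaster"
)

func main() {
	// stdout plays the role of streamConfig.stdout after this PR.
	stdout := new(broadcaster.Unbuffered)

	// Same shape as StdoutPipe(): attach a pipe's write end as an observer.
	reader, writer := io.Pipe()
	stdout.Add(writer)

	go func() {
		// The "container" side writes to the shared stream...
		stdout.Write([]byte("hello\n"))
		stdout.Clean() // closes the pipe writer so the reader eventually sees EOF
	}()

	// ...and an attached client reads it back through the pipe.
	line, _ := bufio.NewReader(reader).ReadString('\n')
	fmt.Print(line)
}
```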

View file

@@ -32,7 +32,7 @@ import (
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/discovery"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/graphdb"
@@ -194,8 +194,8 @@ func (daemon *Daemon) Register(container *Container) error {
 	container.daemon = daemon

 	// Attach to stdout and stderr
-	container.stderr = broadcastwriter.New()
-	container.stdout = broadcastwriter.New()
+	container.stderr = new(broadcaster.Unbuffered)
+	container.stdout = new(broadcaster.Unbuffered)
 	// Attach to stdin
 	if container.Config.OpenStdin {
 		container.stdin, container.stdinPipe = io.Pipe()

View file

@@ -10,7 +10,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	derr "github.com/docker/docker/errors"
-	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/stringid"
@@ -233,8 +233,8 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
 		cStderr = stderr
 	}

-	ec.streamConfig.stderr = broadcastwriter.New()
-	ec.streamConfig.stdout = broadcastwriter.New()
+	ec.streamConfig.stderr = new(broadcaster.Unbuffered)
+	ec.streamConfig.stdout = new(broadcaster.Unbuffered)
 	// Attach to stdin
 	if ec.OpenStdin {
 		ec.streamConfig.stdin, ec.streamConfig.stdinPipe = io.Pipe()

View file

@@ -3,7 +3,7 @@ package graph
 import (
 	"testing"

-	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/reexec"
 )

@@ -13,8 +13,8 @@ func init() {

 func TestPools(t *testing.T) {
 	s := &TagStore{
-		pullingPool: make(map[string]*progressreader.Broadcaster),
-		pushingPool: make(map[string]*progressreader.Broadcaster),
+		pullingPool: make(map[string]*broadcaster.Buffered),
+		pushingPool: make(map[string]*broadcaster.Buffered),
 	}

 	if _, found := s.poolAdd("pull", "test1"); found {

View file

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/stringid"
@@ -110,7 +111,7 @@ type downloadInfo struct {
 	size        int64
 	err         chan error
 	poolKey     string
-	broadcaster *progressreader.Broadcaster
+	broadcaster *broadcaster.Buffered
 }

 type errVerification struct{}

View file

@@ -16,8 +16,8 @@ import (
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/trust"
@@ -37,8 +37,8 @@ type TagStore struct {
 	sync.Mutex
 	// FIXME: move push/pull-related fields
 	// to a helper type
-	pullingPool     map[string]*progressreader.Broadcaster
-	pushingPool     map[string]*progressreader.Broadcaster
+	pullingPool     map[string]*broadcaster.Buffered
+	pushingPool     map[string]*broadcaster.Buffered
 	registryService *registry.Service
 	eventsService   *events.Events
 	trustService    *trust.Store
@@ -94,8 +94,8 @@ func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) {
 		graph:           cfg.Graph,
 		trustKey:        cfg.Key,
 		Repositories:    make(map[string]Repository),
-		pullingPool:     make(map[string]*progressreader.Broadcaster),
-		pushingPool:     make(map[string]*progressreader.Broadcaster),
+		pullingPool:     make(map[string]*broadcaster.Buffered),
+		pushingPool:     make(map[string]*broadcaster.Buffered),
 		registryService: cfg.Registry,
 		eventsService:   cfg.Events,
 		trustService:    cfg.Trust,
@@ -437,7 +437,7 @@ func validateDigest(dgst string) error {
 // poolAdd checks if a push or pull is already running, and returns
 // (broadcaster, true) if a running operation is found. Otherwise, it creates a
 // new one and returns (broadcaster, false).
-func (store *TagStore) poolAdd(kind, key string) (*progressreader.Broadcaster, bool) {
+func (store *TagStore) poolAdd(kind, key string) (*broadcaster.Buffered, bool) {
 	store.Lock()
 	defer store.Unlock()
@@ -448,7 +448,7 @@ func (store *TagStore) poolAdd(kind, key string) (*progressreader.Broadcaster, b
 		return p, true
 	}

-	broadcaster := progressreader.NewBroadcaster()
+	broadcaster := broadcaster.NewBuffered()
 	switch kind {
 	case "pull":

View file

@@ -1,4 +1,4 @@
-package progressreader
+package broadcaster

 import (
 	"errors"
@@ -6,10 +6,10 @@ import (
 	"sync"
 )

-// Broadcaster keeps track of one or more observers watching the progress
+// Buffered keeps track of one or more observers watching the progress
 // of an operation. For example, if multiple clients are trying to pull an
-// image, they share a Broadcaster for the download operation.
-type Broadcaster struct {
+// image, they share a Buffered struct for the download operation.
+type Buffered struct {
 	sync.Mutex
 	// c is a channel that observers block on, waiting for the operation
 	// to finish.
@@ -29,9 +29,9 @@ type Broadcaster struct {
 	result error
 }

-// NewBroadcaster returns a Broadcaster structure
-func NewBroadcaster() *Broadcaster {
-	b := &Broadcaster{
+// NewBuffered returns an initialized Buffered structure.
+func NewBuffered() *Buffered {
+	b := &Buffered{
 		c: make(chan struct{}),
 	}
 	b.cond = sync.NewCond(b)
@@ -39,7 +39,7 @@ func NewBroadcaster() *Broadcaster {
 }

 // closed returns true if and only if the broadcaster has been closed
-func (broadcaster *Broadcaster) closed() bool {
+func (broadcaster *Buffered) closed() bool {
 	select {
 	case <-broadcaster.c:
 		return true
@@ -51,7 +51,7 @@ func (broadcaster *Broadcaster) closed() bool {
 // receiveWrites runs as a goroutine so that writes don't block the Write
 // function. It writes the new data in broadcaster.history each time there's
 // activity on the broadcaster.cond condition variable.
-func (broadcaster *Broadcaster) receiveWrites(observer io.Writer) {
+func (broadcaster *Buffered) receiveWrites(observer io.Writer) {
 	n := 0

 	broadcaster.Lock()
@@ -98,13 +98,13 @@ func (broadcaster *Broadcaster) receiveWrites(observer io.Writer) {

 // Write adds data to the history buffer, and also writes it to all current
 // observers.
-func (broadcaster *Broadcaster) Write(p []byte) (n int, err error) {
+func (broadcaster *Buffered) Write(p []byte) (n int, err error) {
 	broadcaster.Lock()
 	defer broadcaster.Unlock()

 	// Is the broadcaster closed? If so, the write should fail.
 	if broadcaster.closed() {
-		return 0, errors.New("attempted write to closed progressreader Broadcaster")
+		return 0, errors.New("attempted write to a closed broadcaster.Buffered")
 	}

 	// Add message in p to the history slice
@@ -117,15 +117,15 @@ func (broadcaster *Broadcaster) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }

-// Add adds an observer to the Broadcaster. The new observer receives the
+// Add adds an observer to the broadcaster. The new observer receives the
 // data from the history buffer, and also all subsequent data.
-func (broadcaster *Broadcaster) Add(w io.Writer) error {
+func (broadcaster *Buffered) Add(w io.Writer) error {
 	// The lock is acquired here so that Add can't race with Close
 	broadcaster.Lock()
 	defer broadcaster.Unlock()

 	if broadcaster.closed() {
-		return errors.New("attempted to add observer to closed progressreader Broadcaster")
+		return errors.New("attempted to add observer to a closed broadcaster.Buffered")
 	}

 	broadcaster.wg.Add(1)
@@ -136,7 +136,7 @@ func (broadcaster *Broadcaster) Add(w io.Writer) error {

 // CloseWithError signals to all observers that the operation has finished. Its
 // argument is a result that should be returned to waiters blocking on Wait.
-func (broadcaster *Broadcaster) CloseWithError(result error) {
+func (broadcaster *Buffered) CloseWithError(result error) {
 	broadcaster.Lock()
 	if broadcaster.closed() {
 		broadcaster.Unlock()
@@ -153,14 +153,14 @@ func (broadcaster *Broadcaster) CloseWithError(result error) {

 // Close signals to all observers that the operation has finished. It causes
 // all calls to Wait to return nil.
-func (broadcaster *Broadcaster) Close() {
+func (broadcaster *Buffered) Close() {
 	broadcaster.CloseWithError(nil)
 }

 // Wait blocks until the operation is marked as completed by the Close method,
 // and all writer goroutines have completed. It returns the argument that was
 // passed to Close.
-func (broadcaster *Broadcaster) Wait() error {
+func (broadcaster *Buffered) Wait() error {
 	<-broadcaster.c
 	broadcaster.wg.Wait()
 	return broadcaster.result
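
A quick sketch of the Buffered semantics implemented above, using the import path added in this PR (the snippet itself is illustrative, not part of the PR): data written before an observer attaches is replayed from the history buffer, Wait does not return until the observer goroutines have flushed, and writes after Close fail.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/broadcaster"
)

func main() {
	b := broadcaster.NewBuffered()

	// Data written before any observer exists is kept in the history buffer.
	b.Write([]byte("step 1\n"))

	// A late observer is replayed the history first, then receives new data.
	var late bytes.Buffer
	b.Add(&late)
	b.Write([]byte("step 2\n"))

	b.Close() // all Wait calls return nil
	b.Wait()  // also waits for observer goroutines to flush

	fmt.Print(late.String()) // "step 1\nstep 2\n"

	// Writing after Close is an error.
	if _, err := b.Write([]byte("too late\n")); err != nil {
		fmt.Println("write after close:", err)
	}
}
```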

View file

@@ -1,18 +1,18 @@
-package broadcastwriter
+package broadcaster

 import (
 	"io"
 	"sync"
 )

-// BroadcastWriter accumulate multiple io.WriteCloser by stream.
-type BroadcastWriter struct {
+// Unbuffered accumulates multiple io.WriteCloser by stream.
+type Unbuffered struct {
 	mu      sync.Mutex
 	writers []io.WriteCloser
 }

-// AddWriter adds new io.WriteCloser.
-func (w *BroadcastWriter) AddWriter(writer io.WriteCloser) {
+// Add adds new io.WriteCloser.
+func (w *Unbuffered) Add(writer io.WriteCloser) {
 	w.mu.Lock()
 	w.writers = append(w.writers, writer)
 	w.mu.Unlock()
@@ -20,7 +20,7 @@ func (w *BroadcastWriter) AddWriter(writer io.WriteCloser) {

 // Write writes bytes to all writers. Failed writers will be evicted during
 // this call.
-func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
+func (w *Unbuffered) Write(p []byte) (n int, err error) {
 	w.mu.Lock()
 	var evict []int
 	for i, sw := range w.writers {
@@ -38,7 +38,7 @@ func (w *BroadcastWriter) Write(p []byte) (n int, err error) {

 // Clean closes and removes all writers. Last non-eol-terminated part of data
 // will be saved.
-func (w *BroadcastWriter) Clean() error {
+func (w *Unbuffered) Clean() error {
 	w.mu.Lock()
 	for _, sw := range w.writers {
 		sw.Close()
@@ -47,8 +47,3 @@ func (w *BroadcastWriter) Clean() error {
 	w.mu.Unlock()
 	return nil
 }
-
-// New creates a new BroadcastWriter.
-func New() *BroadcastWriter {
-	return &BroadcastWriter{}
-}
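
And the corresponding Unbuffered behaviour: observers that fail to write are evicted on the next Write, and Clean closes whatever is left. Another illustrative sketch, not from the PR; failingWriter is a made-up helper.

```go
package main

import (
	"errors"
	"os"

	"github.com/docker/docker/pkg/broadcaster"
)

// failingWriter always errors, to show that Unbuffered drops broken observers.
type failingWriter struct{}

func (failingWriter) Write(p []byte) (int, error) { return 0, errors.New("boom") }
func (failingWriter) Close() error                { return nil }

func main() {
	w := new(broadcaster.Unbuffered) // replaces broadcastwriter.New()
	w.Add(os.Stdout)                 // *os.File already satisfies io.WriteCloser
	w.Add(failingWriter{})

	w.Write([]byte("goes to stdout; the failing writer is evicted\n"))
	w.Write([]byte("still goes to stdout\n"))

	w.Clean() // closes and removes every remaining writer (including os.Stdout here)
}
```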

View file

@@ -1,4 +1,4 @@
-package broadcastwriter
+package broadcaster

 import (
 	"bytes"
@@ -28,14 +28,14 @@ func (dw *dummyWriter) Close() error {
 	return nil
 }

-func TestBroadcastWriter(t *testing.T) {
-	writer := New()
+func TestUnbuffered(t *testing.T) {
+	writer := new(Unbuffered)

 	// Test 1: Both bufferA and bufferB should contain "foo"
 	bufferA := &dummyWriter{}
-	writer.AddWriter(bufferA)
+	writer.Add(bufferA)
 	bufferB := &dummyWriter{}
-	writer.AddWriter(bufferB)
+	writer.Add(bufferB)
 	writer.Write([]byte("foo"))

 	if bufferA.String() != "foo" {
@@ -49,7 +49,7 @@ func TestBroadcastWriter(t *testing.T) {
 	// Test2: bufferA and bufferB should contain "foobar",
 	// while bufferC should only contain "bar"
 	bufferC := &dummyWriter{}
-	writer.AddWriter(bufferC)
+	writer.Add(bufferC)
 	writer.Write([]byte("bar"))

 	if bufferA.String() != "foobar" {
@@ -87,7 +87,7 @@ func TestBroadcastWriter(t *testing.T) {
 	bufferB.failOnWrite = true
 	bufferC.failOnWrite = true
 	bufferD := &dummyWriter{}
-	writer.AddWriter(bufferD)
+	writer.Add(bufferD)
 	writer.Write([]byte("yo"))
 	writer.Write([]byte("ink"))
 	if strings.Contains(bufferB.String(), "yoink") {
@@ -114,24 +114,24 @@ func (d devNullCloser) Write(buf []byte) (int, error) {
 }

 // This test checks for races. It is only useful when run with the race detector.
-func TestRaceBroadcastWriter(t *testing.T) {
-	writer := New()
+func TestRaceUnbuffered(t *testing.T) {
+	writer := new(Unbuffered)
 	c := make(chan bool)
 	go func() {
-		writer.AddWriter(devNullCloser(0))
+		writer.Add(devNullCloser(0))
 		c <- true
 	}()
 	writer.Write([]byte("hello"))
 	<-c
 }

-func BenchmarkBroadcastWriter(b *testing.B) {
-	writer := New()
+func BenchmarkUnbuffered(b *testing.B) {
+	writer := new(Unbuffered)
 	setUpWriter := func() {
 		for i := 0; i < 100; i++ {
-			writer.AddWriter(devNullCloser(0))
-			writer.AddWriter(devNullCloser(0))
-			writer.AddWriter(devNullCloser(0))
+			writer.Add(devNullCloser(0))
+			writer.Add(devNullCloser(0))
+			writer.Add(devNullCloser(0))
 		}
 	}
 	testLine := "Line that thinks that it is log line from docker"