remove useless flush method

Docker-DCO-1.1-Signed-off-by: Victor Vieux <victor.vieux@docker.com> (github: vieux)
Victor Vieux 2014-01-23 16:00:07 -08:00
parent 90e9a2d85a
commit 35641f0ec7
3 changed files with 6 additions and 20 deletions
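In short: instead of engine.Output propagating an explicit Flush() to its destinations, the http.ResponseWriter is now wrapped once in utils.NewWriteFlusher where it enters the job (api.go), so every write is flushed as it happens and the server-side code can write to job.Stdout directly. A minimal sketch of a flush-on-write wrapper in this spirit (illustrative names only, not the exact utils implementation):

package main

import (
	"io"
	"net/http"
	"os"
)

// writeFlusher is a sketch of a wrapper like utils.NewWriteFlusher: every
// Write is followed by a Flush when the underlying writer supports it, so
// callers never need to flush explicitly.
type writeFlusher struct {
	w       io.Writer
	flusher http.Flusher
}

func newWriteFlusher(w io.Writer) io.Writer {
	f, _ := w.(http.Flusher) // stays nil for writers that cannot flush
	return &writeFlusher{w: w, flusher: f}
}

func (wf *writeFlusher) Write(p []byte) (n int, err error) {
	n, err = wf.w.Write(p)
	if wf.flusher != nil {
		wf.flusher.Flush()
	}
	return n, err
}

func main() {
	// os.Stdout is not an http.Flusher, so the wrapper simply passes writes through.
	out := newWriteFlusher(os.Stdout)
	io.WriteString(out, "streamed progress line\n")
}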

api.go

@@ -448,7 +448,7 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht
 	}
 	job.SetenvBool("json", version > 1.0)
-	job.Stdout.Add(w)
+	job.Stdout.Add(utils.NewWriteFlusher(w))
 	if err := job.Run(); err != nil {
 		if !job.Stdout.Used() {
 			return err

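Why the wrap moves to api.go: postImagesCreate streams JSON progress records, and with Go's default response buffering the client would otherwise only see them when the buffer fills or the request ends. A self-contained demo of flush-per-write streaming over HTTP (hypothetical endpoint and handler, not Docker's routing):

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// handler streams progress lines; the explicit Flush after each write plays
// the role that utils.NewWriteFlusher plays for postImagesCreate above.
func handler(w http.ResponseWriter, r *http.Request) {
	for i := 1; i <= 3; i++ {
		fmt.Fprintf(w, `{"status":"step %d"}`+"\n", i)
		if f, ok := w.(http.Flusher); ok {
			f.Flush() // push the line to the client immediately
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/pull", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}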
engine/streams.go

@@ -109,18 +109,6 @@ func (o *Output) Write(p []byte) (n int, err error) {
 	return len(p), firstErr
 }
-
-func (o *Output) Flush() {
-	o.Mutex.Lock()
-	defer o.Mutex.Unlock()
-	for _, dst := range o.dests {
-		if f, ok := dst.(interface {
-			Flush()
-		}); ok {
-			f.Flush()
-		}
-	}
-}
 
 // Close unregisters all destinations and waits for all background
 // AddTail and AddString tasks to complete.
 // The Close method of each destination is called if it exists.

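For reference, the deleted Output.Flush probed each registered destination for a Flush method via an anonymous interface assertion and flushed only those that support it; once flushable destinations are added already wrapped (as in api.go above), that pass has nothing left to do. A standalone illustration of the assertion pattern, with made-up names:

package main

import "fmt"

// demoDest is a stand-in destination that happens to support flushing.
type demoDest struct{}

func (demoDest) Flush() { fmt.Println("flushed") }

// flushAll mirrors the removed logic: flush only those destinations
// that actually expose a Flush method.
func flushAll(dests []interface{}) {
	for _, dst := range dests {
		if f, ok := dst.(interface{ Flush() }); ok {
			f.Flush()
		}
	}
}

func main() {
	flushAll([]interface{}{demoDest{}, "plain string, no Flush"})
}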
server.go

@@ -1323,7 +1323,6 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
 		localName   = job.Args[0]
 		tag         string
 		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
-		out         = utils.NewWriteFlusher(job.Stdout)
 		authConfig  = &auth.AuthConfig{}
 		metaHeaders map[string][]string
 	)
@@ -1338,7 +1337,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
 	if err != nil {
 		if c != nil {
 			// Another pull of the same repository is already taking place; just wait for it to finish
-			out.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
+			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
 			<-c
 			return engine.StatusOK
 		}
@@ -1365,7 +1364,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
 		localName = remoteName
 	}
-	if err = srv.pullRepository(r, out, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
+	if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
 		job.Error(err)
 		return engine.StatusErr
 	}
@@ -1584,7 +1583,6 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 		repo    = job.Args[1]
 		tag     string
 		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
-		out     = utils.NewWriteFlusher(job.Stdout)
 		archive io.Reader
 		resp    *http.Response
 	)
@@ -1605,7 +1603,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 			u.Host = src
 			u.Path = ""
 		}
-		out.Write(sf.FormatStatus("", "Downloading from %s", u))
+		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
 		// Download with curl (pretty progress bar)
 		// If curl is not available, fallback to http.Get()
 		resp, err = utils.Download(u.String())
@@ -1613,7 +1611,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 			job.Error(err)
 			return engine.StatusErr
 		}
-		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf, true, "", "Importing")
+		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
 	}
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	if err != nil {
@@ -1627,7 +1625,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 			return engine.StatusErr
 		}
 	}
-	out.Write(sf.FormatStatus("", img.ID))
+	job.Stdout.Write(sf.FormatStatus("", img.ID))
 	return engine.StatusOK
 }
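Net effect in server.go: the per-call WriteFlusher wrapping is gone and status lines go straight to job.Stdout, which fans them out to the flusher-wrapped HTTP writer registered in api.go. A rough sketch of that write path, assuming a formatter that renders a status line to bytes (helper names are placeholders, not the utils API):

package main

import (
	"fmt"
	"io"
	"os"
)

// formatStatus is a placeholder for a stream formatter: it renders a status
// line as bytes, ready to be written to the job's stdout.
func formatStatus(format string, a ...interface{}) []byte {
	return []byte(fmt.Sprintf(format, a...) + "\n")
}

// report writes directly to the job's stdout; flushing is left to whatever
// wrapper the API layer attached to that writer.
func report(stdout io.Writer, src string) {
	stdout.Write(formatStatus("Downloading from %s", src))
}

func main() {
	report(os.Stdout, "http://example.com/image.tar")
}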