Use TimeoutConn wrapper to set read deadline for downloading layer
Docker-DCO-1.1-Signed-off-by: Derek <crq@kernel.org> (github: crquan)
parent 77ae37a383
commit 02f4ae6c56

3 changed files with 70 additions and 11 deletions

@@ -726,7 +726,17 @@ type Registry struct {
}

func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) {
    httpDial := func(proto string, addr string) (net.Conn, error) {
        conn, err := net.Dial(proto, addr)
        if err != nil {
            return nil, err
        }
        conn = utils.NewTimeoutConn(conn, time.Duration(1)*time.Minute)
        return conn, nil
    }

    httpTransport := &http.Transport{
        Dial:              httpDial,
        DisableKeepAlives: true,
        Proxy:             http.ProxyFromEnvironment,
    }

@@ -738,6 +748,7 @@ func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, inde
        },
        indexEndpoint: indexEndpoint,
    }

    r.client.Jar, err = cookiejar.New(nil)
    if err != nil {
        return nil, err
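With this change every connection the registry client dials is wrapped by utils.NewTimeoutConn with a one-minute read deadline, so a registry that stops sending data mid-download surfaces as a timeout error instead of hanging forever. A minimal standalone sketch of that behavior (not part of the commit; the two-second deadline, the inlined wrapper, and the stalling test server are made up for illustration) shows how such a timeout reaches the caller as a *url.Error wrapping a net.Error whose Timeout() reports true, which is what the retry loop below checks for:

package main

import (
    "fmt"
    "net"
    "net/http"
    "net/http/httptest"
    "net/url"
    "time"
)

// deadlineConn mirrors the idea of utils.TimeoutConn: arm a read deadline
// before every Read so a stalled peer turns into a timeout error.
type deadlineConn struct {
    net.Conn
    timeout time.Duration
}

func (c *deadlineConn) Read(b []byte) (int, error) {
    if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil {
        return 0, err
    }
    return c.Conn.Read(b)
}

func main() {
    // A server that never answers; the handler returns once the client gives up.
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        <-r.Context().Done()
    }))
    defer srv.Close()

    client := &http.Client{Transport: &http.Transport{
        Dial: func(proto, addr string) (net.Conn, error) {
            conn, err := net.Dial(proto, addr)
            if err != nil {
                return nil, err
            }
            return &deadlineConn{conn, 2 * time.Second}, nil // illustrative 2s deadline
        },
        DisableKeepAlives: true,
    }}

    _, err := client.Get(srv.URL)
    if uerr, ok := err.(*url.Error); ok {
        err = uerr.Err // unwrap, as the pull loop in this commit does
    }
    if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
        fmt.Println("read timed out, retryable:", err)
    } else {
        fmt.Println("unexpected result:", err)
    }
}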
@@ -27,6 +27,7 @@ import (
    "io"
    "io/ioutil"
    "log"
    "net"
    "net/http"
    "net/url"
    "os"

@@ -1134,17 +1135,38 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
        }
    }

    for j := 1; j <= retries; j++ {
        // Get the layer
        out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil))
        status := "Pulling fs layer"
        if j > 1 {
            status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
        }
        out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
        layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
        if err != nil {
            if uerr, ok := err.(*url.Error); ok {
                err = uerr.Err
            }
            if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else if err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return err
            }
        defer layer.Close()
        if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {

        err = srv.daemon.Graph().Register(imgJSON,
            utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"),
            img)
        if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
            time.Sleep(time.Duration(j) * 500 * time.Millisecond)
            continue
        } else if err != nil {
            out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
            return err
        } else {
            break
        }
        }
    }
    out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
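The pull loop above now treats timeouts as retryable: it unwraps *url.Error, checks for a net.Error whose Timeout() reports true, and backs off linearly (attempt j sleeps j*500ms) before trying again, for both the layer fetch and the graph registration. A distilled sketch of that policy, separated from the Docker-specific code (the helper name, the five-attempt budget, and the fake timeout error are all illustrative):

package main

import (
    "fmt"
    "net"
    "net/url"
    "time"
)

// fakeTimeout stands in for a stalled read; it satisfies net.Error and
// reports itself as a timeout.
type fakeTimeout struct{}

func (fakeTimeout) Error() string   { return "simulated i/o timeout" }
func (fakeTimeout) Timeout() bool   { return true }
func (fakeTimeout) Temporary() bool { return true }

// retryOnTimeout mirrors the policy in the pull loop: run op up to retries
// times, sleep attempt*500ms between tries, and retry only timeout errors.
func retryOnTimeout(retries int, op func() error) error {
    for j := 1; j <= retries; j++ {
        err := op()
        if err == nil {
            return nil
        }
        // HTTP clients wrap transport failures in *url.Error; unwrap it so
        // the net.Error check below sees the underlying timeout.
        if uerr, ok := err.(*url.Error); ok {
            err = uerr.Err
        }
        if nerr, ok := err.(net.Error); ok && nerr.Timeout() && j < retries {
            time.Sleep(time.Duration(j) * 500 * time.Millisecond)
            continue
        }
        return err
    }
    return nil
}

func main() {
    attempt := 0
    err := retryOnTimeout(5, func() error {
        attempt++
        if attempt < 3 {
            return fakeTimeout{} // first two attempts "time out"
        }
        return nil
    })
    fmt.Println("attempts:", attempt, "err:", err) // attempts: 3 err: <nil>
}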
utils/timeoutconn.go (new file, 26 additions)

@@ -0,0 +1,26 @@
package utils

import (
    "net"
    "time"
)

func NewTimeoutConn(conn net.Conn, timeout time.Duration) net.Conn {
    return &TimeoutConn{conn, timeout}
}

// A net.Conn that sets a deadline for every Read or Write operation
type TimeoutConn struct {
    net.Conn
    timeout time.Duration
}

func (c *TimeoutConn) Read(b []byte) (int, error) {
    if c.timeout > 0 {
        err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout))
        if err != nil {
            return 0, err
        }
    }
    return c.Conn.Read(b)
}
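Note that, as added here, only Read arms a deadline; the comment mentions Write, but writes go through the embedded net.Conn unchanged. The mechanism the wrapper relies on is simply SetReadDeadline before each Read. A tiny standalone check of that behavior (not part of the commit; it uses an in-memory net.Pipe, which honors deadlines on Go 1.10 and later, and an arbitrary 100ms deadline):

package main

import (
    "fmt"
    "net"
    "time"
)

func main() {
    client, server := net.Pipe()
    defer client.Close()
    defer server.Close()

    // Same effect as reading through utils.NewTimeoutConn(client, 100*time.Millisecond):
    // arm a read deadline, then Read. The peer never writes, so the Read must time out.
    if err := client.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
        fmt.Println("deadlines not supported:", err)
        return
    }

    buf := make([]byte, 32)
    start := time.Now()
    _, err := client.Read(buf)

    nerr, ok := err.(net.Error)
    fmt.Printf("after %v: timeout=%v err=%v\n",
        time.Since(start).Round(time.Millisecond), ok && nerr.Timeout(), err)
}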