
Merge pull request #29684 from vdemeester/quick-unit

Enhance pkg/{httputils,integration}, distribution/xfer unit tests
Brian Goff authored on 2016-12-28 10:57:56 -05:00, committed by GitHub
commit 631f51015e
8 changed files with 42 additions and 25 deletions
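
The theme of the diff below: the hard-coded retry delays (time.Second in distribution/xfer, 5 * time.Second in the resumable request reader) become a waitDuration field, and the constructors gain variadic functional options, so the unit tests can shrink the delay to milliseconds while production callers keep the old defaults. A minimal, runnable sketch of the functional-options pattern being applied (illustrative names only, not the moby source):

package main

import (
	"fmt"
	"time"
)

// manager stands in for LayerDownloadManager/LayerUploadManager in this sketch.
type manager struct {
	waitDuration time.Duration
}

// newManager sets the production default first, then applies any options,
// mirroring the constructors changed in this commit.
func newManager(options ...func(*manager)) *manager {
	m := manager{waitDuration: time.Second}
	for _, option := range options {
		option(&m)
	}
	return &m
}

func main() {
	fmt.Println(newManager().waitDuration) // default: 1s
	// Tests shorten the retry wait so retry loops finish quickly.
	fast := newManager(func(m *manager) { m.waitDuration = time.Millisecond })
	fmt.Println(fast.waitDuration) // 1ms
}

Because the options run after the defaults are assigned, callers that pass no option see unchanged behavior.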


@@ -39,6 +39,7 @@ func TestNoneHealthcheck(t *testing.T) {
}
}

+ // FIXME(vdemeester) This takes around 3s… This is *way* too long
func TestHealthStates(t *testing.T) {
e := events.New()
_, l, _ := e.Subscribe()


@@ -22,8 +22,9 @@ const maxDownloadAttempts = 5
// registers and downloads those, taking into account dependencies between
// layers.
type LayerDownloadManager struct {
- layerStore layer.Store
- tm         TransferManager
+ layerStore   layer.Store
+ tm           TransferManager
+ waitDuration time.Duration
}
// SetConcurrency sets the max concurrent downloads for each pull
@@ -32,11 +33,16 @@ func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
}

// NewLayerDownloadManager returns a new LayerDownloadManager.
- func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager {
- return &LayerDownloadManager{
- layerStore: layerStore,
- tm:         NewTransferManager(concurrencyLimit),
+ func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
+ manager := LayerDownloadManager{
+ layerStore:   layerStore,
+ tm:           NewTransferManager(concurrencyLimit),
+ waitDuration: time.Second,
}
+ for _, option := range options {
+ option(&manager)
+ }
+ return &manager
}
type downloadTransfer struct {
@@ -269,7 +275,7 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor,
logrus.Errorf("Download failed, retrying: %v", err)
delay := retries * 5
- ticker := time.NewTicker(time.Second)
+ ticker := time.NewTicker(ldm.waitDuration)
selectLoop:
for {


@@ -265,8 +265,9 @@ func TestSuccessfulDownload(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Needs fixing on Windows")
}
layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
- ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency)
+ ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress)
progressDone := make(chan struct{})
@@ -327,7 +328,7 @@ func TestSuccessfulDownload(t *testing.T) {
}

func TestCancelledDownload(t *testing.T) {
- ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency)
+ ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress)
progressDone := make(chan struct{})


@@ -16,7 +16,8 @@ const maxUploadAttempts = 5
// LayerUploadManager provides task management and progress reporting for
// uploads.
type LayerUploadManager struct {
- tm TransferManager
+ tm           TransferManager
+ waitDuration time.Duration
}
// SetConcurrency sets the max concurrent uploads for each push
@@ -25,10 +26,15 @@ func (lum *LayerUploadManager) SetConcurrency(concurrency int) {
}

// NewLayerUploadManager returns a new LayerUploadManager.
- func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
- return &LayerUploadManager{
- tm: NewTransferManager(concurrencyLimit),
+ func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager {
+ manager := LayerUploadManager{
+ tm:           NewTransferManager(concurrencyLimit),
+ waitDuration: time.Second,
}
+ for _, option := range options {
+ option(&manager)
+ }
+ return &manager
}
type uploadTransfer struct {
@@ -142,7 +148,7 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun
logrus.Errorf("Upload failed, retrying: %v", err)
delay := retries * 5
- ticker := time.NewTicker(time.Second)
+ ticker := time.NewTicker(lum.waitDuration)
selectLoop:
for {


@@ -79,7 +79,7 @@ func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
}

func TestSuccessfulUpload(t *testing.T) {
- lum := NewLayerUploadManager(maxUploadConcurrency)
+ lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress)
progressDone := make(chan struct{})
@@ -105,7 +105,7 @@ func TestSuccessfulUpload(t *testing.T) {
}

func TestCancelledUpload(t *testing.T) {
- lum := NewLayerUploadManager(maxUploadConcurrency)
+ lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress)
progressDone := make(chan struct{})


@@ -17,19 +17,20 @@ type resumableRequestReader struct {
currentResponse *http.Response
failures uint32
maxFailures uint32
+ waitDuration time.Duration
}

// ResumableRequestReader makes it possible to resume reading a request's body transparently
// maxfail is the number of times we retry to make requests again (not resumes)
// totalsize is the total length of the body; auto detect if not provided
func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
- return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
+ return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second}
}

// ResumableRequestReaderWithInitialResponse makes it possible to resume
// reading the body of an already initiated request.
func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
- return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
+ return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second}
}
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
@@ -40,7 +41,7 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
if r.lastRange != 0 && r.currentResponse == nil {
readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
r.request.Header.Set("Range", readRange)
- time.Sleep(5 * time.Second)
+ time.Sleep(r.waitDuration)
}
if r.currentResponse == nil {
r.currentResponse, err = r.client.Do(r.request)
@@ -49,7 +50,7 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
if err != nil && r.failures+1 != r.maxFailures {
r.cleanUpResponse()
r.failures++
- time.Sleep(5 * time.Duration(r.failures) * time.Second)
+ time.Sleep(time.Duration(r.failures) * r.waitDuration)
return 0, nil
} else if err != nil {
r.cleanUpResponse()


@@ -8,6 +8,7 @@ import (
"net/http/httptest"
"strings"
"testing"
+ "time"
)
func TestResumableRequestHeaderSimpleErrors(t *testing.T) {
@@ -55,10 +56,11 @@ func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) {
}
resreq := &resumableRequestReader{
- client:      client,
- request:     badReq,
- failures:    0,
- maxFailures: 2,
+ client:       client,
+ request:      badReq,
+ failures:     0,
+ maxFailures:  2,
+ waitDuration: 10 * time.Millisecond,
}
read, err := resreq.Read([]byte{})
if err != nil || read != 0 {


@@ -234,7 +234,7 @@ func TestConsumeWithSpeed(t *testing.T) {
reader := strings.NewReader("1234567890")
chunksize := 2
- bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil)
+ bytes1, err := ConsumeWithSpeed(reader, chunksize, 10*time.Millisecond, nil)
if err != nil {
t.Fatal(err)
}