add defer file.Close to avoid potential fd leak
Signed-off-by: allencloud <allen.sun@daocloud.io>
commit 0ead624473 (parent 2684459ed4)
25 changed files with 93 additions and 26 deletions
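The change is mechanical but worth spelling out: every call site that opens a file now registers the close with defer immediately after the error check, so the descriptor is released on every return path instead of only on the happy path. Below is a minimal sketch of the pattern; the package, path, and countLines helper are hypothetical and not part of the diff. (The hunks that follow carry only the function context from their headers; per-file paths are not shown.)

package main

import (
	"bufio"
	"fmt"
	"os"
)

// countLines illustrates the fd-handling pattern applied by this commit:
// check the error from Open first, then defer Close, so the descriptor is
// released on every return path and never leaked on early returns.
func countLines(path string) (int, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err // Open failed, nothing to close
	}
	defer f.Close() // runs on every return below

	n := 0
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		n++
	}
	if err := scanner.Err(); err != nil {
		return 0, err // early return; f is still closed by the defer
	}
	return n, nil
}

func main() {
	n, err := countLines("/etc/hosts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(n, "lines")
}

The ordering matters: defer f.Close() goes after the if err != nil check, since there is nothing to close when Open fails. Several hunks below (updateEventTimestamp, getLastEventTimestamp, runContainerdDaemon, newTestNotary) also move an existing defer from before the error check to after it for the same reason.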
@@ -162,6 +162,7 @@ func runBuild(dockerCli *client.DockerCli, options buildOptions) error {
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
+	defer f.Close()
 
 	var excludes []string
 	if err == nil {
@@ -255,9 +255,9 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
 	// ignoring error because the file was already opened successfully
 	tmpFileSt, err := tmpFile.Stat()
 	if err != nil {
+		tmpFile.Close()
 		return
 	}
-	tmpFile.Close()
 
 	// Set the mtime to the Last-Modified header value if present
 	// Otherwise just remove atime and mtime
@@ -272,6 +272,8 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
 		}
 	}
 
+	tmpFile.Close()
+
 	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
 		return
 	}
@@ -21,6 +21,7 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
+	defer f.Close()
 
 	d := parser.Directive{LookingForDirectives: true}
 	parser.SetEscapeToken(parser.DefaultEscapeToken, &d)
@@ -38,6 +38,7 @@ func TestTestNegative(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Dockerfile missing for %s: %v", dir, err)
 		}
+		defer df.Close()
 
 		d := Directive{LookingForDirectives: true}
 		SetEscapeToken(DefaultEscapeToken, &d)
@@ -45,8 +46,6 @@ func TestTestNegative(t *testing.T) {
 		if err == nil {
 			t.Fatalf("No error parsing broken dockerfile for %s", dir)
 		}
-
-		df.Close()
 	}
 }
 
@@ -36,6 +36,7 @@ func (c DockerIgnoreContext) Process(filesToRemove []string) error {
 		return err
 	}
 	excludes, _ := dockerignore.ReadAll(f)
+	f.Close()
 	filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
 	for _, fileToRemove := range filesToRemove {
 		rm, _ := fileutils.Matches(fileToRemove, excludes)
@@ -12,11 +12,11 @@ import (
 // ReadAll reads a .dockerignore file and returns the list of file patterns
 // to ignore. Note this will trim whitespace from each line as well
 // as use GO's "clean" func to get the shortest/cleanest path for each.
-func ReadAll(reader io.ReadCloser) ([]string, error) {
+func ReadAll(reader io.Reader) ([]string, error) {
 	if reader == nil {
 		return nil, nil
 	}
-	defer reader.Close()
+
 	scanner := bufio.NewScanner(reader)
 	var excludes []string
 	currentLine := 0
@@ -35,6 +35,8 @@ func TestReadAll(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer diFd.Close()
+
 	di, err = ReadAll(diFd)
 	if err != nil {
 		t.Fatal(err)
@@ -484,6 +484,8 @@ func TestJsonSaveWithNoFile(t *testing.T) {
 
 	fn := filepath.Join(tmpHome, ConfigFileName)
 	f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	defer f.Close()
+
 	err = config.SaveToWriter(f)
 	if err != nil {
 		t.Fatalf("Failed saving to file: %q", err)
@@ -522,6 +524,8 @@ func TestLegacyJsonSaveWithNoFile(t *testing.T) {
 
 	fn := filepath.Join(tmpHome, ConfigFileName)
 	f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	defer f.Close()
+
 	if err = config.SaveToWriter(f); err != nil {
 		t.Fatalf("Failed saving to file: %q", err)
 	}
@@ -55,6 +55,8 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
 			}
 			continue
 		}
+		defer f.Close()
+
 		files = append(files, f)
 	}
 
@@ -63,6 +65,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
 		logWatcher.Err <- err
 		return
 	}
+	defer latestFile.Close()
 
 	if config.Tail != 0 {
 		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
@@ -174,17 +174,18 @@ func (s *saveSession) save(outStream io.Writer) error {
 
 	if len(reposLegacy) > 0 {
 		reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
-		f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+		rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 		if err != nil {
-			f.Close()
 			return err
 		}
-		if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
+
+		if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil {
+			rf.Close()
 			return err
 		}
-		if err := f.Close(); err != nil {
-			return err
-		}
+
+		rf.Close()
+
 		if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
 			return err
 		}
@@ -193,15 +194,16 @@ func (s *saveSession) save(outStream io.Writer) error {
 	manifestFileName := filepath.Join(tempDir, manifestFileName)
 	f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
-		f.Close()
 		return err
 	}
+
 	if err := json.NewEncoder(f).Encode(manifest); err != nil {
+		f.Close()
 		return err
 	}
-	if err := f.Close(); err != nil {
-		return err
-	}
+
+	f.Close()
+
 	if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
 		return err
 	}
@@ -1151,6 +1151,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
 	if err != nil {
 		c.Fatal(err)
 	}
+	defer f.Close()
+
 	var res struct {
 		Log    string `json:"log"`
 		Stream string `json:"stream"`
@@ -1229,6 +1231,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
 	if err != nil {
 		c.Fatal(err)
 	}
+	defer f.Close()
+
 	var res struct {
 		Log    string `json:"log"`
 		Stream string `json:"stream"`
@@ -131,6 +131,7 @@ func testPushEmptyLayer(c *check.C) {
 
 	freader, err := os.Open(emptyTarball.Name())
 	c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball"))
+	defer freader.Close()
 
 	importCmd := exec.Command(dockerBinary, "import", "-", repoName)
 	importCmd.Stdin = freader
@@ -283,6 +283,7 @@ func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) {
 
 		f, err := os.Open(layerPath)
 		c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err))
+		defer f.Close()
 
 		entries, err := listTar(f)
 		for _, e := range entries {
@@ -35,6 +35,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
 
 	tmpFile, err = os.Open(tmpFile.Name())
 	c.Assert(err, check.IsNil)
+	defer tmpFile.Close()
 
 	deleteImages(repoName)
 
@@ -76,6 +76,8 @@ http:
 	if err != nil {
 		return nil, err
 	}
+	defer config.Close()
+
 	if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL, authTemplate); err != nil {
 		os.RemoveAll(tmp)
 		return nil, err
@@ -175,6 +175,7 @@ var (
 
 	// We need extra check on redhat based distributions
 	if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil {
+		defer f.Close()
 		b := make([]byte, 1)
 		_, _ = f.Read(b)
 		if string(b) == "N" {
@@ -61,10 +61,10 @@ func newTestNotary(c *check.C) (*testNotary, error) {
 	}
 	confPath := filepath.Join(tmp, "config.json")
 	config, err := os.Create(confPath)
-	defer config.Close()
 	if err != nil {
 		return nil, err
 	}
+	defer config.Close()
 
 	workingDir, err := os.Getwd()
 	if err != nil {
@@ -78,10 +78,11 @@ func newTestNotary(c *check.C) (*testNotary, error) {
 	// generate client config
 	clientConfPath := filepath.Join(tmp, "client-config.json")
 	clientConfig, err := os.Create(clientConfPath)
-	defer clientConfig.Close()
 	if err != nil {
 		return nil, err
 	}
+	defer clientConfig.Close()
+
 	template = `{
 	"trust_dir" : "%s",
 	"remote_server": {
@@ -741,18 +741,20 @@ func TestTarStreamVerification(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer src.Close()
 
 	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer dst.Close()
 
 	if _, err := io.Copy(dst, src); err != nil {
 		t.Fatal(err)
 	}
 
-	src.Close()
-	dst.Close()
+	src.Sync()
+	dst.Sync()
 
 	ts, err := layer2.TarStream()
 	if err != nil {
@@ -216,11 +216,11 @@ func (r *remote) Client(b Backend) (Client, error) {
 
 func (r *remote) updateEventTimestamp(t time.Time) {
 	f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600)
-	defer f.Close()
 	if err != nil {
 		logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err)
 		return
 	}
+	defer f.Close()
 
 	b, err := t.MarshalText()
 	if err != nil {
@@ -245,11 +245,11 @@ func (r *remote) getLastEventTimestamp() time.Time {
 	}
 
 	f, err := os.Open(r.eventTsPath)
-	defer f.Close()
 	if err != nil {
 		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
 		return t
 	}
+	defer f.Close()
 
 	b := make([]byte, fi.Size())
 	n, err := f.Read(b)
@@ -329,10 +329,10 @@ func (r *remote) handleEventStream(events containerd.API_EventsClient) {
 func (r *remote) runContainerdDaemon() error {
 	pidFilename := filepath.Join(r.stateDir, containerdPidFilename)
 	f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600)
-	defer f.Close()
 	if err != nil {
 		return err
 	}
+	defer f.Close()
 
 	// File exist, check if the daemon is alive
 	b := make([]byte, 8)
@@ -101,6 +101,11 @@ func TestDecompressStreamGzip(t *testing.T) {
 		t.Fatalf("Fail to create an archive file for test : %s.", output)
 	}
 	archive, err := os.Open(tmp + "archive.gz")
+	if err != nil {
+		t.Fatalf("Fail to open file archive.gz")
+	}
+	defer archive.Close()
+
 	_, err = DecompressStream(archive)
 	if err != nil {
 		t.Fatalf("Failed to decompress a gzip file.")
@@ -114,6 +119,11 @@ func TestDecompressStreamBzip2(t *testing.T) {
 		t.Fatalf("Fail to create an archive file for test : %s.", output)
 	}
 	archive, err := os.Open(tmp + "archive.bz2")
+	if err != nil {
+		t.Fatalf("Fail to open file archive.bz2")
+	}
+	defer archive.Close()
+
 	_, err = DecompressStream(archive)
 	if err != nil {
 		t.Fatalf("Failed to decompress a bzip2 file.")
@@ -130,6 +140,10 @@ func TestDecompressStreamXz(t *testing.T) {
 		t.Fatalf("Fail to create an archive file for test : %s.", output)
 	}
 	archive, err := os.Open(tmp + "archive.xz")
+	if err != nil {
+		t.Fatalf("Fail to open file archive.xz")
+	}
+	defer archive.Close()
 	_, err = DecompressStream(archive)
 	if err != nil {
 		t.Fatalf("Failed to decompress an xz file.")
@@ -141,6 +155,8 @@ func TestCompressStreamXzUnsuported(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Fail to create the destination file")
 	}
+	defer dest.Close()
+
 	_, err = CompressStream(dest, Xz)
 	if err == nil {
 		t.Fatalf("Should fail as xz is unsupported for compression format.")
@@ -152,6 +168,8 @@ func TestCompressStreamBzip2Unsupported(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Fail to create the destination file")
 	}
+	defer dest.Close()
+
 	_, err = CompressStream(dest, Xz)
 	if err == nil {
 		t.Fatalf("Should fail as xz is unsupported for compression format.")
@@ -163,6 +181,8 @@ func TestCompressStreamInvalid(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Fail to create the destination file")
 	}
+	defer dest.Close()
+
 	_, err = CompressStream(dest, -1)
 	if err == nil {
 		t.Fatalf("Should fail as xz is unsupported for compression format.")
@@ -795,6 +815,8 @@ func TestUntarUstarGnuConflict(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer f.Close()
+
 	found := false
 	tr := tar.NewReader(f)
 	// Iterate through the files in the archive.
@@ -14,6 +14,8 @@ func TestTarSumRemoveNonExistent(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer reader.Close()
+
 	ts, err := NewTarSum(reader, false, Version0)
 	if err != nil {
 		t.Fatal(err)
@@ -42,6 +44,8 @@ func TestTarSumRemove(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer reader.Close()
+
 	ts, err := NewTarSum(reader, false, Version0)
 	if err != nil {
 		t.Fatal(err)
@@ -200,6 +200,8 @@ func TestNewTarSumForLabel(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
+		defer reader.Close()
+
 		label := strings.Split(layer.tarsum, ":")[0]
 		ts, err := NewTarSumForLabel(reader, false, label)
 		if err != nil {
@@ -302,6 +304,8 @@ func TestTarSumsReadSize(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
+		defer reader.Close()
+
 		ts, err := NewTarSum(reader, false, layer.version)
 		if err != nil {
 			t.Fatal(err)
@@ -380,6 +384,8 @@ func TestTarSums(t *testing.T) {
 			t.Errorf("failed to open %s: %s", layer.jsonfile, err)
 			continue
 		}
+		defer jfh.Close()
+
 		buf, err := ioutil.ReadAll(jfh)
 		if err != nil {
 			t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
@@ -559,12 +565,13 @@ func Benchmark9kTar(b *testing.B) {
 		b.Error(err)
 		return
 	}
+	defer fh.Close()
+
 	n, err := io.Copy(buf, fh)
 	if err != nil {
 		b.Error(err)
 		return
 	}
-	fh.Close()
 
 	reader := bytes.NewReader(buf.Bytes())
 
@@ -589,12 +596,13 @@ func Benchmark9kTarGzip(b *testing.B) {
 		b.Error(err)
 		return
 	}
+	defer fh.Close()
+
 	n, err := io.Copy(buf, fh)
 	if err != nil {
 		b.Error(err)
 		return
 	}
-	fh.Close()
 
 	reader := bytes.NewReader(buf.Bytes())
 
@@ -124,6 +124,8 @@ func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.A
 	if err != nil {
 		return err
 	}
+	defer rootfs.Close()
+
 	_, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, config, rootfs)
 	// XXX: Ignore returning digest for now.
 	// Since digest needs to be written to the ProgressWriter.
@@ -299,6 +299,7 @@ func (pm *Manager) init() error {
 		}
 		return err
 	}
+	defer dt.Close()
 
 	if err := json.NewDecoder(dt).Decode(&pm.plugins); err != nil {
 		return err
@@ -102,6 +102,8 @@ func IsLoaded(name string) error {
 	if err != nil {
 		return err
 	}
+	defer file.Close()
+
 	r := bufio.NewReader(file)
 	for {
 		p, err := r.ReadString('\n')