add defer file.Close() to avoid a potential fd leak

Signed-off-by: allencloud <allen.sun@daocloud.io>
This commit is contained in:
allencloud 2016-06-25 11:57:21 +08:00
parent 2684459ed4
commit 0ead624473
25 changed files with 93 additions and 26 deletions

View File

@ -162,6 +162,7 @@ func runBuild(dockerCli *client.DockerCli, options buildOptions) error {
if err != nil && !os.IsNotExist(err) {
return err
}
defer f.Close()
var excludes []string
if err == nil {

View File

@ -255,9 +255,9 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
// ignoring error because the file was already opened successfully
tmpFileSt, err := tmpFile.Stat()
if err != nil {
tmpFile.Close()
return
}
tmpFile.Close()
// Set the mtime to the Last-Modified header value if present
// Otherwise just remove atime and mtime
@ -272,6 +272,8 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
}
}
tmpFile.Close()
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
return
}

View File

@ -21,6 +21,7 @@ func main() {
if err != nil {
panic(err)
}
defer f.Close()
d := parser.Directive{LookingForDirectives: true}
parser.SetEscapeToken(parser.DefaultEscapeToken, &d)

View File

@ -38,6 +38,7 @@ func TestTestNegative(t *testing.T) {
if err != nil {
t.Fatalf("Dockerfile missing for %s: %v", dir, err)
}
defer df.Close()
d := Directive{LookingForDirectives: true}
SetEscapeToken(DefaultEscapeToken, &d)
@ -45,8 +46,6 @@ func TestTestNegative(t *testing.T) {
if err == nil {
t.Fatalf("No error parsing broken dockerfile for %s", dir)
}
df.Close()
}
}

View File

@ -36,6 +36,7 @@ func (c DockerIgnoreContext) Process(filesToRemove []string) error {
return err
}
excludes, _ := dockerignore.ReadAll(f)
f.Close()
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
for _, fileToRemove := range filesToRemove {
rm, _ := fileutils.Matches(fileToRemove, excludes)

View File

@ -12,11 +12,11 @@ import (
// ReadAll reads a .dockerignore file and returns the list of file patterns
// to ignore. Note this will trim whitespace from each line as well
// as use GO's "clean" func to get the shortest/cleanest path for each.
func ReadAll(reader io.ReadCloser) ([]string, error) {
func ReadAll(reader io.Reader) ([]string, error) {
if reader == nil {
return nil, nil
}
defer reader.Close()
scanner := bufio.NewScanner(reader)
var excludes []string
currentLine := 0

View File

@ -35,6 +35,8 @@ func TestReadAll(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer diFd.Close()
di, err = ReadAll(diFd)
if err != nil {
t.Fatal(err)

View File

@ -484,6 +484,8 @@ func TestJsonSaveWithNoFile(t *testing.T) {
fn := filepath.Join(tmpHome, ConfigFileName)
f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
defer f.Close()
err = config.SaveToWriter(f)
if err != nil {
t.Fatalf("Failed saving to file: %q", err)
@ -522,6 +524,8 @@ func TestLegacyJsonSaveWithNoFile(t *testing.T) {
fn := filepath.Join(tmpHome, ConfigFileName)
f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
defer f.Close()
if err = config.SaveToWriter(f); err != nil {
t.Fatalf("Failed saving to file: %q", err)
}

View File

@ -55,6 +55,8 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
}
continue
}
defer f.Close()
files = append(files, f)
}
@ -63,6 +65,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
logWatcher.Err <- err
return
}
defer latestFile.Close()
if config.Tail != 0 {
tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)

View File

@ -174,17 +174,18 @@ func (s *saveSession) save(outStream io.Writer) error {
if len(reposLegacy) > 0 {
reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
f.Close()
return err
}
if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
return err
}
if err := f.Close(); err != nil {
if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil {
rf.Close()
return err
}
rf.Close()
if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
return err
}
@ -193,15 +194,16 @@ func (s *saveSession) save(outStream io.Writer) error {
manifestFileName := filepath.Join(tempDir, manifestFileName)
f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
if err := json.NewEncoder(f).Encode(manifest); err != nil {
f.Close()
return err
}
if err := json.NewEncoder(f).Encode(manifest); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
f.Close()
if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
return err
}

View File

@ -1151,6 +1151,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
if err != nil {
c.Fatal(err)
}
defer f.Close()
var res struct {
Log string `json:"log"`
Stream string `json:"stream"`
@ -1229,6 +1231,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
if err != nil {
c.Fatal(err)
}
defer f.Close()
var res struct {
Log string `json:"log"`
Stream string `json:"stream"`

View File

@ -131,6 +131,7 @@ func testPushEmptyLayer(c *check.C) {
freader, err := os.Open(emptyTarball.Name())
c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball"))
defer freader.Close()
importCmd := exec.Command(dockerBinary, "import", "-", repoName)
importCmd.Stdin = freader

View File

@ -283,6 +283,7 @@ func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) {
f, err := os.Open(layerPath)
c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err))
defer f.Close()
entries, err := listTar(f)
for _, e := range entries {

View File

@ -35,6 +35,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
tmpFile, err = os.Open(tmpFile.Name())
c.Assert(err, check.IsNil)
defer tmpFile.Close()
deleteImages(repoName)

View File

@ -76,6 +76,8 @@ http:
if err != nil {
return nil, err
}
defer config.Close()
if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL, authTemplate); err != nil {
os.RemoveAll(tmp)
return nil, err

View File

@ -175,6 +175,7 @@ var (
// We need extra check on redhat based distributions
if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil {
defer f.Close()
b := make([]byte, 1)
_, _ = f.Read(b)
if string(b) == "N" {

View File

@ -61,10 +61,10 @@ func newTestNotary(c *check.C) (*testNotary, error) {
}
confPath := filepath.Join(tmp, "config.json")
config, err := os.Create(confPath)
defer config.Close()
if err != nil {
return nil, err
}
defer config.Close()
workingDir, err := os.Getwd()
if err != nil {
@ -78,10 +78,11 @@ func newTestNotary(c *check.C) (*testNotary, error) {
// generate client config
clientConfPath := filepath.Join(tmp, "client-config.json")
clientConfig, err := os.Create(clientConfPath)
defer clientConfig.Close()
if err != nil {
return nil, err
}
defer clientConfig.Close()
template = `{
"trust_dir" : "%s",
"remote_server": {

View File

@ -741,18 +741,20 @@ func TestTarStreamVerification(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer src.Close()
dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
if err != nil {
t.Fatal(err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
t.Fatal(err)
}
src.Close()
dst.Close()
src.Sync()
dst.Sync()
ts, err := layer2.TarStream()
if err != nil {

View File

@ -216,11 +216,11 @@ func (r *remote) Client(b Backend) (Client, error) {
func (r *remote) updateEventTimestamp(t time.Time) {
f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600)
defer f.Close()
if err != nil {
logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err)
return
}
defer f.Close()
b, err := t.MarshalText()
if err != nil {
@ -245,11 +245,11 @@ func (r *remote) getLastEventTimestamp() time.Time {
}
f, err := os.Open(r.eventTsPath)
defer f.Close()
if err != nil {
logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
return t
}
defer f.Close()
b := make([]byte, fi.Size())
n, err := f.Read(b)
@ -329,10 +329,10 @@ func (r *remote) handleEventStream(events containerd.API_EventsClient) {
func (r *remote) runContainerdDaemon() error {
pidFilename := filepath.Join(r.stateDir, containerdPidFilename)
f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600)
defer f.Close()
if err != nil {
return err
}
defer f.Close()
// File exist, check if the daemon is alive
b := make([]byte, 8)

View File

@ -101,6 +101,11 @@ func TestDecompressStreamGzip(t *testing.T) {
t.Fatalf("Fail to create an archive file for test : %s.", output)
}
archive, err := os.Open(tmp + "archive.gz")
if err != nil {
t.Fatalf("Fail to open file archive.gz")
}
defer archive.Close()
_, err = DecompressStream(archive)
if err != nil {
t.Fatalf("Failed to decompress a gzip file.")
@ -114,6 +119,11 @@ func TestDecompressStreamBzip2(t *testing.T) {
t.Fatalf("Fail to create an archive file for test : %s.", output)
}
archive, err := os.Open(tmp + "archive.bz2")
if err != nil {
t.Fatalf("Fail to open file archive.bz2")
}
defer archive.Close()
_, err = DecompressStream(archive)
if err != nil {
t.Fatalf("Failed to decompress a bzip2 file.")
@ -130,6 +140,10 @@ func TestDecompressStreamXz(t *testing.T) {
t.Fatalf("Fail to create an archive file for test : %s.", output)
}
archive, err := os.Open(tmp + "archive.xz")
if err != nil {
t.Fatalf("Fail to open file archive.xz")
}
defer archive.Close()
_, err = DecompressStream(archive)
if err != nil {
t.Fatalf("Failed to decompress an xz file.")
@ -141,6 +155,8 @@ func TestCompressStreamXzUnsuported(t *testing.T) {
if err != nil {
t.Fatalf("Fail to create the destination file")
}
defer dest.Close()
_, err = CompressStream(dest, Xz)
if err == nil {
t.Fatalf("Should fail as xz is unsupported for compression format.")
@ -152,6 +168,8 @@ func TestCompressStreamBzip2Unsupported(t *testing.T) {
if err != nil {
t.Fatalf("Fail to create the destination file")
}
defer dest.Close()
_, err = CompressStream(dest, Xz)
if err == nil {
t.Fatalf("Should fail as xz is unsupported for compression format.")
@ -163,6 +181,8 @@ func TestCompressStreamInvalid(t *testing.T) {
if err != nil {
t.Fatalf("Fail to create the destination file")
}
defer dest.Close()
_, err = CompressStream(dest, -1)
if err == nil {
t.Fatalf("Should fail as xz is unsupported for compression format.")
@ -795,6 +815,8 @@ func TestUntarUstarGnuConflict(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer f.Close()
found := false
tr := tar.NewReader(f)
// Iterate through the files in the archive.

View File

@ -14,6 +14,8 @@ func TestTarSumRemoveNonExistent(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer reader.Close()
ts, err := NewTarSum(reader, false, Version0)
if err != nil {
t.Fatal(err)
@ -42,6 +44,8 @@ func TestTarSumRemove(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer reader.Close()
ts, err := NewTarSum(reader, false, Version0)
if err != nil {
t.Fatal(err)

View File

@ -200,6 +200,8 @@ func TestNewTarSumForLabel(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer reader.Close()
label := strings.Split(layer.tarsum, ":")[0]
ts, err := NewTarSumForLabel(reader, false, label)
if err != nil {
@ -302,6 +304,8 @@ func TestTarSumsReadSize(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer reader.Close()
ts, err := NewTarSum(reader, false, layer.version)
if err != nil {
t.Fatal(err)
@ -380,6 +384,8 @@ func TestTarSums(t *testing.T) {
t.Errorf("failed to open %s: %s", layer.jsonfile, err)
continue
}
defer jfh.Close()
buf, err := ioutil.ReadAll(jfh)
if err != nil {
t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
@ -559,12 +565,13 @@ func Benchmark9kTar(b *testing.B) {
b.Error(err)
return
}
defer fh.Close()
n, err := io.Copy(buf, fh)
if err != nil {
b.Error(err)
return
}
fh.Close()
reader := bytes.NewReader(buf.Bytes())
@ -589,12 +596,13 @@ func Benchmark9kTarGzip(b *testing.B) {
b.Error(err)
return
}
defer fh.Close()
n, err := io.Copy(buf, fh)
if err != nil {
b.Error(err)
return
}
fh.Close()
reader := bytes.NewReader(buf.Bytes())

View File

@ -124,6 +124,8 @@ func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.A
if err != nil {
return err
}
defer rootfs.Close()
_, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, config, rootfs)
// XXX: Ignore returning digest for now.
// Since digest needs to be written to the ProgressWriter.

View File

@ -299,6 +299,7 @@ func (pm *Manager) init() error {
}
return err
}
defer dt.Close()
if err := json.NewDecoder(dt).Decode(&pm.plugins); err != nil {
return err

View File

@ -102,6 +102,8 @@ func IsLoaded(name string) error {
if err != nil {
return err
}
defer file.Close()
r := bufio.NewReader(file)
for {
p, err := r.ReadString('\n')