diff --git a/client/build_prune.go b/client/build_prune.go index 397d67cdcf..2b6606236e 100644 --- a/client/build_prune.go +++ b/client/build_prune.go @@ -3,8 +3,8 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "fmt" "net/url" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -23,12 +23,12 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru if opts.All { query.Set("all", "1") } - query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) - filters, err := filters.ToJSON(opts.Filters) + query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + f, err := filters.ToJSON(opts.Filters) if err != nil { return nil, errors.Wrap(err, "prune could not marshal filters option") } - query.Set("filters", filters) + query.Set("filters", f) serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) defer ensureReaderClosed(serverResp) @@ -38,7 +38,7 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru } if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + return nil, errors.Wrap(err, "error retrieving disk usage") } return &report, nil diff --git a/daemon/config/config.go b/daemon/config/config.go index cf2f0f648e..a47515d161 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -224,7 +224,7 @@ type CommonConfig struct { DNSConfig LogConfig - BridgeConfig // bridgeConfig holds bridge network specific configuration. + BridgeConfig // BridgeConfig holds bridge network specific configuration. NetworkConfig registry.ServiceOptions @@ -317,7 +317,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) { if len(stringSlice) > 1 { // If there is a conflict we will return an error if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { - return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + return nil, errors.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) } labelMap[stringSlice[0]] = stringSlice[1] } @@ -325,7 +325,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) { newLabels := []string{} for k, v := range labelMap { - newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + newLabels = append(newLabels, k+"="+v) } return newLabels, nil } @@ -521,7 +521,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag for key := range unknownKeys { unknown = append(unknown, key) } - return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + return errors.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) } var conflicts []string @@ -555,7 +555,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag flags.Visit(duplicatedConflicts) if len(conflicts) > 0 { - return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + return errors.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) } return nil } @@ -572,7 +572,7 @@ func Validate(config *Config) error { // validate log-level if config.LogLevel != "" { if _, err := logrus.ParseLevel(config.LogLevel); err != nil { - return 
fmt.Errorf("invalid logging level: %s", config.LogLevel) + return errors.Errorf("invalid logging level: %s", config.LogLevel) } } @@ -599,22 +599,22 @@ func Validate(config *Config) error { // TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem if config.Mtu < 0 { - return fmt.Errorf("invalid default MTU: %d", config.Mtu) + return errors.Errorf("invalid default MTU: %d", config.Mtu) } if config.MaxConcurrentDownloads < 0 { - return fmt.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads) + return errors.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads) } if config.MaxConcurrentUploads < 0 { - return fmt.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads) + return errors.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads) } if config.MaxDownloadAttempts < 0 { - return fmt.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts) + return errors.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts) } // validate that "default" runtime is not reset if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { if _, ok := runtimes[StockRuntimeName]; ok { - return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) + return errors.Errorf("runtime name '%s' is reserved", StockRuntimeName) } } @@ -626,7 +626,7 @@ func Validate(config *Config) error { if !builtinRuntimes[defaultRuntime] { runtimes := config.GetAllRuntimes() if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) { - return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime) } } } diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index dee8ef57fe..efd70da7f8 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -32,6 +32,7 @@ import ( "os/exec" "path" "path/filepath" + "strconv" "strings" "sync" @@ -209,8 +210,8 @@ func (a *Driver) Status() [][2]string { return [][2]string{ {"Root Dir", a.rootPath()}, {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + {"Dirs", strconv.Itoa(len(ids))}, + {"Dirperm1 Supported", strconv.FormatBool(useDirperm())}, } } diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 26142acfc5..61b70739bc 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -10,6 +10,7 @@ import ( "os" "path" "path/filepath" + "strconv" "sync" "testing" @@ -651,8 +652,8 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { for i := 1; i < 127; i++ { expected++ var ( - parent = fmt.Sprintf("%d", i-1) - current = fmt.Sprintf("%d", i) + parent = strconv.Itoa(i - 1) + current = strconv.Itoa(i) ) if parent == "0" { diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index bebe8f5d69..720f1ab0cd 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -156,7 +156,7 @@ func (d *Driver) Status() [][2]string { status = append(status, [2]string{"Build Version", bv}) } if lv := btrfsLibVersion(); lv != -1 { - status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + status = append(status, [2]string{"Library Version", strconv.Itoa(lv)}) } return status } diff 
--git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index 2fbd6ebc48..b704a79052 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -231,7 +231,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { } func (d *Driver) cloneFilesystem(name, parentName string) error { - snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + snapshotName := strconv.Itoa(time.Now().Nanosecond()) parentDataset := zfs.Dataset{Name: parentName} snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) if err != nil { diff --git a/daemon/keys.go b/daemon/keys.go index 3a494fef22..e736946355 100644 --- a/daemon/keys.go +++ b/daemon/keys.go @@ -4,7 +4,6 @@ package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" "os" "strconv" "strings" @@ -38,7 +37,8 @@ func setRootKeyLimit(limit int) error { return err } defer keys.Close() - if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + _, err = keys.WriteString(strconv.Itoa(limit)) + if err != nil { return err } bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) @@ -46,7 +46,7 @@ func setRootKeyLimit(limit int) error { return err } defer bytes.Close() - _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + _, err = bytes.WriteString(strconv.Itoa(limit * rootKeyByteMultiplier)) return err } diff --git a/daemon/kill.go b/daemon/kill.go index 953249c627..6020501844 100644 --- a/daemon/kill.go +++ b/daemon/kill.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "runtime" + "strconv" "syscall" "time" @@ -125,10 +126,9 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign } } - attributes := map[string]string{ - "signal": fmt.Sprintf("%d", stopSignal), - } - daemon.LogContainerEventWithAttributes(container, "kill", attributes) + daemon.LogContainerEventWithAttributes(container, "kill", map[string]string{ + "signal": strconv.Itoa(int(stopSignal)), + }) return nil } diff --git a/daemon/links/links_test.go b/daemon/links/links_test.go index e1b36dbbd9..2d624759fd 100644 --- a/daemon/links/links_test.go +++ b/daemon/links/links_test.go @@ -2,6 +2,7 @@ package links // import "github.com/docker/docker/daemon/links" import ( "fmt" + "strconv" "strings" "testing" @@ -200,7 +201,7 @@ func TestLinkPortRangeEnv(t *testing.T) { if env[tcpaddr] != "172.0.17.2" { t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) } - if env[tcpport] != fmt.Sprintf("%d", i) { + if env[tcpport] != strconv.Itoa(i) { t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) } if env[tcpproto] != "tcp" { diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go index f28b029452..d9a04d8a05 100644 --- a/daemon/logger/awslogs/cloudwatchlogs_test.go +++ b/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -1426,7 +1426,7 @@ func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { times := maximumLogEventsPerPut timestamp := time.Now() for i := 0; i < times; i++ { - line := fmt.Sprintf("%d", i) + line := strconv.Itoa(i) if i%2 == 0 { timestamp.Add(1 * time.Nanosecond) } diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go index 3ccf1d1a96..db1399cb03 100644 --- a/daemon/logger/jsonfilelog/jsonfilelog_test.go +++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "encoding/json" - "fmt" "io" "os" "path/filepath" @@ -128,7 +127,7 @@ func BenchmarkJSONFileLoggerLog(b *testing.B) { 
bytes.Repeat([]byte("a long string"), 100), bytes.Repeat([]byte("a really long string"), 10000), } { - b.Run(fmt.Sprintf("%d", len(data)), func(b *testing.B) { + b.Run(strconv.Itoa(len(data)), func(b *testing.B) { testMsg := &logger.Message{ Line: data, Source: "stderr", diff --git a/daemon/logger/local/local_test.go b/daemon/logger/local/local_test.go index 28de7d2485..fe24648065 100644 --- a/daemon/logger/local/local_test.go +++ b/daemon/logger/local/local_test.go @@ -3,10 +3,10 @@ package local import ( "bytes" "encoding/binary" - "fmt" "io" "os" "path/filepath" + "strconv" "testing" "time" @@ -111,7 +111,7 @@ func BenchmarkLogWrite(b *testing.B) { bytes.Repeat([]byte("a long string"), 100), bytes.Repeat([]byte("a really long string"), 10000), } { - b.Run(fmt.Sprintf("%d", len(data)), func(b *testing.B) { + b.Run(strconv.Itoa(len(data)), func(b *testing.B) { entry := &logdriver.LogEntry{Line: data, Source: "stdout", TimeNano: t.UnixNano()} b.SetBytes(int64(entry.Size() + encodeBinaryLen + encodeBinaryLen)) b.ResetTimer() diff --git a/daemon/logger/splunk/splunk_test.go b/daemon/logger/splunk/splunk_test.go index 1f2bdc3c3c..d531cac5a9 100644 --- a/daemon/logger/splunk/splunk_test.go +++ b/daemon/logger/splunk/splunk_test.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "runtime" + "strconv" "testing" "time" @@ -827,7 +828,7 @@ func TestBatching(t *testing.T) { } for i := 0; i < defaultStreamChannelSize*4; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -845,7 +846,7 @@ func TestBatching(t *testing.T) { if event, err := message.EventAsMap(); err != nil { t.Fatal(err) } else { - if event["line"] != fmt.Sprintf("%d", i) { + if event["line"] != strconv.Itoa(i) { t.Fatalf("Unexpected event in message %v", event) } } @@ -887,7 +888,7 @@ func TestFrequency(t *testing.T) { } for i := 0; i < 10; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } time.Sleep(15 * time.Millisecond) @@ -906,7 +907,7 @@ func TestFrequency(t *testing.T) { if event, err := message.EventAsMap(); err != nil { t.Fatal(err) } else { - if event["line"] != fmt.Sprintf("%d", i) { + if event["line"] != strconv.Itoa(i) { t.Fatalf("Unexpected event in message %v", event) } } @@ -958,7 +959,7 @@ func TestOneMessagePerRequest(t *testing.T) { } for i := 0; i < 10; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -976,7 +977,7 @@ func TestOneMessagePerRequest(t *testing.T) { if event, err := message.EventAsMap(); err != nil { t.Fatal(err) } else { - if event["line"] != fmt.Sprintf("%d", i) { + if event["line"] != strconv.Itoa(i) { t.Fatalf("Unexpected event in message %v", event) } } @@ -1050,7 +1051,7 @@ func TestSkipVerify(t *testing.T) { } for i := 0; i < defaultStreamChannelSize*2; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err 
!= nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1062,7 +1063,7 @@ func TestSkipVerify(t *testing.T) { hec.simulateErr(false) for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1080,7 +1081,7 @@ func TestSkipVerify(t *testing.T) { if event, err := message.EventAsMap(); err != nil { t.Fatal(err) } else { - if event["line"] != fmt.Sprintf("%d", i) { + if event["line"] != strconv.Itoa(i) { t.Fatalf("Unexpected event in message %v", event) } } @@ -1124,7 +1125,7 @@ func TestBufferMaximum(t *testing.T) { } for i := 0; i < 11; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1193,7 +1194,7 @@ func TestServerAlwaysDown(t *testing.T) { } for i := 0; i < 5; i++ { - if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } diff --git a/daemon/reload_unix.go b/daemon/reload_unix.go index 590267c484..4adde34643 100644 --- a/daemon/reload_unix.go +++ b/daemon/reload_unix.go @@ -5,7 +5,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( "bytes" - "fmt" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" @@ -49,12 +49,12 @@ func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string] if runtimeList.Len() > 0 { runtimeList.WriteRune(' ') } - runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt.Path)) + runtimeList.WriteString(name + ":" + rt.Path) } attributes["runtimes"] = runtimeList.String() attributes["default-runtime"] = daemon.configStore.DefaultRuntime - attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) + attributes["default-shm-size"] = strconv.FormatInt(int64(daemon.configStore.ShmSize), 10) attributes["default-ipc-mode"] = daemon.configStore.IpcMode attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode diff --git a/daemon/resize.go b/daemon/resize.go index 2fd427ae9e..d1325e6297 100644 --- a/daemon/resize.go +++ b/daemon/resize.go @@ -2,7 +2,8 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" - "fmt" + "errors" + "strconv" "time" ) @@ -23,8 +24,8 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error { if err = tsk.Resize(context.Background(), uint32(width), uint32(height)); err == nil { attributes := map[string]string{ - "height": fmt.Sprintf("%d", height), - "width": fmt.Sprintf("%d", width), + "height": strconv.Itoa(height), + "width": strconv.Itoa(width), } daemon.LogContainerEventWithAttributes(container, "resize", attributes) } @@ -49,6 +50,6 @@ func (daemon *Daemon) ContainerExecResize(name string, height, width int) error case <-ec.Started: return ec.Process.Resize(context.Background(), uint32(width), uint32(height)) 
case <-timeout.C: - return fmt.Errorf("timeout waiting for exec session ready") + return errors.New("timeout waiting for exec session ready") } } diff --git a/daemon/runtime_unix.go b/daemon/runtime_unix.go index df16aa14d7..52e976f75f 100644 --- a/daemon/runtime_unix.go +++ b/daemon/runtime_unix.go @@ -14,7 +14,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -56,7 +55,7 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes") // Remove old temp directory if any os.RemoveAll(runtimeDir + "-old") - tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes") + tmpDir, err := os.MkdirTemp(daemon.configStore.Root, "gen-runtimes") if err != nil { return errors.Wrap(err, "failed to get temp dir to generate runtime scripts") } diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index 8c026e5e7c..bbc19cb10f 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -135,7 +135,7 @@ func (s *DockerCLIInspectSuite) TestInspectTypeFlagWithInvalidValue(c *testing.T out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") assert.Assert(c, err != nil, "%d", exitCode) - assert.Equal(c, exitCode, 1, fmt.Sprintf("%s", err)) + assert.Equal(c, exitCode, 1, err) assert.Assert(c, strings.Contains(out, "not a valid value for --type")) } diff --git a/integration/container/checkpoint_test.go b/integration/container/checkpoint_test.go index fb37fcea60..0bb8fcf581 100644 --- a/integration/container/checkpoint_test.go +++ b/integration/container/checkpoint_test.go @@ -2,7 +2,6 @@ package container // import "github.com/docker/docker/integration/container" import ( "context" - "fmt" "os/exec" "regexp" "sort" @@ -84,9 +83,9 @@ func TestCheckpoint(t *testing.T) { err = client.CheckpointCreate(ctx, cID, cptOpt) if err != nil { // An error can contain a path to a dump file - t.Logf("%s", err) + t.Log(err) re := regexp.MustCompile("path= (.*): ") - m := re.FindStringSubmatch(fmt.Sprintf("%s", err)) + m := re.FindStringSubmatch(err.Error()) if len(m) >= 2 { dumpLog := m[1] t.Logf("%s", dumpLog) diff --git a/layer/filestore.go b/layer/filestore.go index 97307c24fa..ce3a309d8d 100644 --- a/layer/filestore.go +++ b/layer/filestore.go @@ -3,7 +3,6 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" "encoding/json" - "fmt" "io" "os" "path/filepath" @@ -40,7 +39,7 @@ type fileMetadataTransaction struct { // which is backed by files on disk using the provided root // as the root of metadata files. 
func newFSMetadataStore(root string) (*fileMetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { + if err := os.MkdirAll(root, 0o700); err != nil { return nil, err } return &fileMetadataStore{ @@ -67,7 +66,7 @@ func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { + if err := os.MkdirAll(tmpDir, 0o755); err != nil { return nil, err } ws, err := ioutils.NewAtomicWriteSet(tmpDir) @@ -82,20 +81,19 @@ func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, erro } func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return fm.ws.WriteFile("size", []byte(content), 0644) + return fm.ws.WriteFile("size", []byte(strconv.FormatInt(size, 10)), 0o644) } func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0o644) } func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0o644) } func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { - return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0o644) } func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { @@ -103,11 +101,11 @@ func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) er if err != nil { return err } - return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) + return fm.ws.WriteFile("descriptor.json", jsonRef, 0o644) } func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return nil, err } @@ -126,7 +124,7 @@ func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteC func (fm *fileMetadataTransaction) Commit(layer ChainID) error { finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(finalDir), 0o755); err != nil { return err } @@ -236,24 +234,24 @@ func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, erro } func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { return err } - return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) + return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0o644) } func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { return err } - return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) + return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0o644) } func (fms 
*fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil { return err } - return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) + return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0o644) } func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { diff --git a/layer/filestore_test.go b/layer/filestore_test.go index c0e6010722..554a77abb7 100644 --- a/layer/filestore_test.go +++ b/layer/filestore_test.go @@ -51,7 +51,7 @@ func TestCommitFailure(t *testing.T) { fms, td, cleanup := newFileMetadataStore(t) defer cleanup() - if err := os.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0o644); err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func TestStartTransactionFailure(t *testing.T) { fms, td, cleanup := newFileMetadataStore(t) defer cleanup() - if err := os.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0o644); err != nil { t.Fatal(err) } @@ -108,7 +108,7 @@ func TestGetOrphan(t *testing.T) { defer cleanup() layerRoot := filepath.Join(td, "sha256") - if err := os.MkdirAll(layerRoot, 0755); err != nil { + if err := os.MkdirAll(layerRoot, 0o755); err != nil { t.Fatal(err) } @@ -123,7 +123,7 @@ func TestGetOrphan(t *testing.T) { t.Fatal(err) } layerPath := fms.getLayerDirectory(layerid) - if err := os.WriteFile(filepath.Join(layerPath, "cache-id"), []byte(stringid.GenerateRandomID()), 0644); err != nil { + if err := os.WriteFile(filepath.Join(layerPath, "cache-id"), []byte(stringid.GenerateRandomID()), 0o644); err != nil { t.Fatal(err) } diff --git a/layer/layer_test.go b/layer/layer_test.go index afda53a871..6161e48751 100644 --- a/layer/layer_test.go +++ b/layer/layer_test.go @@ -139,7 +139,7 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { func (tf *testFile) ApplyFile(root string) error { fullPath := filepath.Join(root, tf.name) - if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(fullPath), 0o755); err != nil { return err } // Check if already exists @@ -247,7 +247,7 @@ func TestMountAndRegister(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() - li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0o644)) layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) @@ -292,12 +292,12 @@ func TestLayerRelease(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0o644))) if err != nil { t.Fatal(err) } - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0o644))) if err != nil { t.Fatal(err) } @@ -306,12 +306,12 @@ func TestLayerRelease(t *testing.T) { t.Fatal(err) } - layer3a, err := 
createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0o644))) if err != nil { t.Fatal(err) } - layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0o644))) if err != nil { t.Fatal(err) } @@ -341,12 +341,12 @@ func TestStoreRestore(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0o644))) if err != nil { t.Fatal(err) } - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0o644))) if err != nil { t.Fatal(err) } @@ -355,7 +355,7 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0o644))) if err != nil { t.Fatal(err) } @@ -374,7 +374,7 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(pathFS, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(pathFS, "testfile.txt"), []byte("nothing here"), 0o644); err != nil { t.Fatal(err) } @@ -457,14 +457,14 @@ func TestTarStreamStability(t *testing.T) { defer cleanup() files1 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0o644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0o644), } - addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0o644) files2 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), - newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0o644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0o664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0o644), } tar1, err := tarFromFiles(files1...) @@ -646,11 +646,11 @@ func TestRegisterExistingLayer(t *testing.T) { defer cleanup() baseFiles := []FileApplier{ - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0o644), } layerFiles := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0o644), } li := initWithFiles(baseFiles...) 
@@ -686,12 +686,12 @@ func TestTarStreamVerification(t *testing.T) { defer cleanup() files1 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0644), + newTestFile("/foo", []byte("abc"), 0o644), + newTestFile("/bar", []byte("def"), 0o644), } files2 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0600), // different perm + newTestFile("/foo", []byte("abc"), 0o644), + newTestFile("/bar", []byte("def"), 0o600), // different perm } tar1, err := tarFromFiles(files1...) diff --git a/layer/layer_unix_test.go b/layer/layer_unix_test.go index 64cda566b1..66ff7c8d32 100644 --- a/layer/layer_unix_test.go +++ b/layer/layer_unix_test.go @@ -25,12 +25,12 @@ func TestLayerSize(t *testing.T) { content1 := []byte("Base contents") content2 := []byte("Added contents") - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0o644))) if err != nil { t.Fatal(err) } - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0o644))) if err != nil { t.Fatal(err) } diff --git a/layer/migration_test.go b/layer/migration_test.go index de3e15f517..9d307fe5c2 100644 --- a/layer/migration_test.go +++ b/layer/migration_test.go @@ -16,7 +16,7 @@ import ( ) func writeTarSplitFile(name string, tarContent []byte) error { - f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return err } @@ -51,12 +51,12 @@ func TestLayerMigration(t *testing.T) { defer os.RemoveAll(td) layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0o644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0o644), } layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0o644), } tar1, err := tarFromFiles(layer1Files...) 
@@ -187,12 +187,12 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { defer os.RemoveAll(td) layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0o644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0o644), } layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0o644), } graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) diff --git a/layer/mount_test.go b/layer/mount_test.go index 643a52c524..dea4a436a0 100644 --- a/layer/mount_test.go +++ b/layer/mount_test.go @@ -20,8 +20,8 @@ func TestMountInit(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() - basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) - initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + basefile := newTestFile("testfile.txt", []byte("base data!"), 0o644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0o777) li := initWithFiles(basefile) layer, err := createLayer(ls, "", li) @@ -66,8 +66,8 @@ func TestMountInit(t *testing.T) { t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) } - if fi.Mode().Perm() != 0777 { - t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + if fi.Mode().Perm() != 0o777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0o777) } } @@ -83,14 +83,14 @@ func TestMountSize(t *testing.T) { content2 := []byte("Mutable contents") contentInit := []byte("why am I excluded from the size ☹") - li := initWithFiles(newTestFile("file1", content1, 0644)) + li := initWithFiles(newTestFile("file1", content1, 0o644)) layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } mountInit := func(root string) error { - return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + return newTestFile("file-init", contentInit, 0o777).ApplyFile(root) } rwLayerOpts := &CreateRWLayerOpts{ InitFunc: mountInit, @@ -106,7 +106,7 @@ func TestMountSize(t *testing.T) { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(pathFS, "file2"), content2, 0755); err != nil { + if err := os.WriteFile(filepath.Join(pathFS, "file2"), content2, 0o755); err != nil { t.Fatal(err) } @@ -129,11 +129,11 @@ func TestMountChanges(t *testing.T) { defer cleanup() basefiles := []FileApplier{ - newTestFile("testfile1.txt", []byte("base data!"), 0644), - newTestFile("testfile2.txt", []byte("base data!"), 0644), - newTestFile("testfile3.txt", []byte("base data!"), 0644), + newTestFile("testfile1.txt", []byte("base data!"), 0o644), + newTestFile("testfile2.txt", []byte("base data!"), 0o644), + newTestFile("testfile3.txt", []byte("base data!"), 0o644), } - initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0o777) li := initWithFiles(basefiles...) 
layer, err := createLayer(ls, "", li) @@ -158,11 +158,11 @@ func TestMountChanges(t *testing.T) { t.Fatal(err) } - if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile1.txt"), 0755); err != nil { + if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile1.txt"), 0o755); err != nil { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(pathFS, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + if err := os.WriteFile(filepath.Join(pathFS, "testfile1.txt"), []byte("mount data!"), 0o755); err != nil { t.Fatal(err) } @@ -170,11 +170,11 @@ func TestMountChanges(t *testing.T) { t.Fatal(err) } - if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile3.txt"), 0755); err != nil { + if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile3.txt"), 0o755); err != nil { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(pathFS, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(pathFS, "testfile4.txt"), []byte("mount data!"), 0o644); err != nil { t.Fatal(err) } @@ -215,8 +215,8 @@ func TestMountApply(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() - basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) - newfile := newTestFile("newfile.txt", []byte("new data!"), 0755) + basefile := newTestFile("testfile.txt", []byte("base data!"), 0o644) + newfile := newTestFile("newfile.txt", []byte("new data!"), 0o755) li := initWithFiles(basefile) layer, err := createLayer(ls, "", li) diff --git a/layer/ro_layer.go b/layer/ro_layer.go index 96418cab8d..55d5a4a5bb 100644 --- a/layer/ro_layer.go +++ b/layer/ro_layer.go @@ -175,6 +175,7 @@ func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { } return } + func (vrc *verifiedReadCloser) Close() error { return vrc.rc.Close() } diff --git a/libnetwork/drivers/overlay/ovmanager/ovmanager.go b/libnetwork/drivers/overlay/ovmanager/ovmanager.go index fcb0ea9cff..effd53005f 100644 --- a/libnetwork/drivers/overlay/ovmanager/ovmanager.go +++ b/libnetwork/drivers/overlay/ovmanager/ovmanager.go @@ -117,9 +117,9 @@ func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, n.subnets = append(n.subnets, s) } - val := fmt.Sprintf("%d", n.subnets[0].vni) + val := strconv.FormatUint(uint64(n.subnets[0].vni), 10) for _, s := range n.subnets[1:] { - val = val + fmt.Sprintf(",%d", s.vni) + val = val + "," + strconv.FormatUint(uint64(s.vni), 10) } opts[netlabel.OverlayVxlanIDList] = val diff --git a/libnetwork/drivers/windows/windows.go b/libnetwork/drivers/windows/windows.go index 5a0496fded..7695dbc9e5 100644 --- a/libnetwork/drivers/windows/windows.go +++ b/libnetwork/drivers/windows/windows.go @@ -71,12 +71,12 @@ type hnsEndpoint struct { nid string profileID string Type string - //Note: Currently, the sandboxID is the same as the containerID since windows does - //not expose the sandboxID. - //In the future, windows will support a proper sandboxID that is different - //than the containerID. - //Therefore, we are using sandboxID now, so that we won't have to change this code - //when windows properly supports a sandboxID. + // Note: Currently, the sandboxID is the same as the containerID since windows does + // not expose the sandboxID. + // In the future, windows will support a proper sandboxID that is different + // than the containerID. + // Therefore, we are using sandboxID now, so that we won't have to change this code + // when windows properly supports a sandboxID. 
sandboxID string macAddress net.HardwareAddr epOption *endpointOption // User specified parameters @@ -377,8 +377,8 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d for i, subnet := range hnsresponse.Subnets { var gwIP, subnetIP *net.IPNet - //The gateway returned from HNS is an IPAddress. - //We need to convert it to an IPNet to use as the Gateway of driverapi.IPAMData struct + // The gateway returned from HNS is an IPAddress. + // We need to convert it to an IPNet to use as the Gateway of driverapi.IPAMData struct gwCIDR := subnet.GatewayAddress + "/32" _, gwIP, err = net.ParseCIDR(gwCIDR) if err != nil { diff --git a/libnetwork/networkdb/networkdbdiagnostic.go b/libnetwork/networkdb/networkdbdiagnostic.go index f729930314..c1cbe130ac 100644 --- a/libnetwork/networkdb/networkdbdiagnostic.go +++ b/libnetwork/networkdb/networkdbdiagnostic.go @@ -61,7 +61,7 @@ func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -95,7 +95,7 @@ func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -118,7 +118,7 @@ func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -167,7 +167,7 @@ func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -215,7 +215,7 @@ func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -252,7 +252,7 @@ func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -298,7 +298,7 @@ func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), 
json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -330,7 +330,7 @@ func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -362,7 +362,7 @@ func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -410,7 +410,7 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) { @@ -448,5 +448,5 @@ func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) { diagnostic.HTTPReply(w, rsp, json) return } - diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json) } diff --git a/libnetwork/service_linux.go b/libnetwork/service_linux.go index 1900c75fee..bbe23783c7 100644 --- a/libnetwork/service_linux.go +++ b/libnetwork/service_linux.go @@ -600,7 +600,7 @@ func invokeFWMarker(path string, vip net.IP, fwMark uint32, ingressPorts []*Port cmd := &exec.Cmd{ Path: reexec.Self(), - Args: append([]string{"fwmarker"}, path, vip.String(), fmt.Sprintf("%d", fwMark), addDelOpt, ingressPortsFile, eIP.String(), lbMode), + Args: append([]string{"fwmarker"}, path, vip.String(), strconv.FormatUint(uint64(fwMark), 10), addDelOpt, ingressPortsFile, eIP.String(), lbMode), Stdout: os.Stdout, Stderr: os.Stderr, } diff --git a/libnetwork/types/types.go b/libnetwork/types/types.go index e4ade05902..caf9328bae 100644 --- a/libnetwork/types/types.go +++ b/libnetwork/types/types.go @@ -5,6 +5,7 @@ import ( "bytes" "fmt" "net" + "strconv" "strings" "github.com/ishidawataru/sctp" @@ -202,7 +203,7 @@ func (p Protocol) String() string { case SCTP: return "sctp" default: - return fmt.Sprintf("%d", p) + return strconv.Itoa(int(p)) } }
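
For reference, a minimal standalone sketch (not part of the patch, variable names are illustrative only) showing that the conversions used throughout this diff are drop-in equivalents: `strconv.Itoa`, `strconv.FormatInt`, `strconv.FormatUint` and `strconv.FormatBool` produce the same strings as the `fmt.Sprintf("%d", …)` / `fmt.Sprintf("%v", …)` calls they replace, without going through fmt's format-string parsing and interface boxing, and the `0o644`-style literals are purely notational.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The patch swaps fmt.Sprintf for direct strconv conversions; the output is identical.
	n := 42
	fmt.Println(fmt.Sprintf("%d", n) == strconv.Itoa(n))                         // true
	fmt.Println(fmt.Sprintf("%d", int64(1<<40)) == strconv.FormatInt(1<<40, 10)) // true
	fmt.Println(fmt.Sprintf("%v", true) == strconv.FormatBool(true))             // true

	// Unsigned values (the VNI / fwMark cases) go through FormatUint with an explicit widening.
	var vni uint32 = 4097
	fmt.Println(fmt.Sprintf("%d", vni) == strconv.FormatUint(uint64(vni), 10)) // true

	// The 0o644-style literals (Go 1.13+ octal prefix) have the same value as the legacy 0644 form.
	fmt.Println(0o644 == 0644) // true
}
```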
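Likewise, a small illustrative sketch (not part of the patch; the message text is sample only, borrowed from one of the hunks) of the behaviour gained by preferring `github.com/pkg/errors` over plain `fmt.Errorf` in the hunks above, presumably for consistency with the pkg/errors usage already present in these files: `errors.Wrap` keeps the original error reachable and records a stack trace at the wrap site.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// errors.Wrap prefixes the message and keeps the underlying cause reachable.
	cause := errors.New("invalid filter")
	err := errors.Wrap(cause, "prune could not marshal filters option")

	fmt.Println(err)                        // prune could not marshal filters option: invalid filter
	fmt.Println(errors.Cause(err) == cause) // true
	// fmt.Printf("%+v\n", err) would additionally print the stack trace recorded at the Wrap call.
}
```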