Fix logrus formatting

This fix cleans up logrus formatting by removing the `f` suffix from
`logrus.[Error|Warn|Debug|Fatal|Panic|Info]f` calls when no format string
is present.
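
As a minimal illustrative sketch of the pattern (not part of the diff below, and assuming the `github.com/Sirupsen/logrus` import path vendored at the time):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	// Before: the f-suffixed variants interpret the first argument as a
	// format string, so a literal "%" in the message would be treated as
	// a formatting directive.
	logrus.Debugf("Registering routers")

	// After: with no arguments to format, the plain variant logs the
	// message verbatim and makes the intent explicit.
	logrus.Debug("Registering routers")

	// The f-suffixed form remains the right choice when there are
	// arguments to format.
	logrus.Debugf("Registering %d routers", 3)
}
```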

Fixes #23459.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang 2016-06-11 13:16:55 -07:00
parent ec1790d7f1
commit a72b45dbec
19 changed files with 42 additions and 42 deletions

@@ -46,7 +46,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
 _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
 }
-logrus.Debugf("[hijack] End of stdout")
+logrus.Debug("[hijack] End of stdout")
 receiveStdout <- err
 }()
 }
@@ -62,7 +62,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
 cli.restoreTerminal(inputStream)
 })
 }
-logrus.Debugf("[hijack] End of stdin")
+logrus.Debug("[hijack] End of stdin")
 }
 if err := resp.CloseWrite(); err != nil {

@@ -163,7 +163,7 @@ func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
 func (s *Server) createMux() *mux.Router {
 m := mux.NewRouter()
-logrus.Debugf("Registering routers")
+logrus.Debug("Registering routers")
 for _, apiRouter := range s.routers {
 for _, r := range apiRouter.Routes() {
 f := s.makeHTTPHandler(r.Handler())

@@ -284,12 +284,12 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S
 // Wait for initialization to complete.
 failed := <-h.tosvc
 if failed {
-logrus.Debugf("Aborting service start due to failure during initializtion")
+logrus.Debug("Aborting service start due to failure during initializtion")
 return true, 1
 }
 s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
-logrus.Debugf("Service running")
+logrus.Debug("Service running")
 Loop:
 for {
 select {

@@ -393,7 +393,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
 if stdin == nil || !openStdin {
 return
 }
-logrus.Debugf("attach: stdin: begin")
+logrus.Debug("attach: stdin: begin")
 var err error
 if tty {
@@ -419,7 +419,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
 cStderr.Close()
 }
 }
-logrus.Debugf("attach: stdin: end")
+logrus.Debug("attach: stdin: end")
 wg.Done()
 }()

@@ -28,7 +28,7 @@ func (s *Health) String() string {
 // it returns nil.
 func (s *Health) OpenMonitorChannel() chan struct{} {
 if s.stop == nil {
-logrus.Debugf("OpenMonitorChannel")
+logrus.Debug("OpenMonitorChannel")
 s.stop = make(chan struct{})
 return s.stop
 }
@@ -38,12 +38,12 @@ func (s *Health) OpenMonitorChannel() chan struct{} {
 // CloseMonitorChannel closes any existing monitor channel.
 func (s *Health) CloseMonitorChannel() {
 if s.stop != nil {
-logrus.Debugf("CloseMonitorChannel: waiting for probe to stop")
+logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
 // This channel does not buffer. Once the write succeeds, the monitor
 // has read the stop request and will not make any further updates
 // to c.State.Health.
 s.stop <- struct{}{}
 s.stop = nil
-logrus.Debugf("CloseMonitorChannel done")
+logrus.Debug("CloseMonitorChannel done")
 }
 }

@@ -114,7 +114,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadClose
 r, w := io.Pipe()
 go func() {
 defer w.Close()
-defer logrus.Debugf("Closing buffered stdin pipe")
+defer logrus.Debug("Closing buffered stdin pipe")
 io.Copy(w, stdin)
 }()
 stdinPipe = r

@@ -175,7 +175,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 r, w := io.Pipe()
 go func() {
 defer w.Close()
-defer logrus.Debugf("Closing buffered stdin pipe")
+defer logrus.Debug("Closing buffered stdin pipe")
 pools.Copy(w, stdin)
 }()
 cStdin = r

@@ -699,7 +699,7 @@ func (devices *DeviceSet) startDeviceDeletionWorker() {
 return
 }
-logrus.Debugf("devmapper: Worker to cleanup deleted devices started")
+logrus.Debug("devmapper: Worker to cleanup deleted devices started")
 for range devices.deletionWorkerTicker.C {
 devices.cleanupDeletedDevices()
 }
@@ -1002,7 +1002,7 @@ func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
 }
 func (devices *DeviceSet) createBaseImage() error {
-logrus.Debugf("devmapper: Initializing base device-mapper thin volume")
+logrus.Debug("devmapper: Initializing base device-mapper thin volume")
 // Create initial device
 info, err := devices.createRegisterDevice("")
@@ -1010,7 +1010,7 @@ func (devices *DeviceSet) createBaseImage() error {
 return err
 }
-logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume")
+logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
 if err := devices.activateDeviceIfNeeded(info, false); err != nil {
 return err
@@ -1188,7 +1188,7 @@ func (devices *DeviceSet) setupBaseImage() error {
 return nil
 }
-logrus.Debugf("devmapper: Removing uninitialized base image")
+logrus.Debug("devmapper: Removing uninitialized base image")
 // If previous base device is in deferred delete state,
 // that needs to be cleaned up first. So don't try
 // deferred deletion.
@@ -1455,7 +1455,7 @@ func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
 func (devices *DeviceSet) closeTransaction() error {
 if err := devices.updatePoolTransactionID(); err != nil {
-logrus.Debugf("devmapper: Failed to close Transaction")
+logrus.Debug("devmapper: Failed to close Transaction")
 return err
 }
 return nil
@@ -1644,7 +1644,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 if !devicemapper.LibraryDeferredRemovalSupport {
 return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
 }
-logrus.Debugf("devmapper: Deferred removal support enabled.")
+logrus.Debug("devmapper: Deferred removal support enabled.")
 devices.deferredRemove = true
 }
@@ -1652,7 +1652,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 if !devices.deferredRemove {
 return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
 }
-logrus.Debugf("devmapper: Deferred deletion support enabled.")
+logrus.Debug("devmapper: Deferred deletion support enabled.")
 devices.deferredDelete = true
 }
@@ -1716,7 +1716,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 // If the pool doesn't exist, create it
 if !poolExists && devices.thinPoolDevice == "" {
-logrus.Debugf("devmapper: Pool doesn't exist. Creating it.")
+logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
 var (
 dataFile *os.File
@@ -2044,8 +2044,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
 }
 func (devices *DeviceSet) deactivatePool() error {
-logrus.Debugf("devmapper: deactivatePool()")
-defer logrus.Debugf("devmapper: deactivatePool END")
+logrus.Debug("devmapper: deactivatePool()")
+defer logrus.Debug("devmapper: deactivatePool END")
 devname := devices.getPoolDevName()
 devinfo, err := devicemapper.GetInfo(devname)
@@ -2304,7 +2304,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
 return err
 }
-logrus.Debugf("devmapper: Unmount done")
+logrus.Debug("devmapper: Unmount done")
 if err := devices.deactivateDevice(info); err != nil {
 return err

@@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
 options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
 GIDMaps: gdw.gidMaps}
 start := time.Now().UTC()
-logrus.Debugf("Start untar layer")
+logrus.Debug("Start untar layer")
 if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
 return
 }

@@ -154,10 +154,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 for {
 select {
 case <-stop:
-logrus.Debugf("Stop healthcheck monitoring (received while idle)")
+logrus.Debug("Stop healthcheck monitoring (received while idle)")
 return
 case <-time.After(probeInterval):
-logrus.Debugf("Running health check...")
+logrus.Debug("Running health check...")
 startTime := time.Now()
 ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
 results := make(chan *types.HealthcheckResult)
@@ -180,7 +180,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 }()
 select {
 case <-stop:
-logrus.Debugf("Stop healthcheck monitoring (received while probing)")
+logrus.Debug("Stop healthcheck monitoring (received while probing)")
 // Stop timeout and kill probe, but don't wait for probe to exit.
 cancelProbe()
 return
@@ -189,7 +189,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 // Stop timeout
 cancelProbe()
 case <-ctx.Done():
-logrus.Debugf("Health check taking too long")
+logrus.Debug("Health check taking too long")
 handleProbeResult(d, c, &types.HealthcheckResult{
 ExitCode: -1,
 Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout),

@@ -85,7 +85,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 return nil
 case msg, ok := <-logs.Msg:
 if !ok {
-logrus.Debugf("logs: end stream")
+logrus.Debug("logs: end stream")
 logs.Close()
 return nil
 }

@@ -89,7 +89,7 @@ func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) erro
 return err
 }
-logrus.Debugf("Retrieving the tag list")
+logrus.Debug("Retrieving the tag list")
 var tagsList map[string]string
 if !isTagged {
 tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo)

@@ -208,7 +208,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
 size = 0
 } else {
 if size != 0 && offset > size {
-logrus.Debugf("Partial download is larger than full blob. Starting over")
+logrus.Debug("Partial download is larger than full blob. Starting over")
 offset = 0
 if err := ld.truncateDownloadFile(); err != nil {
 return nil, 0, xfer.DoNotRetry{Err: err}

@@ -130,7 +130,7 @@ func DetectCompression(source []byte) Compression {
 Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
 } {
 if len(source) < len(m) {
-logrus.Debugf("Len too short")
+logrus.Debug("Len too short")
 continue
 }
 if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -408,7 +408,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 }
 case tar.TypeXGlobalHeader:
-logrus.Debugf("PAX Global Extended Headers found and ignored")
+logrus.Debug("PAX Global Extended Headers found and ignored")
 return nil
 default:

@@ -155,7 +155,7 @@ func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 func (rm *responseModifier) CloseNotify() <-chan bool {
 closeNotifier, ok := rm.rw.(http.CloseNotifier)
 if !ok {
-logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface")
+logrus.Error("Internal response writer doesn't support the CloseNotifier interface")
 return nil
 }
 return closeNotifier.CloseNotify()
@@ -165,7 +165,7 @@ func (rm *responseModifier) CloseNotify() <-chan bool {
 func (rm *responseModifier) Flush() {
 flusher, ok := rm.rw.(http.Flusher)
 if !ok {
-logrus.Errorf("Internal response writer doesn't support the Flusher interface")
+logrus.Error("Internal response writer doesn't support the Flusher interface")
 return
 }

@@ -279,7 +279,7 @@ func LogInit(logger DevmapperLogger) {
 // SetDevDir sets the dev folder for the device mapper library (usually /dev).
 func SetDevDir(dir string) error {
 if res := DmSetDevDir(dir); res != 1 {
-logrus.Debugf("devicemapper: Error dm_set_dev_dir")
+logrus.Debug("devicemapper: Error dm_set_dev_dir")
 return ErrSetDevDir
 }
 return nil

@@ -47,7 +47,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
 fi, err := os.Stat(target)
 if err != nil {
 if os.IsNotExist(err) {
-logrus.Errorf("There are no more loopback devices available.")
+logrus.Error("There are no more loopback devices available.")
 }
 return nil, ErrAttachLoopbackDevice
 }
@@ -127,7 +127,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
 // If the call failed, then free the loopback device
 if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
-logrus.Errorf("Error while cleaning up the loopback device")
+logrus.Error("Error while cleaning up the loopback device")
 }
 loopFile.Close()
 return nil, ErrAttachLoopbackDevice

@@ -49,11 +49,11 @@ func Trap(cleanup func()) {
 }
 } else {
 // 3 SIGTERM/INT signals received; force exit without cleanup
-logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
 }
 case syscall.SIGQUIT:
 DumpStacks()
-logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
 }
 //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
 os.Exit(128 + int(sig.(syscall.Signal)))

@@ -302,10 +302,10 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io
 }
 if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
-logrus.Debugf("server supports resume")
+logrus.Debug("server supports resume")
 return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
 }
-logrus.Debugf("server doesn't support resume")
+logrus.Debug("server doesn't support resume")
 return res.Body, nil
 }