Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Extract log utils into pkg/log
Docker-DCO-1.1-Signed-off-by: Josiah Kiehl <josiah@capoferro.net> (github: capoferro)
commit a02f67be5b (parent 13eb477b62)
44 changed files with 465 additions and 346 deletions
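The diff below replaces calls to utils.Debugf and utils.Errorf, as well as calls to the standard library log package, with leveled helpers from the new pkg/log package (log.Debugf, log.Infof, log.Errorf, log.Fatalf). The following is a rough, hypothetical sketch of the surface these call sites rely on, not the implementation actually extracted in this commit; the DEBUG gating is an assumption carried over from the old utils.Debugf behaviour.

// Hypothetical sketch of the leveled logging helpers used in the diff
// below (Debugf, Infof, Errorf, Fatalf); the real pkg/log may differ.
package log

import (
	"fmt"
	"os"
)

// Assumption: like the old utils.Debugf, debug output is only emitted
// when the DEBUG environment variable is set.
var debugEnabled = os.Getenv("DEBUG") != ""

func logf(level, format string, a ...interface{}) {
	// Ensure each message ends with exactly one newline.
	if len(format) == 0 || format[len(format)-1] != '\n' {
		format += "\n"
	}
	fmt.Fprintf(os.Stderr, "["+level+"] "+format, a...)
}

// Debugf prints only when debugging is enabled.
func Debugf(format string, a ...interface{}) {
	if debugEnabled {
		logf("debug", format, a...)
	}
}

// Infof and Errorf always print.
func Infof(format string, a ...interface{})  { logf("info", format, a...) }
func Errorf(format string, a ...interface{}) { logf("error", format, a...) }

// Fatalf logs the message and exits with a non-zero status.
func Fatalf(format string, a ...interface{}) {
	logf("fatal", format, a...)
	os.Exit(1)
}

Call sites then read, for example, log.Debugf("unmount error %s", err) instead of utils.Debugf("unmount error %s", err), with no other change at the call site.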
@@ -28,6 +28,7 @@ import (
"github.com/docker/docker/engine"
"github.com/docker/docker/nat"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/filters"
"github.com/docker/docker/pkg/signal"

@@ -433,11 +434,11 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
log.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
log.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()

@@ -473,7 +474,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
}

if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote info: %s\n", err)
log.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()

@@ -597,10 +598,10 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
}
}
if sig == "" {
utils.Errorf("Unsupported signal: %d. Discarding.", s)
log.Errorf("Unsupported signal: %d. Discarding.", s)
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
utils.Debugf("Error sending signal: %s", err)
log.Debugf("Error sending signal: %s", err)
}
}
}()

@@ -690,7 +691,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
log.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr

@@ -1827,7 +1828,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
log.Debugf("Error monitoring TTY size: %s", err)
}
}

@@ -2098,9 +2099,9 @@ func (cli *DockerCli) CmdRun(args ...string) error {
// Block the return until the chan gets closed
defer func() {
utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
utils.Errorf("Hijack did not finish (chan still open)")
log.Errorf("Hijack did not finish (chan still open)")
}
}()

@@ -2146,7 +2147,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
case err := <-errCh:
if err != nil {
utils.Debugf("Error hijack: %s", err)
log.Debugf("Error hijack: %s", err)
return err
}
}

@@ -2158,13 +2159,13 @@ func (cli *DockerCli) CmdRun(args ...string) error {
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.Get("Id")); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
log.Errorf("Error monitoring TTY size: %s\n", err)
}
}

if errCh != nil {
if err := <-errCh; err != nil {
utils.Debugf("Error hijack: %s", err)
log.Debugf("Error hijack: %s", err)
return err
}
}
@@ -13,6 +13,7 @@ import (
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
)

@@ -93,7 +94,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
log.Debugf("[hijack] End of stdout")
return err
})
}

@@ -101,15 +102,15 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
log.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
log.Debugf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
log.Debugf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption

@@ -118,14 +119,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Debugf("Error receiveStdout: %s", err)
log.Debugf("Error receiveStdout: %s", err)
return err
}
}

if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Debugf("Error sendStdin: %s", err)
log.Debugf("Error sendStdin: %s", err)
return err
}
}
@@ -20,6 +20,7 @@ import (
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"

@@ -165,7 +166,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in
} else {
_, err = utils.StdCopy(stdout, stderr, resp.Body)
}
utils.Debugf("[stream] End of stdout")
log.Debugf("[stream] End of stdout")
return err
}
return nil

@@ -180,7 +181,7 @@ func (cli *DockerCli) resizeTty(id string) {
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
utils.Debugf("Error resize: %s", err)
log.Debugf("Error resize: %s", err)
}
}

@@ -237,7 +238,7 @@ func (cli *DockerCli) getTtySize() (int, int) {
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Debugf("Error getting size: %s", err)
log.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
@@ -6,9 +6,9 @@ import (
"strings"

"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/utils"
)

const (

@@ -43,7 +43,7 @@ func DisplayablePorts(ports *engine.Table) string {
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
}
return err == nil && mimetype == expectedType
}
@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/pprof"

@@ -21,17 +20,18 @@ import (
"syscall"

"code.google.com/p/go.net/websocket"
"github.com/docker/libcontainer/user"
"github.com/gorilla/mux"

"github.com/docker/docker/api"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/listenbuffer"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/systemd"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/user"
"github.com/gorilla/mux"
)

var (

@@ -88,7 +88,7 @@ func httpError(w http.ResponseWriter, err error) {
}

if err != nil {
utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
http.Error(w, err.Error(), statusCode)
}
}

@@ -439,7 +439,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := config.Decode(r.Body); err != nil {
utils.Errorf("%s", err)
log.Errorf("%s", err)
}

if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {

@@ -878,7 +878,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp
job.Stdout.Add(ws)
job.Stderr.Set(ws)
if err := job.Run(); err != nil {
utils.Errorf("Error attaching websocket: %s", err)
log.Errorf("Error attaching websocket: %s", err)
}
})
h.ServeHTTP(w, r)

@@ -1005,7 +1005,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
job.Stdout.Add(w)
if err := job.Run(); err != nil {
utils.Errorf("%s", err.Error())
log.Errorf("%s", err.Error())
if strings.Contains(err.Error(), "No such container") {
w.WriteHeader(http.StatusNotFound)
} else if strings.Contains(err.Error(), "no such file or directory") {

@@ -1033,16 +1033,16 @@ func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the request
utils.Debugf("Calling %s %s", localMethod, localRoute)
log.Debugf("Calling %s %s", localMethod, localRoute)

if logging {
log.Println(r.Method, r.RequestURI)
log.Infof("%s %s\n", r.Method, r.RequestURI)
}

if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
}
}
version := version.Version(mux.Vars(r)["version"])

@@ -1059,7 +1059,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
}

if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
utils.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
httpError(w, err)
}
}

@@ -1148,7 +1148,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
for method, routes := range m {
for route, fct := range routes {
utils.Debugf("Registering %s, %s", method, route)
log.Debugf("Registering %s, %s", method, route)
// NOTE: scope issue, make sure the variables are local and won't be changed
localRoute := route
localFct := fct

@@ -1238,7 +1238,7 @@ func changeGroup(addr string, nameOrGid string) error {
return err
}

utils.Debugf("%s group found. gid: %d", nameOrGid, gid)
log.Debugf("%s group found. gid: %d", nameOrGid, gid)
return os.Chown(addr, 0, gid)
}

@@ -1309,7 +1309,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
switch proto {
case "tcp":
if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
case "unix":
socketGroup := job.Getenv("SocketGroup")

@@ -1317,7 +1317,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
if err := changeGroup(addr, socketGroup); err != nil {
if socketGroup == "docker" {
// if the user hasn't explicitly specified the group ownership, don't fail on errors.
utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
} else {
return err
}

@@ -1352,7 +1352,7 @@ func ServeApi(job *engine.Job) engine.Status {
return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
}
go func() {
log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
log.Infof("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
}()
}
@@ -16,9 +16,11 @@ import (
"strings"
"syscall"

"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/utils"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

type (

@@ -61,7 +63,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
utils.Debugf("Len too short")
log.Debugf("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {

@@ -83,7 +85,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
utils.Debugf("[tar autodetect] n: %v", bs)
log.Debugf("[tar autodetect] n: %v", bs)

compression := DetectCompression(bs)

@@ -252,7 +254,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}

case tar.TypeXGlobalHeader:
utils.Debugf("PAX Global Extended Headers found and ignored")
log.Debugf("PAX Global Extended Headers found and ignored")
return nil

default:

@@ -340,7 +342,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
for _, include := range options.Includes {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
if err != nil {
utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
log.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
return nil
}

@@ -351,7 +353,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
skip, err := utils.Matches(relFilePath, options.Excludes)
if err != nil {
utils.Debugf("Error matching %s\n", relFilePath, err)
log.Debugf("Error matching %s\n", relFilePath, err)
return err
}

@@ -363,7 +365,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}

if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
log.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
}
return nil
})

@@ -371,13 +373,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
utils.Debugf("Can't close tar writer: %s\n", err)
log.Debugf("Can't close tar writer: %s\n", err)
}
if err := compressWriter.Close(); err != nil {
utils.Debugf("Can't close compress writer: %s\n", err)
log.Debugf("Can't close compress writer: %s\n", err)
}
if err := pipeWriter.Close(); err != nil {
utils.Debugf("Can't close pipe writer: %s\n", err)
log.Debugf("Can't close pipe writer: %s\n", err)
}
}()

@@ -489,7 +491,7 @@ loop:
// the output of one piped into the other. If either Tar or Untar fails,
// TarUntar aborts and returns the error.
func TarUntar(src string, dst string) error {
utils.Debugf("TarUntar(%s %s)", src, dst)
log.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err

@@ -526,11 +528,11 @@ func CopyWithTar(src, dst string) error {
return CopyFileWithTar(src, dst)
}
// Create dst, copy src's content into it
utils.Debugf("Creating dest directory: %s", dst)
log.Debugf("Creating dest directory: %s", dst)
if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
return err
}
utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
log.Debugf("Calling TarUntar(%s, %s)", src, dst)
return TarUntar(src, dst)
}

@@ -541,7 +543,7 @@ func CopyWithTar(src, dst string) error {
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
if err != nil {
return err
@@ -11,9 +11,10 @@ import (
"syscall"
"time"

"github.com/docker/docker/pkg/system"
"github.com/docker/docker/utils"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/system"
)

type ChangeType int

@@ -363,19 +364,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
ChangeTime: timestamp,
}
if err := tw.WriteHeader(hdr); err != nil {
utils.Debugf("Can't write whiteout header: %s\n", err)
log.Debugf("Can't write whiteout header: %s\n", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", path, err)
log.Debugf("Can't add file %s to tar: %s\n", path, err)
}
}
}

// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
utils.Debugf("Can't close layer: %s\n", err)
log.Debugf("Can't close layer: %s\n", err)
}
writer.Close()
}()
@@ -9,6 +9,7 @@ import (
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/utils"
)

@@ -36,25 +37,25 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
cLog, err := container.ReadLog("json")
if err != nil && os.IsNotExist(err) {
// Legacy logs
utils.Debugf("Old logs format")
log.Debugf("Old logs format")
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
utils.Errorf("Error reading logs (stdout): %s", err)
log.Errorf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
utils.Errorf("Error streaming logs (stdout): %s", err)
log.Errorf("Error streaming logs (stdout): %s", err)
}
}
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
utils.Errorf("Error reading logs (stderr): %s", err)
log.Errorf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
utils.Errorf("Error streaming logs (stderr): %s", err)
log.Errorf("Error streaming logs (stderr): %s", err)
}
}
} else if err != nil {
utils.Errorf("Error reading logs (json): %s", err)
log.Errorf("Error reading logs (json): %s", err)
} else {
dec := json.NewDecoder(cLog)
for {

@@ -63,7 +64,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
utils.Errorf("Error streaming logs: %s", err)
log.Errorf("Error streaming logs: %s", err)
break
}
if l.Stream == "stdout" && stdout {

@@ -88,7 +89,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
r, w := io.Pipe()
go func() {
defer w.Close()
defer utils.Debugf("Closing buffered stdin pipe")
defer log.Debugf("Closing buffered stdin pipe")
io.Copy(w, job.Stdin)
}()
cStdin = r

@@ -131,8 +132,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
errors <- err
} else {
go func() {
utils.Debugf("attach: stdin: begin")
defer utils.Debugf("attach: stdin: end")
log.Debugf("attach: stdin: begin")
defer log.Debugf("attach: stdin: end")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()

@@ -155,7 +156,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
utils.Errorf("attach: stdin: %s", err)
log.Errorf("attach: stdin: %s", err)
}
errors <- err
}()

@@ -168,8 +169,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
} else {
cStdout = p
go func() {
utils.Debugf("attach: stdout: begin")
defer utils.Debugf("attach: stdout: end")
log.Debugf("attach: stdout: begin")
defer log.Debugf("attach: stdout: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()

@@ -182,7 +183,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
utils.Errorf("attach: stdout: %s", err)
log.Errorf("attach: stdout: %s", err)
}
errors <- err
}()

@@ -193,7 +194,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
defer stdinCloser.Close()
}
if cStdout, err := container.StdoutPipe(); err != nil {
utils.Errorf("attach: stdout pipe: %s", err)
log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStdout)
}

@@ -206,8 +207,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
} else {
cStderr = p
go func() {
utils.Debugf("attach: stderr: begin")
defer utils.Debugf("attach: stderr: end")
log.Debugf("attach: stderr: begin")
defer log.Debugf("attach: stderr: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()

@@ -220,7 +221,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
utils.Errorf("attach: stderr: %s", err)
log.Errorf("attach: stderr: %s", err)
}
errors <- err
}()

@@ -232,7 +233,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}

if cStderr, err := container.StderrPipe(); err != nil {
utils.Errorf("attach: stdout pipe: %s", err)
log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStderr)
}

@@ -252,14 +253,14 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
// FIXME: how to clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
if err := <-errors; err != nil {
utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
return err
}
utils.Debugf("attach: job %d completed successfully", i+1)
log.Debugf("attach: job %d completed successfully", i+1)
}
utils.Debugf("attach: all jobs completed successfully")
log.Debugf("attach: all jobs completed successfully")
return nil
})
}
@@ -23,6 +23,7 @@ import (
"github.com/docker/docker/archive"
"github.com/docker/docker/engine"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"

@@ -262,11 +263,11 @@ func (b *buildFile) probeCache() (bool, error) {
return false, err
} else if cache != nil {
fmt.Fprintf(b.outStream, " ---> Using cache\n")
utils.Debugf("[BUILDER] Use cached version")
log.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return true, nil
} else {
utils.Debugf("[BUILDER] Cache miss")
log.Debugf("[BUILDER] Cache miss")
}
}
return false, nil

@@ -288,7 +289,7 @@ func (b *buildFile) CmdRun(args string) error {
defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

utils.Debugf("Command to be executed: %v", b.config.Cmd)
log.Debugf("Command to be executed: %v", b.config.Cmd)

hit, err := b.probeCache()
if err != nil {

@@ -378,7 +379,7 @@ func (b *buildFile) CmdEnv(args string) error {
func (b *buildFile) buildCmdFromJson(args string) []string {
var cmd []string
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
cmd = []string{"/bin/sh", "-c", args}
}
return cmd

@@ -551,7 +552,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string, decompre
if err := archive.UntarPath(origPath, tarDest); err == nil {
return nil
} else if err != io.EOF {
utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
}
}
@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"

@@ -15,6 +14,9 @@ import (
"syscall"
"time"

"github.com/docker/libcontainer/devices"
"github.com/docker/libcontainer/label"

"github.com/docker/docker/archive"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/graphdriver"

@@ -23,13 +25,12 @@ import (
"github.com/docker/docker/links"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/networkfs/etchosts"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/devices"
"github.com/docker/libcontainer/label"
)

const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

@@ -171,7 +172,7 @@ func (container *Container) WriteHostConfig() error {
func (container *Container) LogEvent(action string) {
d := container.daemon
if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
utils.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
}
}

@@ -503,7 +504,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
exitCode, err = container.daemon.Run(container, pipes, callback)
if err != nil {
utils.Errorf("Error running container: %s", err)
log.Errorf("Error running container: %s", err)
}
container.State.SetStopped(exitCode)

@@ -519,7 +520,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
// This will cause it to be restarted when the engine is restarted.
if container.daemon != nil && container.daemon.eng != nil && !container.daemon.eng.IsShutdown() {
if err := container.toDisk(); err != nil {
utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
log.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
}
}
return err

@@ -536,28 +537,28 @@ func (container *Container) cleanup() {
}
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
utils.Errorf("%s: Error close stdin: %s", container.ID, err)
log.Errorf("%s: Error close stdin: %s", container.ID, err)
}
}
if err := container.stdout.Clean(); err != nil {
utils.Errorf("%s: Error close stdout: %s", container.ID, err)
log.Errorf("%s: Error close stdout: %s", container.ID, err)
}
if err := container.stderr.Clean(); err != nil {
utils.Errorf("%s: Error close stderr: %s", container.ID, err)
log.Errorf("%s: Error close stderr: %s", container.ID, err)
}
if container.command != nil && container.command.Terminal != nil {
if err := container.command.Terminal.Close(); err != nil {
utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
log.Errorf("%s: Error closing terminal: %s", container.ID, err)
}
}

if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}

func (container *Container) KillSig(sig int) error {
utils.Debugf("Sending %d to %s", sig, container.ID)
log.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()

@@ -606,7 +607,7 @@ func (container *Container) Kill() error {
if _, err := container.State.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves
if pid := container.State.GetPid(); pid != 0 {
log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil {
return err
}

@@ -624,7 +625,7 @@ func (container *Container) Stop(seconds int) error {
// 1. Send a SIGTERM
if err := container.KillSig(15); err != nil {
log.Print("Failed to send SIGTERM to the process, force killing")
log.Infof("Failed to send SIGTERM to the process, force killing")
if err := container.KillSig(9); err != nil {
return err
}

@@ -632,7 +633,7 @@ func (container *Container) Stop(seconds int) error {
// 2. Wait for the process to exit on its own
if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
container.State.WaitStop(-1 * time.Second)

@@ -761,7 +762,7 @@ func (container *Container) GetSize() (int64, int64) {
)

if err := container.Mount(); err != nil {
utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
return sizeRw, sizeRootfs
}
defer container.Unmount()

@@ -769,7 +770,7 @@ func (container *Container) GetSize() (int64, int64) {
if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
sizeRw, err = differ.DiffSize(container.ID)
if err != nil {
utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
// FIXME: GetSize should return an error. Not changing it now in case
// there is a side-effect.
sizeRw = -1

@@ -866,7 +867,7 @@ func (container *Container) DisableLink(name string) {
if link, exists := container.activeLinks[name]; exists {
link.Disable()
} else {
utils.Debugf("Could not find active link for %s", name)
log.Debugf("Could not find active link for %s", name)
}
}
}

@@ -978,15 +979,15 @@ func (container *Container) initializeNetworking() error {
// Make sure the config is compatible with the current kernel
func (container *Container) verifyDaemonSettings() {
if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if container.daemon.sysInfo.IPv4ForwardingDisabled {
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
}

@@ -1123,7 +1124,7 @@ func (container *Container) waitForStart() error {
}
container.State.SetRunning(command.Pid())
if err := container.toDisk(); err != nil {
utils.Debugf("%s", err)
log.Debugf("%s", err)
}
close(waitStart)
}
@@ -4,7 +4,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"regexp"

@@ -13,6 +12,8 @@ import (
"sync"
"time"

"github.com/docker/libcontainer/label"

"github.com/docker/docker/archive"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers"

@@ -27,6 +28,7 @@ import (
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/parsers"

@@ -35,7 +37,6 @@ import (
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/label"
)

// Set the max depth to the aufs default that most

@@ -217,7 +218,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
// if so, then we need to restart monitor and init a new lock
// If the container is supposed to be running, make sure of it
if container.State.IsRunning() {
utils.Debugf("killing old running container %s", container.ID)
log.Debugf("killing old running container %s", container.ID)

existingPid := container.State.Pid
container.State.SetStopped(0)

@@ -234,23 +235,23 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
var err error
cmd.Process, err = os.FindProcess(existingPid)
if err != nil {
utils.Debugf("cannot find existing process for %d", existingPid)
log.Debugf("cannot find existing process for %d", existingPid)
}
daemon.execDriver.Terminate(cmd)
}

if err := container.Unmount(); err != nil {
utils.Debugf("unmount error %s", err)
log.Debugf("unmount error %s", err)
}
if err := container.ToDisk(); err != nil {
utils.Debugf("saving stopped state to disk %s", err)
log.Debugf("saving stopped state to disk %s", err)
}

info := daemon.execDriver.Info(container.ID)
if !info.IsRunning() {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
log.Debugf("Container %s was supposed to be running but is not.", container.ID)

utils.Debugf("Marking as stopped")
log.Debugf("Marking as stopped")

container.State.SetStopped(-127)
if err := container.ToDisk(); err != nil {

@@ -258,7 +259,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
}

if daemon.config.AutoRestart {
utils.Debugf("Marking as restarting")
log.Debugf("Marking as restarting")

if containersToStart != nil {
*containersToStart = append(*containersToStart, container)

@@ -278,7 +279,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
container.Name = name

if err := container.ToDisk(); err != nil {
utils.Debugf("Error saving container name %s", err)
log.Debugf("Error saving container name %s", err)
}
}
return nil

@@ -302,7 +303,7 @@ func (daemon *Daemon) restore() error {
)

if !debug {
fmt.Printf("Loading containers: ")
log.Infof("Loading containers: ")
}
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {

@@ -316,16 +317,16 @@ func (daemon *Daemon) restore() error {
fmt.Print(".")
}
if err != nil {
utils.Errorf("Failed to load container %v: %v", id, err)
log.Errorf("Failed to load container %v: %v", id, err)
continue
}

// Ignore the container if it does not support the current driver being used by the graph
if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
utils.Debugf("Loaded container %v", container.ID)
log.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}

@@ -337,7 +338,7 @@ func (daemon *Daemon) restore() error {
e := entities[p]
if container, ok := containers[e.ID()]; ok {
if err := daemon.register(container, false, &containersToStart); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
log.Debugf("Failed to register container %s: %s", container.ID, err)
}
delete(containers, e.ID())
}

@@ -349,22 +350,22 @@ func (daemon *Daemon) restore() error {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
utils.Debugf("Setting default id - %s", err)
log.Debugf("Setting default id - %s", err)
}
if err := daemon.register(container, false, &containersToStart); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
log.Debugf("Failed to register container %s: %s", container.ID, err)
}
}

for _, container := range containersToStart {
utils.Debugf("Starting container %d", container.ID)
log.Debugf("Starting container %d", container.ID)
if err := container.Start(); err != nil {
utils.Debugf("Failed to start container %s: %s", container.ID, err)
log.Debugf("Failed to start container %s: %s", container.ID, err)
}
}

if !debug {
fmt.Printf(": done.\n")
log.Infof(": done.\n")
}

return nil

@@ -707,7 +708,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
log.Fatalf("The Docker daemon needs to be run as root")
}
if err := checkKernelAndArch(); err != nil {
log.Fatal(err)
log.Fatalf(err.Error())
}

// set up the TempDir to use a canonical path

@@ -748,7 +749,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
if err != nil {
return nil, err
}
utils.Debugf("Using graph driver %s", driver)
log.Debugf("Using graph driver %s", driver)

// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
if config.EnableSelinuxSupport && driver.String() == "btrfs" {

@@ -766,7 +767,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
return nil, err
}

utils.Debugf("Creating images graph")
log.Debugf("Creating images graph")
g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
if err != nil {
return nil, err

@@ -778,12 +779,12 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
if err != nil {
return nil, err
}
utils.Debugf("Creating volumes graph")
log.Debugf("Creating volumes graph")
volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
if err != nil {
return nil, err
}
utils.Debugf("Creating repository list")
log.Debugf("Creating repository list")
repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)

@@ -862,18 +863,18 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
eng.OnShutdown(func() {
// FIXME: if these cleanup steps can be called concurrently, register
// them as separate handlers to speed up total shutdown time
// FIXME: use engine logging instead of utils.Errorf
// FIXME: use engine logging instead of log.Errorf
if err := daemon.shutdown(); err != nil {
utils.Errorf("daemon.shutdown(): %s", err)
log.Errorf("daemon.shutdown(): %s", err)
}
if err := portallocator.ReleaseAll(); err != nil {
utils.Errorf("portallocator.ReleaseAll(): %s", err)
log.Errorf("portallocator.ReleaseAll(): %s", err)
}
if err := daemon.driver.Cleanup(); err != nil {
utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
}
if err := daemon.containerGraph.Close(); err != nil {
utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
}
})

@@ -882,20 +883,20 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
func (daemon *Daemon) shutdown() error {
group := sync.WaitGroup{}
utils.Debugf("starting clean shutdown of all containers...")
log.Debugf("starting clean shutdown of all containers...")
for _, container := range daemon.List() {
c := container
if c.State.IsRunning() {
utils.Debugf("stopping %s", c.ID)
log.Debugf("stopping %s", c.ID)
group.Add(1)

go func() {
defer group.Done()
if err := c.KillSig(15); err != nil {
utils.Debugf("kill 15 error for %s - %s", c.ID, err)
log.Debugf("kill 15 error for %s - %s", c.ID, err)
}
c.State.WaitStop(-1 * time.Second)
utils.Debugf("container stopped %s", c.ID)
log.Debugf("container stopped %s", c.ID)
}()
}
}

@@ -1056,7 +1057,7 @@ func (daemon *Daemon) checkLocaldns() error {
return err
}
if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
daemon.config.Dns = DefaultDns
}
return nil

@@ -1107,11 +1108,11 @@ func checkKernelAndArch() error {
// the circumstances of pre-3.8 crashes are clearer.
// For details see http://github.com/docker/docker/issues/407
if k, err := kernel.GetKernelVersion(); err != nil {
log.Printf("WARNING: %s\n", err)
log.Infof("WARNING: %s\n", err)
} else {
if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
}
}
}
@@ -6,14 +6,14 @@ import (
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/daemon/graphdriver/aufs"
"github.com/docker/docker/graph"
"github.com/docker/docker/utils"
"github.com/docker/docker/pkg/log"
)

// Given the graphdriver ad, if it is aufs, then migrate it.
// If aufs driver is not built, this func is a noop.
func migrateIfAufs(driver graphdriver.Driver, root string) error {
if ad, ok := driver.(*aufs.Driver); ok {
utils.Debugf("Migrating existing containers")
log.Debugf("Migrating existing containers")
if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
return err
}
@@ -2,14 +2,13 @@ package daemon

import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"strings"

"github.com/docker/docker/engine"
"github.com/docker/docker/utils"
"github.com/docker/docker/pkg/log"
)

// FIXME: rename to ContainerRemove for consistency with the CLI command.

@@ -118,7 +117,7 @@ func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
for volumeId := range volumes {
// If the requested volu
if c, exists := usedVolumes[volumeId]; exists {
log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
continue
}
if err := daemon.Volumes().Delete(volumeId); err != nil {

@@ -153,7 +152,7 @@ func (daemon *Daemon) Destroy(container *Container) error {
daemon.containers.Delete(container.ID)

if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
utils.Debugf("Unable to remove container from link graph: %s", err)
log.Debugf("Unable to remove container from link graph: %s", err)
}

if err := daemon.driver.Remove(container.ID); err != nil {
@@ -14,13 +14,15 @@ import (
"syscall"
"time"

"github.com/kr/pty"

"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/label"
"github.com/docker/libcontainer/mount/nodes"
"github.com/kr/pty"
)

const DriverName = "lxc"

@@ -318,7 +320,7 @@ func (i *info) IsRunning() bool {
output, err := i.driver.getInfo(i.ID)
if err != nil {
utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
return false
}
if strings.Contains(string(output), "RUNNING") {
@@ -32,6 +32,7 @@ import (
"github.com/docker/docker/archive"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/log"
mountpk "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/label"

@@ -209,7 +210,7 @@ func (a *Driver) Remove(id string) error {
defer a.Unlock()

if a.active[id] != 0 {
utils.Errorf("Warning: removing active id %s\n", id)
log.Errorf("Warning: removing active id %s\n", id)
}

// Make sure the dir is umounted first

@@ -378,7 +379,7 @@ func (a *Driver) Cleanup() error {
for _, id := range ids {
if err := a.unmount(id); err != nil {
utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
}
}
@@ -1,14 +1,15 @@
package aufs

import (
"github.com/docker/docker/utils"
"os/exec"
"syscall"

"github.com/docker/docker/pkg/log"
)

func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
log.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
}
if err := syscall.Unmount(target, 0); err != nil {
return err
@@ -7,7 +7,7 @@ import (
"os"
"syscall"

"github.com/docker/docker/utils"
"github.com/docker/docker/pkg/log"
)

func stringToLoopName(src string) [LoNameSize]uint8 {

@@ -39,20 +39,20 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
fi, err := os.Stat(target)
if err != nil {
if os.IsNotExist(err) {
utils.Errorf("There are no more loopback devices available.")
log.Errorf("There are no more loopback devices available.")
}
return nil, ErrAttachLoopbackDevice
}

if fi.Mode()&os.ModeDevice != os.ModeDevice {
utils.Errorf("Loopback device %s is not a block device.", target)
log.Errorf("Loopback device %s is not a block device.", target)
continue
}

// OpenFile adds O_CLOEXEC
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
if err != nil {
utils.Errorf("Error openning loopback device: %s", err)
log.Errorf("Error openning loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}

@@ -62,7 +62,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
// If the error is EBUSY, then try the next loopback
if err != syscall.EBUSY {
utils.Errorf("Cannot set up loopback device %s: %s", target, err)
log.Errorf("Cannot set up loopback device %s: %s", target, err)
return nil, ErrAttachLoopbackDevice
}

@@ -75,7 +75,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
// This can't happen, but let's be sure
if loopFile == nil {
utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
return nil, ErrAttachLoopbackDevice
}

@@ -91,13 +91,13 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
// loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex()
if err != nil {
utils.Debugf("Error retrieving the next available loopback: %s", err)
log.Debugf("Error retrieving the next available loopback: %s", err)
}

// OpenFile adds O_CLOEXEC
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
if err != nil {
utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
log.Errorf("Error openning sparse file %s: %s", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
defer sparseFile.Close()

@@ -115,11 +115,11 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
}

if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
utils.Errorf("Cannot set up loopback device info: %s", err)
log.Errorf("Cannot set up loopback device info: %s", err)

// If the call failed, then free the loopback device
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
utils.Errorf("Error while cleaning up the loopback device")
log.Errorf("Error while cleaning up the loopback device")
}
loopFile.Close()
return nil, ErrAttachLoopbackDevice
@@ -19,9 +19,9 @@ import (
"time"

"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/units"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer/label"
)

@@ -174,7 +174,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
if !os.IsNotExist(err) {
return "", err
}
utils.Debugf("Creating loopback file %s for device-manage use", filename)
log.Debugf("Creating loopback file %s for device-manage use", filename)
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return "", err

@@ -252,7 +252,7 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
}

func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
utils.Debugf("registerDevice(%v, %v)", id, hash)
log.Debugf("registerDevice(%v, %v)", id, hash)
info := &DevInfo{
Hash: hash,
DeviceId: id,

@@ -278,7 +278,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev
}

func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
log.Debugf("activateDeviceIfNeeded(%v)", info.Hash)

if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
return nil

@@ -385,13 +385,13 @@ func (devices *DeviceSet) setupBaseImage() error {
}

if oldInfo != nil && !oldInfo.Initialized {
utils.Debugf("Removing uninitialized base image")
log.Debugf("Removing uninitialized base image")
if err := devices.deleteDevice(oldInfo); err != nil {
return err
}
}

utils.Debugf("Initializing base device-manager snapshot")
log.Debugf("Initializing base device-manager snapshot")

id := devices.nextDeviceId

@@ -403,14 +403,14 @@ func (devices *DeviceSet) setupBaseImage() error {
// Ids are 24bit, so wrap around
devices.nextDeviceId = (id + 1) & 0xffffff

utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
info, err := devices.registerDevice(id, "", devices.baseFsSize)
if err != nil {
_ = deleteDevice(devices.getPoolDevName(), id)
return err
}

utils.Debugf("Creating filesystem on base device-manager snapshot")
log.Debugf("Creating filesystem on base device-manager snapshot")

if err = devices.activateDeviceIfNeeded(info); err != nil {
return err

@@ -448,7 +448,7 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes
return // Ignore _LOG_DEBUG
}

utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}

func major(device uint64) uint64 {

@@ -552,13 +552,13 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
// - The target of this device is at major <maj> and minor <min>
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
utils.Debugf("Generated prefix: %s", devices.devicePrefix)
|
||||
log.Debugf("Generated prefix: %s", devices.devicePrefix)
|
||||
|
||||
// Check for the existence of the device <prefix>-pool
|
||||
utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
|
||||
log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
|
||||
info, err := getInfo(devices.getPoolName())
|
||||
if info == nil {
|
||||
utils.Debugf("Error device getInfo: %s", err)
|
||||
log.Debugf("Error device getInfo: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -574,7 +574,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
|
||||
// If the pool doesn't exist, create it
|
||||
if info.Exists == 0 {
|
||||
utils.Debugf("Pool doesn't exist. Creating it.")
|
||||
log.Debugf("Pool doesn't exist. Creating it.")
|
||||
|
||||
var (
|
||||
dataFile *os.File
|
||||
|
@ -596,7 +596,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
|
||||
data, err := devices.ensureImage("data", devices.dataLoopbackSize)
|
||||
if err != nil {
|
||||
utils.Debugf("Error device ensureImage (data): %s\n", err)
|
||||
log.Debugf("Error device ensureImage (data): %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -627,7 +627,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
|
||||
metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
|
||||
if err != nil {
|
||||
utils.Debugf("Error device ensureImage (metadata): %s\n", err)
|
||||
log.Debugf("Error device ensureImage (metadata): %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -659,7 +659,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
// Setup the base image
|
||||
if doInit {
|
||||
if err := devices.setupBaseImage(); err != nil {
|
||||
utils.Debugf("Error device setupBaseImage: %s\n", err)
|
||||
log.Debugf("Error device setupBaseImage: %s\n", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -686,7 +686,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
|
|||
deviceId := devices.nextDeviceId
|
||||
|
||||
if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
|
||||
utils.Debugf("Error creating snap device: %s\n", err)
|
||||
log.Debugf("Error creating snap device: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -695,7 +695,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
|
|||
|
||||
if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
|
||||
deleteDevice(devices.getPoolDevName(), deviceId)
|
||||
utils.Debugf("Error registering device: %s\n", err)
|
||||
log.Debugf("Error registering device: %s\n", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -708,7 +708,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
|||
// manually
|
||||
if err := devices.activateDeviceIfNeeded(info); err == nil {
|
||||
if err := BlockDeviceDiscard(info.DevName()); err != nil {
|
||||
utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
|
||||
log.Debugf("Error discarding block on device: %s (ignoring)\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -716,13 +716,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
|||
devinfo, _ := getInfo(info.Name())
|
||||
if devinfo != nil && devinfo.Exists != 0 {
|
||||
if err := devices.removeDeviceAndWait(info.Name()); err != nil {
|
||||
utils.Debugf("Error removing device: %s\n", err)
|
||||
log.Debugf("Error removing device: %s\n", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
|
||||
utils.Debugf("Error deleting device: %s\n", err)
|
||||
log.Debugf("Error deleting device: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -735,7 +735,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
|||
devices.devicesLock.Lock()
|
||||
devices.Devices[info.Hash] = info
|
||||
devices.devicesLock.Unlock()
|
||||
utils.Debugf("Error removing meta data: %s\n", err)
|
||||
log.Debugf("Error removing meta data: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -758,8 +758,8 @@ func (devices *DeviceSet) DeleteDevice(hash string) error {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) deactivatePool() error {
|
||||
utils.Debugf("[devmapper] deactivatePool()")
|
||||
defer utils.Debugf("[devmapper] deactivatePool END")
|
||||
log.Debugf("[devmapper] deactivatePool()")
|
||||
defer log.Debugf("[devmapper] deactivatePool END")
|
||||
devname := devices.getPoolDevName()
|
||||
devinfo, err := getInfo(devname)
|
||||
if err != nil {
|
||||
|
@ -773,13 +773,13 @@ func (devices *DeviceSet) deactivatePool() error {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
|
||||
utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
|
||||
defer utils.Debugf("[devmapper] deactivateDevice END")
|
||||
log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
|
||||
defer log.Debugf("[devmapper] deactivateDevice END")
|
||||
|
||||
// Wait for the unmount to be effective,
|
||||
// by watching the value of Info.OpenCount for the device
|
||||
if err := devices.waitClose(info); err != nil {
|
||||
utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
|
||||
log.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
|
||||
}
|
||||
|
||||
devinfo, err := getInfo(info.Name())
|
||||
|
@ -829,8 +829,8 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
|
|||
// a) the device registered at <device_set_prefix>-<hash> is removed,
|
||||
// or b) the 10 second timeout expires.
|
||||
func (devices *DeviceSet) waitRemove(devname string) error {
|
||||
utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
|
||||
defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
|
||||
log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
|
||||
defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
|
||||
i := 0
|
||||
for ; i < 1000; i += 1 {
|
||||
devinfo, err := getInfo(devname)
|
||||
|
@ -840,7 +840,7 @@ func (devices *DeviceSet) waitRemove(devname string) error {
|
|||
return nil
|
||||
}
|
||||
if i%100 == 0 {
|
||||
utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
||||
log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
||||
}
|
||||
if devinfo.Exists == 0 {
|
||||
break
|
||||
|
@ -867,7 +867,7 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
|
|||
return err
|
||||
}
|
||||
if i%100 == 0 {
|
||||
utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
|
||||
log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
|
||||
}
|
||||
if devinfo.OpenCount == 0 {
|
||||
break
|
||||
|
@ -884,9 +884,9 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
|
|||
|
||||
func (devices *DeviceSet) Shutdown() error {
|
||||
|
||||
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
||||
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
||||
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
||||
log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
||||
log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
||||
defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
||||
|
||||
var devs []*DevInfo
|
||||
|
||||
|
@ -903,12 +903,12 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
// container. This means it'll go away from the global scope directly,
|
||||
// and the device will be released when that container dies.
|
||||
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
|
||||
utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
|
||||
log.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
|
||||
}
|
||||
|
||||
devices.Lock()
|
||||
if err := devices.deactivateDevice(info); err != nil {
|
||||
utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
|
||||
log.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
|
||||
}
|
||||
devices.Unlock()
|
||||
}
|
||||
|
@ -920,7 +920,7 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
info.lock.Lock()
|
||||
devices.Lock()
|
||||
if err := devices.deactivateDevice(info); err != nil {
|
||||
utils.Debugf("Shutdown deactivate base , error: %s\n", err)
|
||||
log.Debugf("Shutdown deactivate base , error: %s\n", err)
|
||||
}
|
||||
devices.Unlock()
|
||||
info.lock.Unlock()
|
||||
|
@ -928,7 +928,7 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
|
||||
devices.Lock()
|
||||
if err := devices.deactivatePool(); err != nil {
|
||||
utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
|
||||
log.Debugf("Shutdown deactivate pool , error: %s\n", err)
|
||||
}
|
||||
devices.Unlock()
|
||||
|
||||
|
@ -992,8 +992,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) UnmountDevice(hash string) error {
|
||||
utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
|
||||
defer utils.Debugf("[devmapper] UnmountDevice END")
|
||||
log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
|
||||
defer log.Debugf("[devmapper] UnmountDevice END")
|
||||
|
||||
info, err := devices.lookupDevice(hash)
|
||||
if err != nil {
|
||||
|
@ -1015,11 +1015,11 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
|
||||
log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
|
||||
if err := syscall.Unmount(info.mountPath, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
utils.Debugf("[devmapper] Unmount done")
|
||||
log.Debugf("[devmapper] Unmount done")
|
||||
|
||||
if err := devices.deactivateDevice(info); err != nil {
|
||||
return err
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
type DevmapperLogger interface {
|
||||
|
@ -198,7 +198,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
|
|||
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
|
||||
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
|
||||
if err != nil {
|
||||
utils.Errorf("Error get loopback backing file: %s\n", err)
|
||||
log.Errorf("Error get loopback backing file: %s\n", err)
|
||||
return 0, 0, ErrGetLoopbackBackingFile
|
||||
}
|
||||
return loopInfo.loDevice, loopInfo.loInode, nil
|
||||
|
@ -206,7 +206,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
|
|||
|
||||
func LoopbackSetCapacity(file *os.File) error {
|
||||
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
|
||||
utils.Errorf("Error loopbackSetCapacity: %s", err)
|
||||
log.Errorf("Error loopbackSetCapacity: %s", err)
|
||||
return ErrLoopbackSetCapacity
|
||||
}
|
||||
return nil
|
||||
|
@ -246,7 +246,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
|
|||
|
||||
func UdevWait(cookie uint) error {
|
||||
if res := DmUdevWait(cookie); res != 1 {
|
||||
utils.Debugf("Failed to wait on udev cookie %d", cookie)
|
||||
log.Debugf("Failed to wait on udev cookie %d", cookie)
|
||||
return ErrUdevWait
|
||||
}
|
||||
return nil
|
||||
|
@ -265,7 +265,7 @@ func logInit(logger DevmapperLogger) {
|
|||
|
||||
func SetDevDir(dir string) error {
|
||||
if res := DmSetDevDir(dir); res != 1 {
|
||||
utils.Debugf("Error dm_set_dev_dir")
|
||||
log.Debugf("Error dm_set_dev_dir")
|
||||
return ErrSetDevDir
|
||||
}
|
||||
return nil
|
||||
|
@ -286,7 +286,7 @@ func RemoveDevice(name string) error {
|
|||
return ErrCreateRemoveTask
|
||||
}
|
||||
if err := task.SetName(name); err != nil {
|
||||
utils.Debugf("Can't set task name %s", name)
|
||||
log.Debugf("Can't set task name %s", name)
|
||||
return err
|
||||
}
|
||||
if err := task.Run(); err != nil {
|
||||
|
@ -298,7 +298,7 @@ func RemoveDevice(name string) error {
|
|||
func GetBlockDeviceSize(file *os.File) (uint64, error) {
|
||||
size, err := ioctlBlkGetSize64(file.Fd())
|
||||
if err != nil {
|
||||
utils.Errorf("Error getblockdevicesize: %s", err)
|
||||
log.Errorf("Error getblockdevicesize: %s", err)
|
||||
return 0, ErrGetBlockSize
|
||||
}
|
||||
return uint64(size), nil
|
||||
|
@ -417,21 +417,21 @@ func getDriverVersion() (string, error) {
|
|||
func getStatus(name string) (uint64, uint64, string, string, error) {
|
||||
task, err := createTask(DeviceStatus, name)
|
||||
if task == nil {
|
||||
utils.Debugf("getStatus: Error createTask: %s", err)
|
||||
log.Debugf("getStatus: Error createTask: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if err := task.Run(); err != nil {
|
||||
utils.Debugf("getStatus: Error Run: %s", err)
|
||||
log.Debugf("getStatus: Error Run: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
|
||||
devinfo, err := task.GetInfo()
|
||||
if err != nil {
|
||||
utils.Debugf("getStatus: Error GetInfo: %s", err)
|
||||
log.Debugf("getStatus: Error GetInfo: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if devinfo.Exists == 0 {
|
||||
utils.Debugf("getStatus: Non existing device %s", name)
|
||||
log.Debugf("getStatus: Non existing device %s", name)
|
||||
return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
|
||||
}
|
||||
|
||||
|
@ -491,7 +491,7 @@ func resumeDevice(name string) error {
|
|||
}
|
||||
|
||||
func createDevice(poolName string, deviceId *int) error {
|
||||
utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
|
||||
log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
|
||||
|
||||
for {
|
||||
task, err := createTask(DeviceTargetMsg, poolName)
|
||||
|
@ -542,8 +542,8 @@ func deleteDevice(poolName string, deviceId int) error {
|
|||
}
|
||||
|
||||
func removeDevice(name string) error {
|
||||
utils.Debugf("[devmapper] removeDevice START")
|
||||
defer utils.Debugf("[devmapper] removeDevice END")
|
||||
log.Debugf("[devmapper] removeDevice START")
|
||||
defer log.Debugf("[devmapper] removeDevice END")
|
||||
task, err := createTask(DeviceRemove, name)
|
||||
if task == nil {
|
||||
return err
|
||||
|
|
|
@ -9,8 +9,8 @@ import (
|
|||
"path"
|
||||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -138,7 +138,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
|
||||
func (d *Driver) Put(id string) {
|
||||
if err := d.DeviceSet.UnmountDevice(id); err != nil {
|
||||
utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
|
||||
log.Errorf("Warning: error unmounting device %s: %s\n", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
"github.com/docker/docker/pkg/parsers/operatingsystem"
|
||||
"github.com/docker/docker/registry"
|
||||
|
@ -30,7 +31,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
|
|||
operatingSystem = s
|
||||
}
|
||||
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
|
||||
utils.Errorf("Could not determine if daemon is containerized: %v", err)
|
||||
log.Errorf("Could not determine if daemon is containerized: %v", err)
|
||||
operatingSystem += " (error determining if containerized)"
|
||||
} else if inContainer {
|
||||
operatingSystem += " (containerized)"
|
||||
|
|
|
@ -9,11 +9,11 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/tailfile"
|
||||
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/jsonlog"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
||||
|
@ -47,31 +47,31 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
|||
cLog, err := container.ReadLog("json")
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
// Legacy logs
|
||||
utils.Debugf("Old logs format")
|
||||
log.Debugf("Old logs format")
|
||||
if stdout {
|
||||
cLog, err := container.ReadLog("stdout")
|
||||
if err != nil {
|
||||
utils.Errorf("Error reading logs (stdout): %s", err)
|
||||
log.Errorf("Error reading logs (stdout): %s", err)
|
||||
} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
|
||||
utils.Errorf("Error streaming logs (stdout): %s", err)
|
||||
log.Errorf("Error streaming logs (stdout): %s", err)
|
||||
}
|
||||
}
|
||||
if stderr {
|
||||
cLog, err := container.ReadLog("stderr")
|
||||
if err != nil {
|
||||
utils.Errorf("Error reading logs (stderr): %s", err)
|
||||
log.Errorf("Error reading logs (stderr): %s", err)
|
||||
} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
|
||||
utils.Errorf("Error streaming logs (stderr): %s", err)
|
||||
log.Errorf("Error streaming logs (stderr): %s", err)
|
||||
}
|
||||
}
|
||||
} else if err != nil {
|
||||
utils.Errorf("Error reading logs (json): %s", err)
|
||||
log.Errorf("Error reading logs (json): %s", err)
|
||||
} else {
|
||||
if tail != "all" {
|
||||
var err error
|
||||
lines, err = strconv.Atoi(tail)
|
||||
if err != nil {
|
||||
utils.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
|
||||
log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
|
||||
lines = -1
|
||||
}
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
|||
if err := dec.Decode(l); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
utils.Errorf("Error streaming logs: %s", err)
|
||||
log.Errorf("Error streaming logs: %s", err)
|
||||
break
|
||||
}
|
||||
logLine := l.Log
|
||||
|
@ -127,7 +127,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
|||
}
|
||||
err := <-errors
|
||||
if err != nil {
|
||||
utils.Errorf("%s", err)
|
||||
log.Errorf("%s", err)
|
||||
}
|
||||
}
|
||||
return engine.StatusOK
|
||||
|
|
|
@ -3,7 +3,6 @@ package bridge
|
|||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -14,9 +13,9 @@ import (
|
|||
"github.com/docker/docker/daemon/networkdriver/portmapper"
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/iptables"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/networkfs/resolvconf"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/libcontainer/netlink"
|
||||
)
|
||||
|
||||
|
@ -197,7 +196,7 @@ func setupIPTables(addr net.Addr, icc bool) error {
|
|||
iptables.Raw(append([]string{"-D"}, acceptArgs...)...)
|
||||
|
||||
if !iptables.Exists(dropArgs...) {
|
||||
utils.Debugf("Disable inter-container communication")
|
||||
log.Debugf("Disable inter-container communication")
|
||||
if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
|
||||
return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
|
||||
} else if len(output) != 0 {
|
||||
|
@ -208,7 +207,7 @@ func setupIPTables(addr net.Addr, icc bool) error {
|
|||
iptables.Raw(append([]string{"-D"}, dropArgs...)...)
|
||||
|
||||
if !iptables.Exists(acceptArgs...) {
|
||||
utils.Debugf("Enable inter-container communication")
|
||||
log.Debugf("Enable inter-container communication")
|
||||
if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil {
|
||||
return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
|
||||
} else if len(output) != 0 {
|
||||
|
@ -272,7 +271,7 @@ func createBridge(bridgeIP string) error {
|
|||
ifaceAddr = addr
|
||||
break
|
||||
} else {
|
||||
utils.Debugf("%s %s", addr, err)
|
||||
log.Debugf("%s %s", addr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -281,7 +280,7 @@ func createBridge(bridgeIP string) error {
|
|||
if ifaceAddr == "" {
|
||||
return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
|
||||
}
|
||||
utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
|
||||
log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
|
||||
|
||||
if err := createBridgeIface(bridgeIface); err != nil {
|
||||
return err
|
||||
|
@ -311,7 +310,7 @@ func createBridgeIface(name string) error {
|
|||
// only set the bridge's mac address if the kernel version is > 3.3
|
||||
// before that it was not supported
|
||||
setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
|
||||
utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
|
||||
log.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
|
||||
return netlink.CreateBridge(name, setBridgeMacAddr)
|
||||
}
|
||||
|
||||
|
@ -364,12 +363,12 @@ func Release(job *engine.Job) engine.Status {
|
|||
|
||||
for _, nat := range containerInterface.PortMappings {
|
||||
if err := portmapper.Unmap(nat); err != nil {
|
||||
log.Printf("Unable to unmap port %s: %s", nat, err)
|
||||
log.Infof("Unable to unmap port %s: %s", nat, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
|
||||
log.Printf("Unable to release ip %s\n", err)
|
||||
log.Infof("Unable to release ip %s\n", err)
|
||||
}
|
||||
return engine.StatusOK
|
||||
}
|
||||
|
|
|
@ -9,8 +9,8 @@ import (
|
|||
|
||||
"github.com/docker/docker/archive"
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
// CmdImageExport exports all images with the given tag. All versions
|
||||
|
@ -30,7 +30,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
|||
}
|
||||
defer os.RemoveAll(tempdir)
|
||||
|
||||
utils.Debugf("Serializing %s", name)
|
||||
log.Debugf("Serializing %s", name)
|
||||
|
||||
rootRepoMap := map[string]Repository{}
|
||||
rootRepo, err := s.Get(name)
|
||||
|
@ -77,7 +77,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
|||
return job.Error(err)
|
||||
}
|
||||
} else {
|
||||
utils.Debugf("There were no repositories to write")
|
||||
log.Debugf("There were no repositories to write")
|
||||
}
|
||||
|
||||
fs, err := archive.Tar(tempdir, archive.Uncompressed)
|
||||
|
@ -89,7 +89,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
|||
if _, err := io.Copy(job.Stdout, fs); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
utils.Debugf("End Serializing %s", name)
|
||||
log.Debugf("End Serializing %s", name)
|
||||
return engine.StatusOK
|
||||
}
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/truncindex"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
|
@ -64,7 +65,7 @@ func (graph *Graph) restore() error {
|
|||
}
|
||||
}
|
||||
graph.idIndex = truncindex.NewTruncIndex(ids)
|
||||
utils.Debugf("Restored %d elements", len(dir))
|
||||
log.Debugf("Restored %d elements", len(dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/docker/docker/archive"
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
// Loads a set of images into the repository. This is the complementary of ImageExport.
|
||||
|
@ -93,22 +93,22 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
|
|||
|
||||
func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
|
||||
if err := eng.Job("image_get", address).Run(); err != nil {
|
||||
utils.Debugf("Loading %s", address)
|
||||
log.Debugf("Loading %s", address)
|
||||
|
||||
imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
|
||||
if err != nil {
|
||||
utils.Debugf("Error reading json", err)
|
||||
log.Debugf("Error reading json", err)
|
||||
return err
|
||||
}
|
||||
|
||||
layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
|
||||
if err != nil {
|
||||
utils.Debugf("Error reading embedded tar", err)
|
||||
log.Debugf("Error reading embedded tar", err)
|
||||
return err
|
||||
}
|
||||
img, err := image.NewImgJSON(imageJson)
|
||||
if err != nil {
|
||||
utils.Debugf("Error unmarshalling json", err)
|
||||
log.Debugf("Error unmarshalling json", err)
|
||||
return err
|
||||
}
|
||||
if img.Parent != "" {
|
||||
|
@ -122,7 +122,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string
|
|||
return err
|
||||
}
|
||||
}
|
||||
utils.Debugf("Completed processing %s", address)
|
||||
log.Debugf("Completed processing %s", address)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
@ -85,10 +86,10 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName,
|
|||
}
|
||||
}
|
||||
|
||||
utils.Debugf("Retrieving the tag list")
|
||||
log.Debugf("Retrieving the tag list")
|
||||
tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
|
||||
if err != nil {
|
||||
utils.Errorf("%v", err)
|
||||
log.Errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -100,7 +101,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName,
|
|||
}
|
||||
}
|
||||
|
||||
utils.Debugf("Registering tags")
|
||||
log.Debugf("Registering tags")
|
||||
// If no tag has been specified, pull them all
|
||||
if askedTag == "" {
|
||||
for tag, id := range tagsList {
|
||||
|
@ -119,7 +120,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName,
|
|||
for _, image := range repoData.ImgList {
|
||||
downloadImage := func(img *registry.ImgData) {
|
||||
if askedTag != "" && img.Tag != askedTag {
|
||||
utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
|
||||
log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
|
||||
if parallel {
|
||||
errors <- nil
|
||||
}
|
||||
|
@ -127,7 +128,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName,
|
|||
}
|
||||
|
||||
if img.Tag == "" {
|
||||
utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
|
||||
log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
|
||||
if parallel {
|
||||
errors <- nil
|
||||
}
|
||||
|
@ -141,7 +142,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName,
|
|||
<-c
|
||||
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
|
||||
} else {
|
||||
utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
|
||||
log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
|
||||
}
|
||||
if parallel {
|
||||
errors <- nil
|
||||
|
@ -224,7 +225,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
|
|||
|
||||
// ensure no two downloads of the same layer happen at the same time
|
||||
if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
|
||||
utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
|
||||
log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
|
||||
<-c
|
||||
}
|
||||
defer s.poolRemove("pull", "layer:"+id)
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/archive"
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
@ -54,15 +55,15 @@ func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string
|
|||
if len(imageList) == 0 {
|
||||
return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
|
||||
}
|
||||
utils.Debugf("Image list: %v", imageList)
|
||||
utils.Debugf("Tags by image: %v", tagsByImage)
|
||||
log.Debugf("Image list: %v", imageList)
|
||||
log.Debugf("Tags by image: %v", tagsByImage)
|
||||
|
||||
return imageList, tagsByImage, nil
|
||||
}
|
||||
|
||||
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
|
||||
out = utils.NewWriteFlusher(out)
|
||||
utils.Debugf("Local repo: %s", localRepo)
|
||||
log.Debugf("Local repo: %s", localRepo)
|
||||
imgList, tagsByImage, err := s.getImageList(localRepo, tag)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -96,9 +97,9 @@ func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName,
|
|||
}
|
||||
}
|
||||
|
||||
utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
|
||||
log.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
|
||||
for _, data := range imageIndex {
|
||||
utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
|
||||
log.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
|
||||
}
|
||||
|
||||
// Register all the images in a repository with the registry
|
||||
|
@ -170,7 +171,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID,
|
|||
defer os.RemoveAll(layerData.Name())
|
||||
|
||||
// Send the layer
|
||||
utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
|
||||
log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
|
||||
|
||||
checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
|
||||
if err != nil {
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
func (s *TagStore) Install(eng *engine.Engine) error {
|
||||
|
@ -173,7 +173,7 @@ func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
|
|||
if written, err := io.Copy(job.Stdout, fs); err != nil {
|
||||
return job.Error(err)
|
||||
} else {
|
||||
utils.Debugf("rendered layer for %s of [%d] size", image.ID, written)
|
||||
log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
|
||||
}
|
||||
|
||||
return engine.StatusOK
|
||||
|
|
|
@ -3,15 +3,17 @@ package image
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/docker/docker/archive"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/archive"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
type Image struct {
|
||||
|
@ -87,11 +89,11 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro
|
|||
}
|
||||
} else {
|
||||
start := time.Now().UTC()
|
||||
utils.Debugf("Start untar layer")
|
||||
log.Debugf("Start untar layer")
|
||||
if err := archive.ApplyLayer(layer, layerData); err != nil {
|
||||
return err
|
||||
}
|
||||
utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
||||
log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
||||
|
||||
if img.Parent == "" {
|
||||
if size, err = utils.TreeSize(layer); err != nil {
|
||||
|
@ -299,7 +301,7 @@ func (img *Image) Depth() (int, error) {
|
|||
func NewImgJSON(src []byte) (*Image, error) {
|
||||
ret := &Image{}
|
||||
|
||||
utils.Debugf("Json string: {%s}", src)
|
||||
log.Debugf("Json string: {%s}", src)
|
||||
// FIXME: Is there a cleaner way to "purify" the input json?
|
||||
if err := json.Unmarshal(src, ret); err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api/client"
|
||||
"github.com/docker/docker/daemon"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
@ -174,7 +175,7 @@ func TestRunDisconnectTty(t *testing.T) {
|
|||
// We're simulating a disconnect so the return value doesn't matter. What matters is the
|
||||
// fact that CmdRun returns.
|
||||
if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
|
||||
utils.Debugf("Error CmdRun: %s", err)
|
||||
log.Debugf("Error CmdRun: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -414,7 +415,7 @@ func TestAttachDisconnect(t *testing.T) {
|
|||
go func() {
|
||||
// Start a process in daemon mode
|
||||
if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
|
||||
utils.Debugf("Error CmdRun: %s", err)
|
||||
log.Debugf("Error CmdRun: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
std_log "log"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -20,6 +20,7 @@ import (
|
|||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/nat"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/reexec"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
|
@ -99,7 +100,7 @@ func init() {
|
|||
}
|
||||
|
||||
if uid := syscall.Geteuid(); uid != 0 {
|
||||
log.Fatal("docker tests need to be run as root")
|
||||
log.Fatalf("docker tests need to be run as root")
|
||||
}
|
||||
|
||||
// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
|
||||
|
@ -133,7 +134,7 @@ func init() {
|
|||
}
|
||||
|
||||
func setupBaseImage() {
|
||||
eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase)
|
||||
eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
|
||||
job := eng.Job("image_inspect", unitTestImageName)
|
||||
img, _ := job.Stdout.AddEnv()
|
||||
// If the unit test is not found, try to download it.
|
||||
|
@ -149,17 +150,17 @@ func setupBaseImage() {
|
|||
|
||||
func spawnGlobalDaemon() {
|
||||
if globalDaemon != nil {
|
||||
utils.Debugf("Global daemon already exists. Skipping.")
|
||||
log.Debugf("Global daemon already exists. Skipping.")
|
||||
return
|
||||
}
|
||||
t := log.New(os.Stderr, "", 0)
|
||||
t := std_log.New(os.Stderr, "", 0)
|
||||
eng := NewTestEngine(t)
|
||||
globalEngine = eng
|
||||
globalDaemon = mkDaemonFromEngine(eng, t)
|
||||
|
||||
// Spawn a Daemon
|
||||
go func() {
|
||||
utils.Debugf("Spawning global daemon for integration tests")
|
||||
log.Debugf("Spawning global daemon for integration tests")
|
||||
listenURL := &url.URL{
|
||||
Scheme: testDaemonProto,
|
||||
Host: testDaemonAddr,
|
||||
|
@ -197,7 +198,7 @@ func spawnRogueHttpsDaemon() {
|
|||
}
|
||||
|
||||
func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
|
||||
t := log.New(os.Stderr, "", 0)
|
||||
t := std_log.New(os.Stderr, "", 0)
|
||||
root, err := newTestDirectory(unitTestStoreBase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -209,7 +210,7 @@ func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
|
|||
|
||||
// Spawn a Daemon
|
||||
go func() {
|
||||
utils.Debugf("Spawning https daemon for integration tests")
|
||||
log.Debugf("Spawning https daemon for integration tests")
|
||||
listenURL := &url.URL{
|
||||
Scheme: testDaemonHttpsProto,
|
||||
Host: addr,
|
||||
|
|
|
@ -4,11 +4,11 @@ import (
"bytes"
"encoding/json"
"io"
"log"
"sync"
"time"

"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/log"
)

// BroadcastWriter accumulate multiple io.WriteCloser by stream.

@ -56,7 +56,7 @@ func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
}
b, err := json.Marshal(jsonlog.JSONLog{Log: line, Stream: stream, Created: created})
if err != nil {
log.Printf("Error making JSON log line: %s", err)
log.Errorf("Error making JSON log line: %s", err)
continue
}
b = append(b, '\n')
|
|
|
@ -3,9 +3,10 @@ package httputils
import (
"fmt"
"io"
"log"
"net/http"
"time"

"github.com/docker/docker/pkg/log"
)

type resumableRequestReader struct {

@ -71,7 +72,7 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
r.cleanUpResponse()
}
if err != nil && err != io.EOF {
log.Printf("encountered error during pull and clearing it before resume: %s", err)
log.Infof("encountered error during pull and clearing it before resume: %s", err)
err = nil
}
return n, err
|
|
77
pkg/log/log.go
Normal file
@ -0,0 +1,77 @@
package log

import (
    "fmt"
    "io"
    "os"
    "runtime"
    "strings"
)

type priority int

const (
    errorFormat = "[%s] %s:%d %s\n"
    logFormat   = "[%s] %s\n"

    fatal priority = iota
    error
    info
    debug
)

func (p priority) String() string {
    switch p {
    case fatal:
        return "fatal"
    case error:
        return "error"
    case info:
        return "info"
    case debug:
        return "debug"
    }

    return ""
}

// Debug function, if the debug flag is set, then display. Do nothing otherwise
// If Docker is in damon mode, also send the debug info on the socket
func Debugf(format string, a ...interface{}) {
    if os.Getenv("DEBUG") != "" {
        logf(os.Stderr, debug, format, a...)
    }
}

func Infof(format string, a ...interface{}) {
    logf(os.Stdout, info, format, a...)
}

func Errorf(format string, a ...interface{}) {
    logf(os.Stderr, error, format, a...)
}

func Fatalf(format string, a ...interface{}) {
    logf(os.Stderr, fatal, format, a...)
    os.Exit(1)
}

func logf(stream io.Writer, level priority, format string, a ...interface{}) {
    var prefix string

    if level <= error || level == debug {
        // Retrieve the stack infos
        _, file, line, ok := runtime.Caller(2)
        if !ok {
            file = "<unknown>"
            line = -1
        } else {
            file = file[strings.LastIndex(file, "/")+1:]
        }
        prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format)
    } else {
        prefix = fmt.Sprintf(logFormat, level.String(), format)
    }

    fmt.Fprintf(stream, prefix, a...)
}
37
pkg/log/log_test.go
Normal file
@ -0,0 +1,37 @@
package log

import (
    "bytes"
    "regexp"

    "testing"
)

func TestLogFatalf(t *testing.T) {
    var output *bytes.Buffer

    tests := []struct {
        Level           priority
        Format          string
        Values          []interface{}
        ExpectedPattern string
    }{
        {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"},
        {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"},
        {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"},
        {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"},
    }

    for i, test := range tests {
        output = &bytes.Buffer{}
        logf(output, test.Level, test.Format, test.Values...)

        expected := regexp.MustCompile(test.ExpectedPattern)
        if !expected.MatchString(output.String()) {
            t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s",
                i,
                expected.String(),
                output.String())
        }
    }
}
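A note on the expected patterns in the test above: logf looks up its caller with runtime.Caller(2), two frames above logf, which in normal use is the line that called Debugf, Errorf or Fatalf. The test calls logf directly, so the frame two levels up is the testing harness itself, which is why the fatal, error and debug cases match testing.go:\d+ rather than log_test.go (the info format carries no file and line at all). Assuming a conventional GOPATH checkout, the suite runs with "go test ./pkg/log" from the repository root.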
|
@ -7,12 +7,13 @@ import (
"encoding/hex"
"hash"
"io"
"log"
"sort"
"strconv"
"strings"

"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

"github.com/docker/docker/pkg/log"
)

type TarSum struct {

@ -170,11 +171,11 @@ func (ts *TarSum) Sum(extra []byte) string {
h.Write(extra)
}
for _, sum := range sums {
log.Printf("-->%s<--", sum)
log.Infof("-->%s<--", sum)
h.Write([]byte(sum))
}
checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil))
log.Printf("checksum processed: %s", checksum)
log.Infof("checksum processed: %s", checksum)
return checksum
}
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/log"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
|
@ -186,17 +187,17 @@ func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) {
|
|||
Standalone: true,
|
||||
}
|
||||
if err := json.Unmarshal(jsonString, &info); err != nil {
|
||||
utils.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
|
||||
log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
|
||||
// don't stop here. Just assume sane defaults
|
||||
}
|
||||
if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
|
||||
utils.Debugf("Registry version header: '%s'", hdr)
|
||||
log.Debugf("Registry version header: '%s'", hdr)
|
||||
info.Version = hdr
|
||||
}
|
||||
utils.Debugf("RegistryInfo.Version: %q", info.Version)
|
||||
log.Debugf("RegistryInfo.Version: %q", info.Version)
|
||||
|
||||
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
|
||||
utils.Debugf("Registry standalone header: '%s'", standalone)
|
||||
log.Debugf("Registry standalone header: '%s'", standalone)
|
||||
// Accepted values are "true" (case-insensitive) and "1".
|
||||
if strings.EqualFold(standalone, "true") || standalone == "1" {
|
||||
info.Standalone = true
|
||||
|
@ -204,7 +205,7 @@ func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) {
|
|||
// there is a header set, and it is not "true" or "1", so assume fails
|
||||
info.Standalone = false
|
||||
}
|
||||
utils.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
|
||||
log.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
|
||||
return info, nil
|
||||
}
|
||||
|
||||
|
@ -274,7 +275,7 @@ func ExpandAndVerifyRegistryUrl(hostname string) (string, error) {
|
|||
}
|
||||
endpoint := fmt.Sprintf("https://%s/v1/", hostname)
|
||||
if _, err := pingRegistryEndpoint(endpoint); err != nil {
|
||||
utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
||||
log.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
||||
endpoint = fmt.Sprintf("http://%s/v1/", hostname)
|
||||
if _, err = pingRegistryEndpoint(endpoint); err != nil {
|
||||
//TODO: triggering highland build can be done there without "failing"
|
||||
|
|
|
@ -3,8 +3,6 @@ package registry
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/gorilla/mux"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
@ -14,6 +12,10 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -96,7 +98,7 @@ func init() {
|
|||
|
||||
func handlerAccessLog(handler http.Handler) http.Handler {
|
||||
logHandler := func(w http.ResponseWriter, r *http.Request) {
|
||||
utils.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
|
||||
log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
|
||||
handler.ServeHTTP(w, r)
|
||||
}
|
||||
return http.HandlerFunc(logHandler)
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/docker/nat"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
func Merge(userConf, imageConf *Config) error {
|
||||
|
@ -50,7 +50,7 @@ func Merge(userConf, imageConf *Config) error {
|
|||
}
|
||||
if len(imageConf.PortSpecs) > 0 {
|
||||
// FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia.
|
||||
utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
|
||||
log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
|
||||
if userConf.ExposedPorts == nil {
|
||||
userConf.ExposedPorts = make(nat.PortSet)
|
||||
}
|
||||
|
|
|
@ -226,7 +226,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
|
|||
// parse the '-e' and '--env' after, to allow override
|
||||
envVariables = append(envVariables, flEnv.GetAll()...)
|
||||
// boo, there's no debug output for docker run
|
||||
//utils.Debugf("Environment variables for the container: %#v", envVariables)
|
||||
//log.Debugf("Environment variables for the container: %#v", envVariables)
|
||||
|
||||
netMode, err := parseNetMode(*flNetMode)
|
||||
if err != nil {
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
// VersionInfo is used to model entities which has a version.
|
||||
|
@ -157,6 +159,6 @@ func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
Debugf("%v -- HEADERS: %v", req.URL, req.Header)
|
||||
log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
|
||||
return req, err
|
||||
}
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -85,13 +87,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
|
|||
nr += nr2
|
||||
if er == io.EOF {
|
||||
if nr < StdWriterPrefixLen {
|
||||
Debugf("Corrupted prefix: %v", buf[:nr])
|
||||
log.Debugf("Corrupted prefix: %v", buf[:nr])
|
||||
return written, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if er != nil {
|
||||
Debugf("Error reading header: %s", er)
|
||||
log.Debugf("Error reading header: %s", er)
|
||||
return 0, er
|
||||
}
|
||||
}
|
||||
|
@ -107,18 +109,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
|
|||
// Write on stderr
|
||||
out = dsterr
|
||||
default:
|
||||
Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
|
||||
log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
|
||||
return 0, ErrInvalidStdHeader
|
||||
}
|
||||
|
||||
// Retrieve the size of the frame
|
||||
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
|
||||
Debugf("framesize: %d", frameSize)
|
||||
log.Debugf("framesize: %d", frameSize)
|
||||
|
||||
// Check if the buffer is big enough to read the frame.
|
||||
// Extend it if necessary.
|
||||
if frameSize+StdWriterPrefixLen > bufLen {
|
||||
Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
|
||||
log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
|
||||
buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
|
||||
bufLen = len(buf)
|
||||
}
|
||||
|
@ -130,13 +132,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
|
|||
nr += nr2
|
||||
if er == io.EOF {
|
||||
if nr < frameSize+StdWriterPrefixLen {
|
||||
Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
|
||||
log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
|
||||
return written, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if er != nil {
|
||||
Debugf("Error reading frame: %s", er)
|
||||
log.Debugf("Error reading frame: %s", er)
|
||||
return 0, er
|
||||
}
|
||||
}
|
||||
|
@ -144,12 +146,12 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
|
|||
// Write the retrieved frame (without header)
|
||||
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
|
||||
if ew != nil {
|
||||
Debugf("Error writing frame: %s", ew)
|
||||
log.Debugf("Error writing frame: %s", ew)
|
||||
return 0, ew
|
||||
}
|
||||
// If the frame has not been fully written: error
|
||||
if nw != frameSize {
|
||||
Debugf("Error Short Write: (%d on %d)", nw, frameSize)
|
||||
log.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
|
||||
return 0, io.ErrShortWrite
|
||||
}
|
||||
written += int64(nw)
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"syscall"
|
||||
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/pkg/log"
|
||||
)
|
||||
|
||||
type KeyValuePair struct {
|
||||
|
@ -54,31 +55,6 @@ func Download(url string) (resp *http.Response, err error) {
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
func logf(level string, format string, a ...interface{}) {
|
||||
// Retrieve the stack infos
|
||||
_, file, line, ok := runtime.Caller(2)
|
||||
if !ok {
|
||||
file = "<unknown>"
|
||||
line = -1
|
||||
} else {
|
||||
file = file[strings.LastIndex(file, "/")+1:]
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
|
||||
}
|
||||
|
||||
// Debug function, if the debug flag is set, then display. Do nothing otherwise
|
||||
// If Docker is in damon mode, also send the debug info on the socket
|
||||
func Debugf(format string, a ...interface{}) {
|
||||
if os.Getenv("DEBUG") != "" {
|
||||
logf("debug", format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
func Errorf(format string, a ...interface{}) {
|
||||
logf("error", format, a...)
|
||||
}
|
||||
|
||||
func Trunc(s string, maxlen int) string {
|
||||
if len(s) <= maxlen {
|
||||
return s
|
||||
|
@ -264,7 +240,7 @@ func (r *bufReader) Close() error {
|
|||
|
||||
func GetTotalUsedFds() int {
|
||||
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
||||
Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||
log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||
} else {
|
||||
return len(fds)
|
||||
}
|
||||
|
@ -705,15 +681,15 @@ func Matches(relFilePath string, patterns []string) (bool, error) {
|
|||
for _, exclude := range patterns {
|
||||
matched, err := filepath.Match(exclude, relFilePath)
|
||||
if err != nil {
|
||||
Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
|
||||
log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
|
||||
return false, err
|
||||
}
|
||||
if matched {
|
||||
if filepath.Clean(relFilePath) == "." {
|
||||
Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
|
||||
log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
|
||||
continue
|
||||
}
|
||||
Debugf("Skipping excluded path: %s", relFilePath)
|
||||
log.Debugf("Skipping excluded path: %s", relFilePath)
|
||||
return true, nil
|
||||
}
|
||||
}