Remove dependencies on Docker internal code from pkg, moving shared ID helpers into pkg/common

Closes #10922

Signed-off-by: Srini Brahmaroutu <srbrahma@us.ibm.com>
This commit is contained in:
Srini Brahmaroutu 2015-02-21 04:48:23 +00:00
parent 05ba127f06
commit 7a9c944b82
20 changed files with 146 additions and 129 deletions

View File

@ -33,6 +33,7 @@ import (
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/opts" "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag" flag "github.com/docker/docker/pkg/mflag"
@ -1075,7 +1076,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
if *noTrunc { if *noTrunc {
fmt.Fprintf(w, "%s\t", outID) fmt.Fprintf(w, "%s\t", outID)
} else { } else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) fmt.Fprintf(w, "%s\t", common.TruncateID(outID))
} }
fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
@ -1090,7 +1091,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
if *noTrunc { if *noTrunc {
fmt.Fprintln(w, outID) fmt.Fprintln(w, outID)
} else { } else {
fmt.Fprintln(w, utils.TruncateID(outID)) fmt.Fprintln(w, common.TruncateID(outID))
} }
} }
} }
@ -1384,7 +1385,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
} }
if matchName != "" { if matchName != "" {
if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) { if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) {
startImage = image startImage = image
} }
@ -1453,7 +1454,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
repo, tag := parsers.ParseRepositoryTag(repotag) repo, tag := parsers.ParseRepositoryTag(repotag)
outID := out.Get("Id") outID := out.Get("Id")
if !*noTrunc { if !*noTrunc {
outID = utils.TruncateID(outID) outID = common.TruncateID(outID)
} }
if !*quiet { if !*quiet {
@ -1508,8 +1509,8 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin
imageID = image.Get("Id") imageID = image.Get("Id")
parentID = image.Get("ParentId") parentID = image.Get("ParentId")
} else { } else {
imageID = utils.TruncateID(image.Get("Id")) imageID = common.TruncateID(image.Get("Id"))
parentID = utils.TruncateID(image.Get("ParentId")) parentID = common.TruncateID(image.Get("ParentId"))
} }
if parentID == "" { if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
@ -1528,7 +1529,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri
if noTrunc { if noTrunc {
imageID = image.Get("Id") imageID = image.Get("Id")
} else { } else {
imageID = utils.TruncateID(image.Get("Id")) imageID = common.TruncateID(image.Get("Id"))
} }
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize")))) fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
@ -1636,7 +1637,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
outID := out.Get("Id") outID := out.Get("Id")
if !*noTrunc { if !*noTrunc {
outID = utils.TruncateID(outID) outID = common.TruncateID(outID)
} }
if *quiet { if *quiet {

View File

@ -32,6 +32,7 @@ import (
"github.com/docker/docker/builder/parser" "github.com/docker/docker/builder/parser"
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/tarsum"
@ -155,7 +156,7 @@ func (b *Builder) Run(context io.Reader) (string, error) {
} }
return "", err return "", err
} }
fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image)) fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image))
if b.Remove { if b.Remove {
b.clearTmp() b.clearTmp()
} }
@ -165,7 +166,7 @@ func (b *Builder) Run(context io.Reader) (string, error) {
return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
} }
fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image))
return b.image, nil return b.image, nil
} }

View File

@ -25,6 +25,7 @@ import (
imagepkg "github.com/docker/docker/image" imagepkg "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
@ -531,7 +532,7 @@ func (b *Builder) create() (*daemon.Container, error) {
} }
b.TmpContainers[c.ID] = struct{}{} b.TmpContainers[c.ID] = struct{}{}
fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID))
if len(config.Cmd) > 0 { if len(config.Cmd) > 0 {
// override the entry point that may have been picked up from the base image // override the entry point that may have been picked up from the base image
@ -713,11 +714,11 @@ func (b *Builder) clearTmp() {
} }
if err := b.Daemon.Rm(tmp); err != nil { if err := b.Daemon.Rm(tmp); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", common.TruncateID(c), err.Error())
return return
} }
b.Daemon.DeleteVolumes(tmp.VolumePaths()) b.Daemon.DeleteVolumes(tmp.VolumePaths())
delete(b.TmpContainers, c) delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c))
} }
} }

View File

@ -25,6 +25,7 @@ import (
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/networkfs/etchosts" "github.com/docker/docker/pkg/networkfs/etchosts"
"github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/networkfs/resolvconf"
@ -704,7 +705,7 @@ func (container *Container) Kill() error {
if _, err := container.WaitStop(10 * time.Second); err != nil { if _, err := container.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves // Ensure that we don't kill ourselves
if pid := container.GetPid(); pid != 0 { if pid := container.GetPid(); pid != 0 {
log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", common.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil { if err := syscall.Kill(pid, 9); err != nil {
if err != syscall.ESRCH { if err != syscall.ESRCH {
return err return err

View File

@ -31,6 +31,7 @@ import (
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/namesgenerator"
@ -511,7 +512,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
var ( var (
err error err error
id = utils.GenerateRandomID() id = common.GenerateRandomID()
) )
if name == "" { if name == "" {
@ -556,7 +557,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
nameAsKnownByUser := strings.TrimPrefix(name, "/") nameAsKnownByUser := strings.TrimPrefix(name, "/")
return "", fmt.Errorf( return "", fmt.Errorf(
"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser, "Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
utils.TruncateID(conflictingContainer.ID)) common.TruncateID(conflictingContainer.ID))
} }
} }
return name, nil return name, nil
@ -579,7 +580,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
return name, nil return name, nil
} }
name = "/" + utils.TruncateID(id) name = "/" + common.TruncateID(id)
if _, err := daemon.containerGraph.Set(name, id); err != nil { if _, err := daemon.containerGraph.Set(name, id); err != nil {
return "", err return "", err
} }

View File

@ -12,10 +12,10 @@ import (
"github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/daemon/execdriver/lxc"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
) )
type execConfig struct { type execConfig struct {
@ -141,7 +141,7 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
} }
execConfig := &execConfig{ execConfig := &execConfig{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
OpenStdin: config.AttachStdin, OpenStdin: config.AttachStdin,
OpenStdout: config.AttachStdout, OpenStdout: config.AttachStdout,
OpenStderr: config.AttachStderr, OpenStderr: config.AttachStderr,

View File

@ -34,6 +34,7 @@ import (
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/common"
mountpk "github.com/docker/docker/pkg/mount" mountpk "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
"github.com/docker/libcontainer/label" "github.com/docker/libcontainer/label"
@ -404,7 +405,7 @@ func (a *Driver) Cleanup() error {
for _, id := range ids { for _, id := range ids {
if err := a.unmount(id); err != nil { if err := a.unmount(id); err != nil {
log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) log.Errorf("Unmounting %s: %s", common.TruncateID(id), err)
} }
} }

View File

@ -7,8 +7,8 @@ import (
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/graph" "github.com/docker/docker/graph"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/utils"
) )
func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status { func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
@ -143,11 +143,11 @@ func (daemon *Daemon) canDeleteImage(imgID string, force bool) error {
if imgID == p.ID { if imgID == p.ID {
if container.IsRunning() { if container.IsRunning() {
if force { if force {
return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", common.TruncateID(imgID), common.TruncateID(container.ID))
} }
return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID))
} else if !force { } else if !force {
return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID))
} }
} }
return nil return nil

View File

@ -8,8 +8,8 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
) )
const defaultTimeIncrement = 100 const defaultTimeIncrement = 100
@ -230,7 +230,7 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max { if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached", log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
utils.TruncateID(m.container.ID), max) common.TruncateID(m.container.ID), max)
return false return false
} }

View File

@ -17,6 +17,7 @@ import (
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
@ -116,7 +117,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) {
// Create creates a new image and registers it in the graph. // Create creates a new image and registers it in the graph.
func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
img := &image.Image{ img := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: comment, Comment: comment,
Created: time.Now().UTC(), Created: time.Now().UTC(),
DockerVersion: dockerversion.VERSION, DockerVersion: dockerversion.VERSION,
@ -209,14 +210,14 @@ func (graph *Graph) TempLayerArchive(id string, sf *utils.StreamFormatter, outpu
if err != nil { if err != nil {
return nil, err return nil, err
} }
progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") progress := utils.ProgressReader(a, 0, output, sf, false, common.TruncateID(id), "Buffering to disk")
defer progress.Close() defer progress.Close()
return archive.NewTempArchive(progress, tmp) return archive.NewTempArchive(progress, tmp)
} }
// Mktemp creates a temporary sub-directory inside the graph's filesystem. // Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) { func (graph *Graph) Mktemp(id string) (string, error) {
dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID()) dir := path.Join(graph.Root, "_tmp", common.GenerateRandomID())
if err := os.MkdirAll(dir, 0700); err != nil { if err := os.MkdirAll(dir, 0700); err != nil {
return "", err return "", err
} }

View File

@ -13,6 +13,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
@ -170,9 +171,9 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
// ensure no two downloads of the same image happen at the same time // ensure no two downloads of the same image happen at the same time
if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
if c != nil { if c != nil {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
<-c <-c
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
} else { } else {
log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
} }
@ -183,12 +184,12 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
defer s.poolRemove("pull", "img:"+img.ID) defer s.poolRemove("pull", "img:"+img.ID)
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
success := false success := false
var lastErr, err error var lastErr, err error
var is_downloaded bool var is_downloaded bool
for _, ep := range repoInfo.Index.Mirrors { for _, ep := range repoInfo.Index.Mirrors {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// Don't report errors when pulling from mirrors. // Don't report errors when pulling from mirrors.
log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err) log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
@ -200,12 +201,12 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
if !success { if !success {
for _, ep := range repoData.Endpoints { for _, ep := range repoData.Endpoints {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// It's not ideal that only the last error is returned, it would be better to concatenate the errors. // It's not ideal that only the last error is returned, it would be better to concatenate the errors.
// As the error is also given to the output stream the user will see the error. // As the error is also given to the output stream the user will see the error.
lastErr = err lastErr = err
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
continue continue
} }
layers_downloaded = layers_downloaded || is_downloaded layers_downloaded = layers_downloaded || is_downloaded
@ -215,13 +216,13 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
if !success { if !success {
err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr) err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), err.Error(), nil))
if parallel { if parallel {
errors <- err errors <- err
return return
} }
} }
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
if parallel { if parallel {
errors <- nil errors <- nil
@ -268,7 +269,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
if err != nil { if err != nil {
return false, err return false, err
} }
out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pulling dependent layers", nil))
// FIXME: Try to stream the images? // FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines // FIXME: Launch the getRemoteImage() in goroutines
@ -284,7 +285,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
defer s.poolRemove("pull", "layer:"+id) defer s.poolRemove("pull", "layer:"+id)
if !s.graph.Exists(id) { if !s.graph.Exists(id) {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Pulling metadata", nil))
var ( var (
imgJSON []byte imgJSON []byte
imgSize int imgSize int
@ -295,7 +296,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
for j := 1; j <= retries; j++ { for j := 1; j <= retries; j++ {
imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
if err != nil && j == retries { if err != nil && j == retries {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} else if err != nil { } else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@ -304,7 +305,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
img, err = image.NewImgJSON(imgJSON) img, err = image.NewImgJSON(imgJSON)
layers_downloaded = true layers_downloaded = true
if err != nil && j == retries { if err != nil && j == retries {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
} else if err != nil { } else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@ -320,7 +321,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
if j > 1 { if j > 1 {
status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
} }
out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil)) out.Write(sf.FormatProgress(common.TruncateID(id), status, nil))
layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
if uerr, ok := err.(*url.Error); ok { if uerr, ok := err.(*url.Error); ok {
err = uerr.Err err = uerr.Err
@ -329,26 +330,26 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue continue
} else if err != nil { } else if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} }
layers_downloaded = true layers_downloaded = true
defer layer.Close() defer layer.Close()
err = s.graph.Register(img, err = s.graph.Register(img,
utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) utils.ProgressReader(layer, imgSize, out, sf, false, common.TruncateID(id), "Downloading"))
if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue continue
} else if err != nil { } else if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Error downloading dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} else { } else {
break break
} }
} }
} }
out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(id), "Download complete", nil))
} }
return layers_downloaded, nil return layers_downloaded, nil
} }
@ -463,16 +464,16 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks) return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
} }
sumType, checksum := chunks[0], chunks[1] sumType, checksum := chunks[0], chunks[1]
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))
downloadFunc := func(di *downloadInfo) error { downloadFunc := func(di *downloadInfo) error {
log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID) log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
if c != nil { if c != nil {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
<-c <-c
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
} else { } else {
log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
} }
@ -495,16 +496,16 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
return fmt.Errorf("unable to wrap image blob reader with TarSum: %s", err) return fmt.Errorf("unable to wrap image blob reader with TarSum: %s", err)
} }
io.Copy(tmpFile, utils.ProgressReader(ioutil.NopCloser(tarSumReader), int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")) io.Copy(tmpFile, utils.ProgressReader(ioutil.NopCloser(tarSumReader), int(l), out, sf, false, common.TruncateID(img.ID), "Downloading"))
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Verifying Checksum", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))
if finalChecksum := tarSumReader.Sum(nil); !strings.EqualFold(finalChecksum, sumStr) { if finalChecksum := tarSumReader.Sum(nil); !strings.EqualFold(finalChecksum, sumStr) {
log.Infof("Image verification failed: checksum mismatch - expected %q but got %q", sumStr, finalChecksum) log.Infof("Image verification failed: checksum mismatch - expected %q but got %q", sumStr, finalChecksum)
verified = false verified = false
} }
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name()) log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
di.tmpFile = tmpFile di.tmpFile = tmpFile
@ -545,17 +546,17 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
d.tmpFile.Seek(0, 0) d.tmpFile.Seek(0, 0)
if d.tmpFile != nil { if d.tmpFile != nil {
err = s.graph.Register(d.img, err = s.graph.Register(d.img,
utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting")) utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, common.TruncateID(d.img.ID), "Extracting"))
if err != nil { if err != nil {
return false, err return false, err
} }
// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
} }
out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil)) out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
layersDownloaded = true layersDownloaded = true
} else { } else {
out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil)) out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
} }
} }

View File

@ -14,6 +14,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
"github.com/docker/libtrust" "github.com/docker/libtrust"
@ -129,7 +130,7 @@ func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Write
imagesToPush <- image.id imagesToPush <- image.id
continue continue
} }
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(image.id))) out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", common.TruncateID(image.id)))
} }
} }
@ -181,7 +182,7 @@ func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteNam
} }
} }
for _, tag := range tags[id] { for _, tag := range tags[id] {
out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag)) out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", common.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil { if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
return err return err
} }
@ -234,7 +235,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
if err != nil { if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
} }
out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil))
imgData := &registry.ImgData{ imgData := &registry.ImgData{
ID: imgID, ID: imgID,
@ -243,7 +244,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
// Send the json // Send the json
if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
if err == registry.ErrAlreadyExists { if err == registry.ErrAlreadyExists {
out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
return "", nil return "", nil
} }
return "", err return "", err
@ -258,7 +259,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
// Send the layer // Send the layer
log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, common.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -269,7 +270,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
return "", err return "", err
} }
out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil))
return imgData.Checksum, nil return imgData.Checksum, nil
} }
@ -359,7 +360,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out
// Call mount blob // Call mount blob
exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth) exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
if err != nil { if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
return err return err
} }
@ -368,7 +369,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out
return err return err
} }
} else { } else {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image already exists", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image already exists", nil))
} }
} }
@ -382,7 +383,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out
// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk // PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName, sumType, sumStr string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) error { func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName, sumType, sumStr string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) error {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Buffering to Disk", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil))
image, err := s.graph.Get(img.ID) image, err := s.graph.Get(img.ID)
if err != nil { if err != nil {
@ -411,11 +412,11 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *
// Send the layer // Send the layer
log.Debugf("rendered layer for %s of [%d] size", img.ID, size) log.Debugf("rendered layer for %s of [%d] size", img.ID, size)
if err := r.PutV2ImageBlob(endpoint, imageName, sumType, sumStr, utils.ProgressReader(tf, int(size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth); err != nil { if err := r.PutV2ImageBlob(endpoint, imageName, sumType, sumStr, utils.ProgressReader(tf, int(size), out, sf, false, common.TruncateID(img.ID), "Pushing"), auth); err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
return err return err
} }
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil)) out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil))
return nil return nil
} }

View File

@ -12,9 +12,9 @@ import (
"sync" "sync"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils"
"github.com/docker/libtrust" "github.com/docker/libtrust"
) )
@ -148,7 +148,7 @@ func (store *TagStore) ImageName(id string) string {
if names, exists := store.ByID()[id]; exists && len(names) > 0 { if names, exists := store.ByID()[id]; exists && len(names) > 0 {
return names[0] return names[0]
} }
return utils.TruncateID(id) return common.TruncateID(id)
} }
func (store *TagStore) DeleteAll(id string) error { func (store *TagStore) DeleteAll(id string) error {
@ -274,7 +274,7 @@ func (store *TagStore) GetRepoRefs() map[string][]string {
for name, repository := range store.Repositories { for name, repository := range store.Repositories {
for tag, id := range repository { for tag, id := range repository {
shortID := utils.TruncateID(id) shortID := common.TruncateID(id)
reporefs[shortID] = append(reporefs[shortID], fmt.Sprintf("%s:%s", name, tag)) reporefs[shortID] = append(reporefs[shortID], fmt.Sprintf("%s:%s", name, tag))
} }
} }

View File

@ -12,8 +12,8 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/api/client" "github.com/docker/docker/api/client"
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
"github.com/kr/pty" "github.com/kr/pty"
) )
@ -286,7 +286,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
ch := make(chan struct{}) ch := make(chan struct{})
go func() { go func() {
defer close(ch) defer close(ch)
if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil { if err := cli.CmdAttach(common.TruncateID(container.ID)); err != nil {
if err != io.ErrClosedPipe { if err != io.ErrClosedPipe {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -7,6 +7,7 @@ import (
"github.com/docker/docker/graph" "github.com/docker/docker/graph"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
"io" "io"
"io/ioutil" "io/ioutil"
@ -69,7 +70,7 @@ func TestInterruptedRegister(t *testing.T) {
defer nukeGraph(graph) defer nukeGraph(graph)
badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
image := &image.Image{ image := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: "testing", Comment: "testing",
Created: time.Now(), Created: time.Now(),
} }
@ -129,7 +130,7 @@ func TestRegister(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
image := &image.Image{ image := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: "testing", Comment: "testing",
Created: time.Now(), Created: time.Now(),
} }
@ -159,7 +160,7 @@ func TestDeletePrefix(t *testing.T) {
graph, _ := tempGraph(t) graph, _ := tempGraph(t)
defer nukeGraph(graph) defer nukeGraph(graph)
img := createTestImage(graph, t) img := createTestImage(graph, t)
if err := graph.Delete(utils.TruncateID(img.ID)); err != nil { if err := graph.Delete(common.TruncateID(img.ID)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
assertNImages(graph, t, 0) assertNImages(graph, t, 0)
@ -245,19 +246,19 @@ func TestByParent(t *testing.T) {
graph, _ := tempGraph(t) graph, _ := tempGraph(t)
defer nukeGraph(graph) defer nukeGraph(graph)
parentImage := &image.Image{ parentImage := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: "parent", Comment: "parent",
Created: time.Now(), Created: time.Now(),
Parent: "", Parent: "",
} }
childImage1 := &image.Image{ childImage1 := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: "child1", Comment: "child1",
Created: time.Now(), Created: time.Now(),
Parent: parentImage.ID, Parent: parentImage.ID,
} }
childImage2 := &image.Image{ childImage2 := &image.Image{
ID: utils.GenerateRandomID(), ID: common.GenerateRandomID(),
Comment: "child2", Comment: "child2",
Created: time.Now(), Created: time.Now(),
Parent: parentImage.ID, Parent: parentImage.ID,

View File

@ -22,6 +22,7 @@ import (
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
@ -305,7 +306,7 @@ func TestDaemonCreate(t *testing.T) {
&runconfig.HostConfig{}, &runconfig.HostConfig{},
"conflictname", "conflictname",
) )
if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), common.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error()) t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
} }

38
pkg/common/randomid.go Normal file
View File

@ -0,0 +1,38 @@
package common
import (
"crypto/rand"
"encoding/hex"
"io"
"strconv"
)
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
	const maxShortLen = 12
	if len(id) <= maxShortLen {
		return id
	}
	return id[:maxShortLen]
}
// GenerateRandomID returns a unique id: 64 lowercase hex characters drawn
// from the crypto-grade random source.
func GenerateRandomID() string {
	const idBytes = 32
	for {
		raw := make([]byte, idBytes)
		if _, err := io.ReadFull(rand.Reader, raw); err != nil {
			panic(err) // reading from crypto/rand should never fail
		}
		encoded := hex.EncodeToString(raw)
		// Reject ids whose 12-char truncated form parses as a plain integer:
		// an all-numeric short id causes issues when used as a hostname. ref #3869
		if _, err := strconv.ParseInt(encoded[:12], 10, 64); err != nil {
			return encoded
		}
	}
}

View File

@ -4,7 +4,7 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/docker/docker/utils" "github.com/docker/docker/pkg/common"
) )
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
@ -111,7 +111,7 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
func BenchmarkTruncIndexAdd100(b *testing.B) { func BenchmarkTruncIndexAdd100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -127,7 +127,7 @@ func BenchmarkTruncIndexAdd100(b *testing.B) {
func BenchmarkTruncIndexAdd250(b *testing.B) { func BenchmarkTruncIndexAdd250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -143,7 +143,7 @@ func BenchmarkTruncIndexAdd250(b *testing.B) {
func BenchmarkTruncIndexAdd500(b *testing.B) { func BenchmarkTruncIndexAdd500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -160,7 +160,7 @@ func BenchmarkTruncIndexGet100(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -184,7 +184,7 @@ func BenchmarkTruncIndexGet250(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -208,7 +208,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -231,7 +231,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
func BenchmarkTruncIndexDelete100(b *testing.B) { func BenchmarkTruncIndexDelete100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -254,7 +254,7 @@ func BenchmarkTruncIndexDelete100(b *testing.B) {
func BenchmarkTruncIndexDelete250(b *testing.B) { func BenchmarkTruncIndexDelete250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -277,7 +277,7 @@ func BenchmarkTruncIndexDelete250(b *testing.B) {
func BenchmarkTruncIndexDelete500(b *testing.B) { func BenchmarkTruncIndexDelete500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -300,7 +300,7 @@ func BenchmarkTruncIndexDelete500(b *testing.B) {
func BenchmarkTruncIndexNew100(b *testing.B) { func BenchmarkTruncIndexNew100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -311,7 +311,7 @@ func BenchmarkTruncIndexNew100(b *testing.B) {
func BenchmarkTruncIndexNew250(b *testing.B) { func BenchmarkTruncIndexNew250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -322,7 +322,7 @@ func BenchmarkTruncIndexNew250(b *testing.B) {
func BenchmarkTruncIndexNew500(b *testing.B) { func BenchmarkTruncIndexNew500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, utils.GenerateRandomID()) testSet = append(testSet, common.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -334,7 +334,7 @@ func BenchmarkTruncIndexAddGet100(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := utils.GenerateRandomID() id := common.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])
@ -359,7 +359,7 @@ func BenchmarkTruncIndexAddGet250(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := utils.GenerateRandomID() id := common.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])
@ -384,7 +384,7 @@ func BenchmarkTruncIndexAddGet500(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := utils.GenerateRandomID() id := common.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])

View File

@ -3,7 +3,6 @@ package utils
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"crypto/rand"
"crypto/sha1" "crypto/sha1"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
@ -16,7 +15,6 @@ import (
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
"strconv"
"strings" "strings"
"sync" "sync"
@ -164,36 +162,6 @@ func GetTotalUsedFds() int {
return -1 return -1
} }
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length Id.
func TruncateID(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
// GenerateRandomID returns an unique id
func GenerateRandomID() string {
for {
id := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, id); err != nil {
panic(err) // This shouldn't happen
}
value := hex.EncodeToString(id)
// if we try to parse the truncated for as an int and we don't have
// an error then the value is all numberic and causes issues when
// used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
continue
}
return value
}
}
func ValidateID(id string) error { func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok { if ok := validHex.MatchString(id); !ok {
err := fmt.Errorf("image ID '%s' is invalid", id) err := fmt.Errorf("image ID '%s' is invalid", id)

View File

@ -9,7 +9,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/utils" "github.com/docker/docker/pkg/common"
) )
type Repository struct { type Repository struct {
@ -43,7 +43,7 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
var ( var (
isBindMount bool isBindMount bool
err error err error
id = utils.GenerateRandomID() id = common.GenerateRandomID()
) )
if path != "" { if path != "" {
isBindMount = true isBindMount = true