Refactor pkg/common into pkg/stringid and pkg/stringutils, Fixes #11599

Signed-off-by: Antonio Murdaca <me@runcom.ninja>
This commit is contained in:
Antonio Murdaca 2015-03-24 12:25:26 +01:00
parent 05c23cad85
commit b80fae7356
32 changed files with 215 additions and 196 deletions
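Taken together, the rename is mechanical at call sites: the ID helpers move from pkg/common to pkg/stringid, and the generic random-string helpers move to the new pkg/stringutils. A minimal sketch of the old-to-new mapping, assuming only the functions that appear in this diff:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	// common.GenerateRandomID / common.TruncateID -> stringid.GenerateRandomID / stringid.TruncateID
	id := stringid.GenerateRandomID()
	fmt.Println(stringid.TruncateID(id))

	// common.RandomString (hex encoding of 32 random bytes) -> stringutils.GenerateRandomString
	fmt.Println(stringutils.GenerateRandomString())

	// engine.RandomString(n) in the env tests -> stringutils.GenerateRandomAsciiString(n)
	fmt.Println(stringutils.GenerateRandomAsciiString(20))

	// makeRandomString(n) in the integration-cli helpers -> stringutils.GenerateRandomAlphaOnlyString(n)
	fmt.Println(stringutils.GenerateRandomAlphaOnlyString(10))
}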

View File

@ -33,7 +33,6 @@ import (
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/opts" "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag" flag "github.com/docker/docker/pkg/mflag"
@ -43,6 +42,7 @@ import (
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/timeutils"
@ -1165,7 +1165,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
if *noTrunc { if *noTrunc {
fmt.Fprintf(w, "%s\t", outID) fmt.Fprintf(w, "%s\t", outID)
} else { } else {
fmt.Fprintf(w, "%s\t", common.TruncateID(outID)) fmt.Fprintf(w, "%s\t", stringid.TruncateID(outID))
} }
fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
@ -1180,7 +1180,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
if *noTrunc { if *noTrunc {
fmt.Fprintln(w, outID) fmt.Fprintln(w, outID)
} else { } else {
fmt.Fprintln(w, common.TruncateID(outID)) fmt.Fprintln(w, stringid.TruncateID(outID))
} }
} }
} }
@ -1479,7 +1479,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
} }
if matchName != "" { if matchName != "" {
if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) { if matchName == image.Get("Id") || matchName == stringid.TruncateID(image.Get("Id")) {
startImage = image startImage = image
} }
@ -1549,7 +1549,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
for _, out := range outs.Data { for _, out := range outs.Data {
outID := out.Get("Id") outID := out.Get("Id")
if !*noTrunc { if !*noTrunc {
outID = common.TruncateID(outID) outID = stringid.TruncateID(outID)
} }
repoTags := out.GetList("RepoTags") repoTags := out.GetList("RepoTags")
@ -1629,8 +1629,8 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin
imageID = image.Get("Id") imageID = image.Get("Id")
parentID = image.Get("ParentId") parentID = image.Get("ParentId")
} else { } else {
imageID = common.TruncateID(image.Get("Id")) imageID = stringid.TruncateID(image.Get("Id"))
parentID = common.TruncateID(image.Get("ParentId")) parentID = stringid.TruncateID(image.Get("ParentId"))
} }
if parentID == "" { if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
@ -1649,7 +1649,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri
if noTrunc { if noTrunc {
imageID = image.Get("Id") imageID = image.Get("Id")
} else { } else {
imageID = common.TruncateID(image.Get("Id")) imageID = stringid.TruncateID(image.Get("Id"))
} }
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize")))) fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
@ -1757,7 +1757,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
outID := out.Get("Id") outID := out.Get("Id")
if !*noTrunc { if !*noTrunc {
outID = common.TruncateID(outID) outID = stringid.TruncateID(outID)
} }
if *quiet { if *quiet {

View File

@ -32,8 +32,8 @@ import (
"github.com/docker/docker/builder/parser" "github.com/docker/docker/builder/parser"
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
@ -177,7 +177,7 @@ func (b *Builder) Run(context io.Reader) (string, error) {
} }
return "", err return "", err
} }
fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image)) fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
if b.Remove { if b.Remove {
b.clearTmp() b.clearTmp()
} }
@ -187,7 +187,7 @@ func (b *Builder) Run(context io.Reader) (string, error) {
return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
} }
fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image)) fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image))
return b.image, nil return b.image, nil
} }

View File

@ -25,10 +25,10 @@ import (
imagepkg "github.com/docker/docker/image" imagepkg "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/tarsum"
@ -557,7 +557,7 @@ func (b *Builder) create() (*daemon.Container, error) {
} }
b.TmpContainers[c.ID] = struct{}{} b.TmpContainers[c.ID] = struct{}{}
fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID)) fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))
if len(config.Cmd) > 0 { if len(config.Cmd) > 0 {
// override the entry point that may have been picked up from the base image // override the entry point that may have been picked up from the base image
@ -753,11 +753,11 @@ func (b *Builder) clearTmp() {
} }
if err := b.Daemon.Rm(tmp); err != nil { if err := b.Daemon.Rm(tmp); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err) fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return return
} }
b.Daemon.DeleteVolumes(tmp.VolumePaths()) b.Daemon.DeleteVolumes(tmp.VolumePaths())
delete(b.TmpContainers, c) delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c)) fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
} }
} }

View File

@ -30,12 +30,12 @@ import (
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/directory"
"github.com/docker/docker/pkg/etchosts" "github.com/docker/docker/pkg/etchosts"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/pkg/ulimit"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
@ -739,7 +739,7 @@ func (container *Container) Kill() error {
if _, err := container.WaitStop(10 * time.Second); err != nil { if _, err := container.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves // Ensure that we don't kill ourselves
if pid := container.GetPid(); pid != 0 { if pid := container.GetPid(); pid != 0 {
log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", common.TruncateID(container.ID)) log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil { if err := syscall.Kill(pid, 9); err != nil {
if err != syscall.ESRCH { if err != syscall.ESRCH {
return err return err

View File

@ -31,13 +31,13 @@ import (
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
@ -517,7 +517,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
var ( var (
err error err error
id = common.GenerateRandomID() id = stringid.GenerateRandomID()
) )
if name == "" { if name == "" {
@ -562,7 +562,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
nameAsKnownByUser := strings.TrimPrefix(name, "/") nameAsKnownByUser := strings.TrimPrefix(name, "/")
return "", fmt.Errorf( return "", fmt.Errorf(
"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser, "Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
common.TruncateID(conflictingContainer.ID)) stringid.TruncateID(conflictingContainer.ID))
} }
} }
return name, nil return name, nil
@ -585,7 +585,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
return name, nil return name, nil
} }
name = "/" + common.TruncateID(id) name = "/" + stringid.TruncateID(id)
if _, err := daemon.containerGraph.Set(name, id); err != nil { if _, err := daemon.containerGraph.Set(name, id); err != nil {
return "", err return "", err
} }

View File

@ -12,9 +12,9 @@ import (
"github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/daemon/execdriver/lxc"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
) )
@ -141,7 +141,7 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
} }
execConfig := &execConfig{ execConfig := &execConfig{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
OpenStdin: config.AttachStdin, OpenStdin: config.AttachStdin,
OpenStdout: config.AttachStdout, OpenStdout: config.AttachStdout,
OpenStderr: config.AttachStderr, OpenStderr: config.AttachStderr,

View File

@ -34,9 +34,9 @@ import (
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/directory"
mountpk "github.com/docker/docker/pkg/mount" mountpk "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/libcontainer/label" "github.com/docker/libcontainer/label"
) )
@ -405,7 +405,7 @@ func (a *Driver) Cleanup() error {
for _, id := range ids { for _, id := range ids {
if err := a.unmount(id); err != nil { if err := a.unmount(id); err != nil {
log.Errorf("Unmounting %s: %s", common.TruncateID(id), err) log.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
} }
} }

View File

@ -7,8 +7,8 @@ import (
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/graph" "github.com/docker/docker/graph"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -148,11 +148,11 @@ func (daemon *Daemon) canDeleteImage(imgID string, force bool) error {
if imgID == p.ID { if imgID == p.ID {
if container.IsRunning() { if container.IsRunning() {
if force { if force {
return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", common.TruncateID(imgID), common.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
} }
return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
} else if !force { } else if !force {
return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID)) return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
} }
} }
return nil return nil

View File

@ -8,7 +8,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
) )
@ -230,7 +230,7 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max { if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached", log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
common.TruncateID(m.container.ID), max) stringid.TruncateID(m.container.ID), max)
return false return false
} }

View File

@ -10,8 +10,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringutils"
) )
// Installer is a standard interface for objects which can "install" themselves // Installer is a standard interface for objects which can "install" themselves
@ -78,7 +78,7 @@ func (eng *Engine) RegisterCatchall(catchall Handler) {
func New() *Engine { func New() *Engine {
eng := &Engine{ eng := &Engine{
handlers: make(map[string]Handler), handlers: make(map[string]Handler),
id: common.RandomString(), id: stringutils.GenerateRandomString(),
Stdout: os.Stdout, Stdout: os.Stdout,
Stderr: os.Stderr, Stderr: os.Stderr,
Stdin: os.Stdin, Stdin: os.Stdin,

View File

@ -3,24 +3,12 @@ package engine
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"math/rand"
"testing" "testing"
"time" "time"
"github.com/docker/docker/pkg/stringutils"
) )
const chars = "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
// RandomString returns random string of specified length
func RandomString(length int) string {
res := make([]byte, length)
for i := 0; i < length; i++ {
res[i] = chars[rand.Intn(len(chars))]
}
return string(res)
}
func TestEnvLenZero(t *testing.T) { func TestEnvLenZero(t *testing.T) {
env := &Env{} env := &Env{}
if env.Len() != 0 { if env.Len() != 0 {
@ -197,7 +185,7 @@ func TestMultiMap(t *testing.T) {
func testMap(l int) [][2]string { func testMap(l int) [][2]string {
res := make([][2]string, l) res := make([][2]string, l)
for i := 0; i < l; i++ { for i := 0; i < l; i++ {
t := [2]string{RandomString(5), RandomString(20)} t := [2]string{stringutils.GenerateRandomAsciiString(5), stringutils.GenerateRandomAsciiString(20)}
res[i] = t res[i] = t
} }
return res return res

View File

@ -17,8 +17,8 @@ import (
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
@ -118,7 +118,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) {
// Create creates a new image and registers it in the graph. // Create creates a new image and registers it in the graph.
func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
img := &image.Image{ img := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: comment, Comment: comment,
Created: time.Now().UTC(), Created: time.Now().UTC(),
DockerVersion: dockerversion.VERSION, DockerVersion: dockerversion.VERSION,
@ -217,7 +217,7 @@ func (graph *Graph) TempLayerArchive(id string, sf *utils.StreamFormatter, outpu
Formatter: sf, Formatter: sf,
Size: 0, Size: 0,
NewLines: false, NewLines: false,
ID: common.TruncateID(id), ID: stringid.TruncateID(id),
Action: "Buffering to disk", Action: "Buffering to disk",
}) })
defer progressReader.Close() defer progressReader.Close()
@ -226,7 +226,7 @@ func (graph *Graph) TempLayerArchive(id string, sf *utils.StreamFormatter, outpu
// Mktemp creates a temporary sub-directory inside the graph's filesystem. // Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) { func (graph *Graph) Mktemp(id string) (string, error) {
dir := path.Join(graph.Root, "_tmp", common.GenerateRandomID()) dir := path.Join(graph.Root, "_tmp", stringid.GenerateRandomID())
if err := os.MkdirAll(dir, 0700); err != nil { if err := os.MkdirAll(dir, 0700); err != nil {
return "", err return "", err
} }

View File

@ -14,8 +14,8 @@ import (
"github.com/docker/distribution/digest" "github.com/docker/distribution/digest"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -172,9 +172,9 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
// ensure no two downloads of the same image happen at the same time // ensure no two downloads of the same image happen at the same time
if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
if c != nil { if c != nil {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
<-c <-c
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
} else { } else {
log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
} }
@ -185,12 +185,12 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
defer s.poolRemove("pull", "img:"+img.ID) defer s.poolRemove("pull", "img:"+img.ID)
out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
success := false success := false
var lastErr, err error var lastErr, err error
var is_downloaded bool var is_downloaded bool
for _, ep := range repoInfo.Index.Mirrors { for _, ep := range repoInfo.Index.Mirrors {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// Don't report errors when pulling from mirrors. // Don't report errors when pulling from mirrors.
log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err) log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
@ -202,12 +202,12 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
if !success { if !success {
for _, ep := range repoData.Endpoints { for _, ep := range repoData.Endpoints {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// It's not ideal that only the last error is returned, it would be better to concatenate the errors. // It's not ideal that only the last error is returned, it would be better to concatenate the errors.
// As the error is also given to the output stream the user will see the error. // As the error is also given to the output stream the user will see the error.
lastErr = err lastErr = err
out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
continue continue
} }
layers_downloaded = layers_downloaded || is_downloaded layers_downloaded = layers_downloaded || is_downloaded
@ -217,13 +217,13 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} }
if !success { if !success {
err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr) err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
out.Write(sf.FormatProgress(common.TruncateID(img.ID), err.Error(), nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
if parallel { if parallel {
errors <- err errors <- err
return return
} }
} }
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
if parallel { if parallel {
errors <- nil errors <- nil
@ -270,7 +270,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
if err != nil { if err != nil {
return false, err return false, err
} }
out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pulling dependent layers", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
// FIXME: Try to stream the images? // FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines // FIXME: Launch the getRemoteImage() in goroutines
@ -286,7 +286,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
defer s.poolRemove("pull", "layer:"+id) defer s.poolRemove("pull", "layer:"+id)
if !s.graph.Exists(id) { if !s.graph.Exists(id) {
out.Write(sf.FormatProgress(common.TruncateID(id), "Pulling metadata", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
var ( var (
imgJSON []byte imgJSON []byte
imgSize int imgSize int
@ -297,7 +297,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
for j := 1; j <= retries; j++ { for j := 1; j <= retries; j++ {
imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
if err != nil && j == retries { if err != nil && j == retries {
out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} else if err != nil { } else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@ -306,7 +306,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
img, err = image.NewImgJSON(imgJSON) img, err = image.NewImgJSON(imgJSON)
layers_downloaded = true layers_downloaded = true
if err != nil && j == retries { if err != nil && j == retries {
out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
} else if err != nil { } else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@ -322,7 +322,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
if j > 1 { if j > 1 {
status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
} }
out.Write(sf.FormatProgress(common.TruncateID(id), status, nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), status, nil))
layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
if uerr, ok := err.(*url.Error); ok { if uerr, ok := err.(*url.Error); ok {
err = uerr.Err err = uerr.Err
@ -331,7 +331,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue continue
} else if err != nil { } else if err != nil {
out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} }
layers_downloaded = true layers_downloaded = true
@ -344,21 +344,21 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
Formatter: sf, Formatter: sf,
Size: imgSize, Size: imgSize,
NewLines: false, NewLines: false,
ID: common.TruncateID(id), ID: stringid.TruncateID(id),
Action: "Downloading", Action: "Downloading",
})) }))
if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
time.Sleep(time.Duration(j) * 500 * time.Millisecond) time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue continue
} else if err != nil { } else if err != nil {
out.Write(sf.FormatProgress(common.TruncateID(id), "Error downloading dependent layers", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
return layers_downloaded, err return layers_downloaded, err
} else { } else {
break break
} }
} }
} }
out.Write(sf.FormatProgress(common.TruncateID(id), "Download complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
} }
return layers_downloaded, nil return layers_downloaded, nil
} }
@ -478,16 +478,16 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
} }
downloads[i].digest = dgst downloads[i].digest = dgst
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
downloadFunc := func(di *downloadInfo) error { downloadFunc := func(di *downloadInfo) error {
log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID) log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
if c != nil { if c != nil {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
<-c <-c
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
} else { } else {
log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
} }
@ -515,20 +515,20 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
Formatter: sf, Formatter: sf,
Size: int(l), Size: int(l),
NewLines: false, NewLines: false,
ID: common.TruncateID(img.ID), ID: stringid.TruncateID(img.ID),
Action: "Downloading", Action: "Downloading",
})); err != nil { })); err != nil {
return fmt.Errorf("unable to copy v2 image blob data: %s", err) return fmt.Errorf("unable to copy v2 image blob data: %s", err)
} }
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))
if !verifier.Verified() { if !verifier.Verified() {
log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String()) log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
verified = false verified = false
} }
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name()) log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
di.tmpFile = tmpFile di.tmpFile = tmpFile
@ -574,7 +574,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
Out: out, Out: out,
Formatter: sf, Formatter: sf,
Size: int(d.length), Size: int(d.length),
ID: common.TruncateID(d.img.ID), ID: stringid.TruncateID(d.img.ID),
Action: "Extracting", Action: "Extracting",
})) }))
if err != nil { if err != nil {
@ -583,10 +583,10 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
} }
out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
tagUpdated = true tagUpdated = true
} else { } else {
out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
} }
} }

View File

@ -16,8 +16,8 @@ import (
"github.com/docker/distribution/digest" "github.com/docker/distribution/digest"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
@ -139,7 +139,7 @@ func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Write
imagesToPush <- image.id imagesToPush <- image.id
continue continue
} }
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", common.TruncateID(image.id))) out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
} }
} }
@ -191,7 +191,7 @@ func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteNam
} }
} }
for _, tag := range tags[id] { for _, tag := range tags[id] {
out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", common.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag)) out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil { if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
return err return err
} }
@ -244,7 +244,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
if err != nil { if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
} }
out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
imgData := &registry.ImgData{ imgData := &registry.ImgData{
ID: imgID, ID: imgID,
@ -253,7 +253,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
// Send the json // Send the json
if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
if err == registry.ErrAlreadyExists { if err == registry.ErrAlreadyExists {
out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
return "", nil return "", nil
} }
return "", err return "", err
@ -275,7 +275,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
Formatter: sf, Formatter: sf,
Size: int(layerData.Size), Size: int(layerData.Size),
NewLines: false, NewLines: false,
ID: common.TruncateID(imgData.ID), ID: stringid.TruncateID(imgData.ID),
Action: "Pushing", Action: "Pushing",
}), ep, token, jsonRaw) }), ep, token, jsonRaw)
if err != nil { if err != nil {
@ -288,7 +288,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
return "", err return "", err
} }
out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
return imgData.Checksum, nil return imgData.Checksum, nil
} }
@ -385,7 +385,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
// Call mount blob // Call mount blob
exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth) exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth)
if err != nil { if err != nil {
out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image push failed", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
return err return err
} }
} }
@ -400,7 +400,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
checksum = cs checksum = cs
} }
} else { } else {
out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image already exists", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
} }
m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum} m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum}
m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)} m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
@ -443,7 +443,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk // PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) { func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
image, err := s.graph.Get(img.ID) image, err := s.graph.Get(img.ID)
if err != nil { if err != nil {
@ -481,13 +481,13 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *
Formatter: sf, Formatter: sf,
Size: int(size), Size: int(size),
NewLines: false, NewLines: false,
ID: common.TruncateID(img.ID), ID: stringid.TruncateID(img.ID),
Action: "Pushing", Action: "Pushing",
}), auth); err != nil { }), auth); err != nil {
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image push failed", nil))
return "", err return "", err
} }
out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil)) out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
return dgst.String(), nil return dgst.String(), nil
} }

View File

@ -13,8 +13,8 @@ import (
"sync" "sync"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
"github.com/docker/libtrust" "github.com/docker/libtrust"
@ -163,7 +163,7 @@ func (store *TagStore) ImageName(id string) string {
if names, exists := store.ByID()[id]; exists && len(names) > 0 { if names, exists := store.ByID()[id]; exists && len(names) > 0 {
return names[0] return names[0]
} }
return common.TruncateID(id) return stringid.TruncateID(id)
} }
func (store *TagStore) DeleteAll(id string) error { func (store *TagStore) DeleteAll(id string) error {
@ -331,7 +331,7 @@ func (store *TagStore) GetRepoRefs() map[string][]string {
for name, repository := range store.Repositories { for name, repository := range store.Repositories {
for tag, id := range repository { for tag, id := range repository {
shortID := common.TruncateID(id) shortID := stringid.TruncateID(id)
reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag)) reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag))
} }
} }

View File

@ -22,6 +22,7 @@ import (
"github.com/docker/docker/builder/command" "github.com/docker/docker/builder/command"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringutils"
) )
func TestBuildJSONEmptyRun(t *testing.T) { func TestBuildJSONEmptyRun(t *testing.T) {
@ -4420,7 +4421,7 @@ func TestBuildOnBuildOutput(t *testing.T) {
} }
func TestBuildInvalidTag(t *testing.T) { func TestBuildInvalidTag(t *testing.T) {
name := "abcd:" + makeRandomString(200) name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
defer deleteImages(name) defer deleteImages(name)
_, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
// if the error doesnt check for illegal tag name, or the image is built // if the error doesnt check for illegal tag name, or the image is built

View File

@ -9,7 +9,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/stringid"
) )
func TestImagesEnsureImageIsListed(t *testing.T) { func TestImagesEnsureImageIsListed(t *testing.T) {
@ -196,7 +196,7 @@ func TestImagesEnsureDanglingImageOnlyListedOnce(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("error tagging foobox: %s", err) t.Fatalf("error tagging foobox: %s", err)
} }
imageId := common.TruncateID(strings.TrimSpace(out)) imageId := stringid.TruncateID(strings.TrimSpace(out))
defer deleteImages(imageId) defer deleteImages(imageId)
// overwrite the tag, making the previous image dangling // overwrite the tag, making the previous image dangling

View File

@ -5,6 +5,8 @@ import (
"os/exec" "os/exec"
"strings" "strings"
"testing" "testing"
"github.com/docker/docker/pkg/stringutils"
) )
// tagging a named image in a new unprefixed repo should work // tagging a named image in a new unprefixed repo should work
@ -59,7 +61,7 @@ func TestTagInvalidUnprefixedRepo(t *testing.T) {
// ensure we don't allow the use of invalid tags; these tag operations should fail // ensure we don't allow the use of invalid tags; these tag operations should fail
func TestTagInvalidPrefixedRepo(t *testing.T) { func TestTagInvalidPrefixedRepo(t *testing.T) {
long_tag := makeRandomString(121) long_tag := stringutils.GenerateRandomAlphaOnlyString(121)
invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag} invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag}

View File

@ -22,6 +22,7 @@ import (
"time" "time"
"github.com/docker/docker/api" "github.com/docker/docker/api"
"github.com/docker/docker/pkg/stringutils"
) )
// Daemon represents a Docker daemon for the testing framework. // Daemon represents a Docker daemon for the testing framework.
@ -695,8 +696,8 @@ func (f *remoteFileServer) Close() error {
func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) {
var ( var (
image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(makeRandomString(10))) image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(makeRandomString(10))) container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
) )
// Build the image // Build the image

View File

@ -6,7 +6,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/rand"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os" "os"
@ -17,6 +16,7 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
) )
@ -301,21 +301,10 @@ func copyWithCP(source, target string) error {
return nil return nil
} }
func makeRandomString(n int) string {
// make a really long string
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
for i := range b {
b[i] = letters[r.Intn(len(letters))]
}
return string(b)
}
// randomUnixTmpDirPath provides a temporary unix path with rand string appended. // randomUnixTmpDirPath provides a temporary unix path with rand string appended.
// does not create or checks if it exists. // does not create or checks if it exists.
func randomUnixTmpDirPath(s string) string { func randomUnixTmpDirPath(s string) string {
return path.Join("/tmp", fmt.Sprintf("%s.%s", s, makeRandomString(10))) return path.Join("/tmp", fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
} }
// Reads chunkSize bytes from reader after every interval. // Reads chunkSize bytes from reader after every interval.

View File

@ -12,7 +12,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/api/client" "github.com/docker/docker/api/client"
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/term"
"github.com/kr/pty" "github.com/kr/pty"
) )
@ -286,7 +286,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
ch := make(chan struct{}) ch := make(chan struct{})
go func() { go func() {
defer close(ch) defer close(ch)
if err := cli.CmdAttach(common.TruncateID(container.ID)); err != nil { if err := cli.CmdAttach(stringid.TruncateID(container.ID)); err != nil {
if err != io.ErrClosedPipe { if err != io.ErrClosedPipe {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -2,19 +2,20 @@ package docker
import ( import (
"errors" "errors"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/utils"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"path" "path"
"testing" "testing"
"time" "time"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/utils"
) )
func TestMount(t *testing.T) { func TestMount(t *testing.T) {
@ -70,7 +71,7 @@ func TestInterruptedRegister(t *testing.T) {
defer nukeGraph(graph) defer nukeGraph(graph)
badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
image := &image.Image{ image := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: "testing", Comment: "testing",
Created: time.Now(), Created: time.Now(),
} }
@ -130,7 +131,7 @@ func TestRegister(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
image := &image.Image{ image := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: "testing", Comment: "testing",
Created: time.Now(), Created: time.Now(),
} }
@ -160,7 +161,7 @@ func TestDeletePrefix(t *testing.T) {
graph, _ := tempGraph(t) graph, _ := tempGraph(t)
defer nukeGraph(graph) defer nukeGraph(graph)
img := createTestImage(graph, t) img := createTestImage(graph, t)
if err := graph.Delete(common.TruncateID(img.ID)); err != nil { if err := graph.Delete(stringid.TruncateID(img.ID)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
assertNImages(graph, t, 0) assertNImages(graph, t, 0)
@ -246,19 +247,19 @@ func TestByParent(t *testing.T) {
graph, _ := tempGraph(t) graph, _ := tempGraph(t)
defer nukeGraph(graph) defer nukeGraph(graph)
parentImage := &image.Image{ parentImage := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: "parent", Comment: "parent",
Created: time.Now(), Created: time.Now(),
Parent: "", Parent: "",
} }
childImage1 := &image.Image{ childImage1 := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: "child1", Comment: "child1",
Created: time.Now(), Created: time.Now(),
Parent: parentImage.ID, Parent: parentImage.ID,
} }
childImage2 := &image.Image{ childImage2 := &image.Image{
ID: common.GenerateRandomID(), ID: stringid.GenerateRandomID(),
Comment: "child2", Comment: "child2",
Created: time.Now(), Created: time.Now(),
Parent: parentImage.ID, Parent: parentImage.ID,

View File

@ -22,9 +22,9 @@ import (
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/nat" "github.com/docker/docker/nat"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -306,7 +306,7 @@ func TestDaemonCreate(t *testing.T) {
&runconfig.HostConfig{}, &runconfig.HostConfig{},
"conflictname", "conflictname",
) )
if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), common.TruncateID(testContainer.ID)) { if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), stringid.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err) t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err)
} }

pkg/stringid/README.md Normal file
View File

@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers.
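A minimal usage sketch of the two helpers this package retains; the 64-character length matches the package tests further down, while the 12-character short form is Docker's usual convention and is an assumption here, not read from this diff:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	id := stringid.GenerateRandomID() // full 64-character hex identifier
	short := stringid.TruncateID(id)  // shortened prefix shown in CLI output (assumed 12 chars)
	fmt.Printf("%s -> %s\n", id, short)
}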

View File

@ -1,4 +1,4 @@
package common package stringid
import ( import (
"crypto/rand" "crypto/rand"
@ -36,12 +36,3 @@ func GenerateRandomID() string {
return value return value
} }
} }
func RandomString() string {
id := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, id); err != nil {
panic(err) // This shouldn't happen
}
return hex.EncodeToString(id)
}

View File

@ -1,8 +1,14 @@
package common package stringid
import ( import "testing"
"testing"
) func TestGenerateRandomID(t *testing.T) {
id := GenerateRandomID()
if len(id) != 64 {
t.Fatalf("Id returned is incorrect: %s", id)
}
}
func TestShortenId(t *testing.T) { func TestShortenId(t *testing.T) {
id := GenerateRandomID() id := GenerateRandomID()
@ -27,33 +33,3 @@ func TestShortenIdInvalid(t *testing.T) {
t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
} }
} }
func TestGenerateRandomID(t *testing.T) {
id := GenerateRandomID()
if len(id) != 64 {
t.Fatalf("Id returned is incorrect: %s", id)
}
}
func TestRandomString(t *testing.T) {
id := RandomString()
if len(id) != 64 {
t.Fatalf("Id returned is incorrect: %s", id)
}
}
func TestRandomStringUniqueness(t *testing.T) {
repeats := 25
set := make(map[string]struct{}, repeats)
for i := 0; i < repeats; i = i + 1 {
id := RandomString()
if len(id) != 64 {
t.Fatalf("Id returned is incorrect: %s", id)
}
if _, ok := set[id]; ok {
t.Fatalf("Random number is repeated")
}
set[id] = struct{}{}
}
}

View File

@ -0,0 +1 @@
This package provides helper functions for dealing with strings.

View File

@ -0,0 +1,43 @@
package stringutils
import (
"crypto/rand"
"encoding/hex"
"io"
mathrand "math/rand"
"time"
)
// GenerateRandomString returns a 64-character hex string built from 32 random bytes
func GenerateRandomString() string {
id := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, id); err != nil {
panic(err) // This shouldn't happen
}
return hex.EncodeToString(id)
}
// GenerateRandomAlphaOnlyString generates an alphabetical-only random string of length n
func GenerateRandomAlphaOnlyString(n int) string {
// make a really long string
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano()))
for i := range b {
b[i] = letters[r.Intn(len(letters))]
}
return string(b)
}
// GenerateRandomAsciiString generates a random printable ASCII string of length n
func GenerateRandomAsciiString(n int) string {
chars := "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
res[i] = chars[mathrand.Intn(len(chars))]
}
return string(res)
}
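A minimal usage sketch of the three helpers above, assuming the lengths implied by their implementations:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	fmt.Println(len(stringutils.GenerateRandomString()))       // 64: hex encoding of 32 random bytes
	fmt.Println(stringutils.GenerateRandomAlphaOnlyString(10)) // 10 letters drawn from a-z, A-Z
	fmt.Println(stringutils.GenerateRandomAsciiString(10))     // 10 printable ASCII chars, symbols included
}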

View File

@ -0,0 +1,25 @@
package stringutils
import "testing"
func TestRandomString(t *testing.T) {
str := GenerateRandomString()
if len(str) != 64 {
t.Fatalf("Id returned is incorrect: %s", str)
}
}
func TestRandomStringUniqueness(t *testing.T) {
repeats := 25
set := make(map[string]struct{}, repeats)
for i := 0; i < repeats; i = i + 1 {
str := GenerateRandomString()
if len(str) != 64 {
t.Fatalf("Id returned is incorrect: %s", str)
}
if _, ok := set[str]; ok {
t.Fatalf("Random number is repeated")
}
set[str] = struct{}{}
}
}

View File

@ -4,7 +4,7 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/stringid"
) )
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
@ -111,7 +111,7 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
func BenchmarkTruncIndexAdd100(b *testing.B) { func BenchmarkTruncIndexAdd100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -127,7 +127,7 @@ func BenchmarkTruncIndexAdd100(b *testing.B) {
func BenchmarkTruncIndexAdd250(b *testing.B) { func BenchmarkTruncIndexAdd250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -143,7 +143,7 @@ func BenchmarkTruncIndexAdd250(b *testing.B) {
func BenchmarkTruncIndexAdd500(b *testing.B) { func BenchmarkTruncIndexAdd500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -160,7 +160,7 @@ func BenchmarkTruncIndexGet100(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -184,7 +184,7 @@ func BenchmarkTruncIndexGet250(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -208,7 +208,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
index := NewTruncIndex([]string{}) index := NewTruncIndex([]string{})
for _, id := range testSet { for _, id := range testSet {
@ -231,7 +231,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
func BenchmarkTruncIndexDelete100(b *testing.B) { func BenchmarkTruncIndexDelete100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -254,7 +254,7 @@ func BenchmarkTruncIndexDelete100(b *testing.B) {
func BenchmarkTruncIndexDelete250(b *testing.B) { func BenchmarkTruncIndexDelete250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -277,7 +277,7 @@ func BenchmarkTruncIndexDelete250(b *testing.B) {
func BenchmarkTruncIndexDelete500(b *testing.B) { func BenchmarkTruncIndexDelete500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -300,7 +300,7 @@ func BenchmarkTruncIndexDelete500(b *testing.B) {
func BenchmarkTruncIndexNew100(b *testing.B) { func BenchmarkTruncIndexNew100(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -311,7 +311,7 @@ func BenchmarkTruncIndexNew100(b *testing.B) {
func BenchmarkTruncIndexNew250(b *testing.B) { func BenchmarkTruncIndexNew250(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -322,7 +322,7 @@ func BenchmarkTruncIndexNew250(b *testing.B) {
func BenchmarkTruncIndexNew500(b *testing.B) { func BenchmarkTruncIndexNew500(b *testing.B) {
var testSet []string var testSet []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
testSet = append(testSet, common.GenerateRandomID()) testSet = append(testSet, stringid.GenerateRandomID())
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -334,7 +334,7 @@ func BenchmarkTruncIndexAddGet100(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := common.GenerateRandomID() id := stringid.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])
@ -359,7 +359,7 @@ func BenchmarkTruncIndexAddGet250(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := common.GenerateRandomID() id := stringid.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])
@ -384,7 +384,7 @@ func BenchmarkTruncIndexAddGet500(b *testing.B) {
var testSet []string var testSet []string
var testKeys []string var testKeys []string
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
id := common.GenerateRandomID() id := stringid.GenerateRandomID()
testSet = append(testSet, id) testSet = append(testSet, id)
l := rand.Intn(12) + 12 l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l]) testKeys = append(testKeys, id[:l])
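For context on what these benchmarks exercise, a minimal sketch of TruncIndex usage with the renamed stringid package (the pkg/truncindex import path and the Add/Get signatures are assumptions inferred from the test code above):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	id := stringid.GenerateRandomID()
	index := truncindex.NewTruncIndex([]string{})
	if err := index.Add(id); err != nil {
		panic(err)
	}
	full, err := index.Get(id[:12]) // resolve a unique prefix back to the full ID
	if err != nil {
		panic(err)
	}
	fmt.Println(full == id) // true
}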

View File

@ -21,9 +21,9 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringutils"
) )
type KeyValuePair struct { type KeyValuePair struct {
@ -312,7 +312,7 @@ var globalTestID string
// new directory. // new directory.
func TestDirectory(templateDir string) (dir string, err error) { func TestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" { if globalTestID == "" {
globalTestID = common.RandomString()[:4] globalTestID = stringutils.GenerateRandomString()[:4]
} }
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
if prefix == "" { if prefix == "" {
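A sketch of how the updated TestDirectory prefix is built (the ioutil.TempDir call and the caller-name placeholder are assumptions about the elided body):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	tag := stringutils.GenerateRandomString()[:4] // short per-run tag, as in TestDirectory
	prefix := fmt.Sprintf("docker-test%s-%s-", tag, "callername")
	dir, err := ioutil.TempDir("", prefix)
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}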

View File

@ -9,7 +9,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/stringid"
) )
type Repository struct { type Repository struct {
@ -43,7 +43,7 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
var ( var (
isBindMount bool isBindMount bool
err error err error
id = common.GenerateRandomID() id = stringid.GenerateRandomID()
) )
if path != "" { if path != "" {
isBindMount = true isBindMount = true