Merge pull request #40263 from thaJeztah/normalize_comments

Normalize comment formatting
Brian Goff 2019-12-12 12:06:22 -08:00 committed by GitHub
commit b95fad8e51
65 changed files with 180 additions and 189 deletions
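
The change itself is mechanical: Go line comments written as //text gain a single space after the slashes (// text), and a handful of now-redundant trailing comments are removed or folded into assertion messages. gofmt does not touch the text after the comment markers, so spotting these takes a separate pass. Below is a minimal sketch of how such comments could be located with the standard go/parser and go/ast packages; it is not the tooling used for this PR, and the single-file argument handling and directive exclusions are illustrative assumptions.

// Minimal sketch (assumption: not the actual tooling behind this PR) that lists
// Go line comments missing a space after "//" in one source file.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"strings"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: commentcheck <file.go>")
		os.Exit(2)
	}
	fset := token.NewFileSet()
	// parser.ParseComments keeps comment nodes attached to the parsed file.
	f, err := parser.ParseFile(fset, os.Args[1], nil, parser.ParseComments)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, group := range f.Comments {
		for _, c := range group.List {
			text := c.Text // includes the leading "//" or "/*"
			switch {
			case !strings.HasPrefix(text, "//"):
				continue // block comment, out of scope
			case strings.HasPrefix(text, "// "), text == "//":
				continue // already normalized
			case strings.HasPrefix(text, "//go:"), strings.HasPrefix(text, "//nolint"):
				continue // compiler/linter directives must keep their exact form
			default:
				pos := fset.Position(c.Pos())
				fmt.Printf("%s:%d: %s\n", pos.Filename, pos.Line, text)
			}
		}
	}
}

Run over each tracked .go file (for example by looping over git ls-files '*.go'), a pass like this would surface the same spots touched in the diff below; a linter rule such as gocritic's commentFormatting check covers the same ground.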

View File

@ -41,7 +41,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
}
config, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && err != io.EOF { //Do not fail if body is empty.
if err != nil && err != io.EOF { // Do not fail if body is empty.
return err
}

View File

@ -57,7 +57,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
}
}
if image != "" { //pull
if image != "" { // pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@ -76,7 +76,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
}
}
err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { //import
} else { // import
src := r.Form.Get("fromSrc")
// 'err' MUST NOT be defined within this block, we need any error
// generated from the download to be available to the output

View File

@ -211,7 +211,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
}
//TODO: send progress bar
// TODO: send progress bar
w.WriteHeader(http.StatusNoContent)
return nil
}

View File

@ -30,7 +30,7 @@ type ContainerAttachConfig struct {
// expectation is for the logger endpoints to assemble the chunks using this
// metadata.
type PartialLogMetaData struct {
Last bool //true if this message is last of a partial
Last bool // true if this message is last of a partial
ID string // identifies group of messages comprising a single record
Ordinal int // ordering of message in partial group
}

View File

@ -265,7 +265,7 @@ type ImagePullOptions struct {
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
//ImagePushOptions holds information to push images.
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.

View File

@ -145,7 +145,7 @@ func (n NetworkMode) ConnectedContainer() string {
return ""
}
//UserDefined indicates user-created network
// UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)

View File

@ -154,7 +154,7 @@ func (args Args) Len() int {
func (args Args) MatchKVList(key string, sources map[string]string) bool {
fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
@ -200,7 +200,7 @@ func (args Args) Match(field, source string) bool {
// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
fieldValues, ok := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
}
@ -213,7 +213,7 @@ func (args Args) ExactMatch(key, source string) bool {
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}

View File

@ -13,7 +13,7 @@ type Address struct {
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string //Per network IPAM driver options
Options map[string]string // Per network IPAM driver options
Config []IPAMConfig
}

View File

@ -15,7 +15,7 @@ import (
"gotest.tools/fs"
)
var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} // xz magic
func TestSelectAcceptableMIME(t *testing.T) {
validMimeStrings := []string{

View File

@ -14,7 +14,7 @@ import (
// It returns the JSON content in the response body.
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
if ref != "" {
//Check if the given image name can be resolved
// Check if the given image name can be resolved
if _, err := reference.ParseNormalizedNamed(ref); err != nil {
return nil, err
}

View File

@ -47,10 +47,10 @@ func networkFromGRPC(n *swarmapi.Network) types.Network {
network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
//Annotations
// Annotations
network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
//DriverConfiguration
// DriverConfiguration
if n.Spec.DriverConfig != nil {
network.Spec.DriverConfiguration = &types.Driver{
Name: n.Spec.DriverConfig.Name,
@ -58,7 +58,7 @@ func networkFromGRPC(n *swarmapi.Network) types.Network {
}
}
//DriverState
// DriverState
if n.DriverState != nil {
network.DriverState = types.Driver{
Name: n.DriverState.Name,

View File

@ -29,10 +29,10 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
//Annotations
// Annotations
node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
//Description
// Description
if n.Description != nil {
node.Description.Hostname = n.Description.Hostname
if n.Description.Platform != nil {
@ -58,7 +58,7 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
}
}
//Manager
// Manager
if n.ManagerStatus != nil {
node.ManagerStatus = &types.ManagerStatus{
Leader: n.ManagerStatus.Leader,

View File

@ -95,7 +95,7 @@ func validateDefaultAddrPool(defaultAddrPool []string, size uint32) error {
// defaultAddrPool is not defined
return nil
}
//if size is not set, then we use default value 24
// if size is not set, then we use default value 24
if size == 0 {
size = 24
}

View File

@ -93,7 +93,6 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
}
}
//Validate Default Address Pool input
if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil {
return "", err
}

View File

@ -154,7 +154,7 @@ func (daemon *Daemon) newContainer(name string, operatingSystem string, config *
base.Created = time.Now().UTC()
base.Managed = managed
base.Path = entrypoint
base.Args = args //FIXME: de-duplicate from config
base.Args = args // FIXME: de-duplicate from config
base.Config = config
base.HostConfig = &containertypes.HostConfig{}
base.ImageID = imgID

View File

@ -8,7 +8,7 @@ import (
)
func (daemon *Daemon) saveApparmorConfig(container *container.Container) error {
container.AppArmorProfile = "" //we don't care about the previous value.
container.AppArmorProfile = "" // we don't care about the previous value.
if !daemon.apparmorEnabled {
return nil // if apparmor is disabled there is nothing to do here.

View File

@ -163,9 +163,9 @@ func TestLogEvents(t *testing.T) {
// https://github.com/docker/docker/issues/20999
// Fixtures:
//
//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)
//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
// 2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
// 2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)
// 2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
func TestLoadBufferedEvents(t *testing.T) {
now := time.Now()
f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now)

View File

@ -134,7 +134,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
// Driver contains information about the filesystem mounted.
type Driver struct {
//root of the file system
// root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap

View File

@ -143,7 +143,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
switch mode := f.Mode(); {
case mode.IsRegular():
//the type is 32bit on mips
// the type is 32bit on mips
id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} // nolint: unconvert
if copyMode == Hardlink {
isHardlink = true

View File

@ -67,8 +67,7 @@ func TestCopyDir(t *testing.T) {
if srcFileSys.Dev == dstFileSys.Dev {
assert.Check(t, srcFileSys.Ino != dstFileSys.Ino)
}
// Todo: check size, and ctim is not equal
/// on filesystems that have granular ctimes
// Todo: check size, and ctim is not equal on filesystems that have granular ctimes
assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode))
assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid))
assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid))

View File

@ -119,7 +119,7 @@ type DeviceSet struct {
deletionWorkerTicker *time.Ticker
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
minFreeSpacePercent uint32 //min free space percentage in thinpool
minFreeSpacePercent uint32 // min free space percentage in thinpool
xfsNospaceRetries string // max retries when xfs receives ENOSPC
lvmSetupConfig directLVMConfig
}
@ -1692,8 +1692,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
}
}
//create the root dir of the devmapper driver ownership to match this
//daemon's remapped root uid/gid so containers can start properly
// create the root dir of the devmapper driver ownership to match this
// daemon's remapped root uid/gid so containers can start properly
uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
if err != nil {
return err

View File

@ -110,7 +110,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS
if err := driver.Cleanup(); err != nil {
t.Fatal(err)
}
//Reload
// Reload
d, err := Init(driver.home, []string{
fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),

View File

@ -29,7 +29,7 @@ var (
drivers map[string]InitFunc
)
//CreateOpts contains optional arguments for Create() and CreateReadWrite()
// CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string

View File

@ -53,7 +53,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
}
//write the options to the pipe for the untar exec to read
// write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)

View File

@ -171,7 +171,7 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr
if all || len(i.imageStore.Children(id)) == 0 {
if imageFilters.Contains("dangling") && !danglingOnly {
//dangling=false case, so dangling image is not needed
// dangling=false case, so dangling image is not needed
continue
}
if imageFilters.Contains("reference") { // skip images with no references if filtering by reference

View File

@ -56,7 +56,7 @@ func (l *Link) ToEnv() []string {
env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port()))
}
//sort the ports so that we can bulk the continuous ports together
// sort the ports so that we can bulk the continuous ports together
nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
// If the two ports have the same number, tcp takes priority
// Sort in desc order

View File

@ -388,7 +388,7 @@ func portOp(key string, filter map[nat.Port]bool) func(value string) error {
if strings.Contains(value, ":") {
return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
}
//support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
// support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
proto, port := nat.SplitProtoPort(value)
start, end, err := nat.ParsePortRange(port)
if err != nil {

View File

@ -203,7 +203,7 @@ func TestCopierSlow(t *testing.T) {
}
var jsonBuf bytes.Buffer
//encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}
// encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}
jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond}
c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog)

View File

@ -109,7 +109,7 @@ func makeMarshaller() func(m *logger.Message) ([]byte, error) {
messageToProto(m, proto, md)
protoSize := proto.Size()
writeLen := protoSize + (2 * encodeBinaryLen) //+ len(messageDelimiter)
writeLen := protoSize + (2 * encodeBinaryLen) // + len(messageDelimiter)
if writeLen > len(buf) {
buf = make([]byte, writeLen)

View File

@ -39,8 +39,8 @@ type EndpointSettings struct {
// AttachmentStore stores the load balancer IP address for a network id.
type AttachmentStore struct {
sync.Mutex
//key: networkd id
//value: load balancer ip address
// key: networkd id
// value: load balancer ip address
networkToNodeLBIP map[string]net.IP
}

View File

@ -32,7 +32,7 @@ type tarexporter struct {
// LogImageEvent defines interface for event generation related to image tar(load and save) operations
type LogImageEvent interface {
//LogImageEvent generates an event related to an image operation
// LogImageEvent generates an event related to an image operation
LogImageEvent(imageID, refName, action string)
}

View File

@ -210,26 +210,21 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *testing.T) {
Name: "Dockerfile",
Size: int64(len(dockerfile)),
})
//failed to write tar file header
assert.NilError(c, err)
assert.NilError(c, err, "failed to write tar file header")
_, err = tw.Write(dockerfile)
// failed to write Dockerfile in tar file content
assert.NilError(c, err)
assert.NilError(c, err, "failed to write Dockerfile in tar file content")
err = tw.WriteHeader(&tar.Header{
Name: "dir/./file",
Size: int64(len(fileContents)),
})
//failed to write tar file header
assert.NilError(c, err)
assert.NilError(c, err, "failed to write tar file header")
_, err = tw.Write(fileContents)
// failed to write file contents in tar file content
assert.NilError(c, err)
assert.NilError(c, err, "failed to write file contents in tar file content")
// failed to close tar archive
assert.NilError(c, tw.Close())
assert.NilError(c, tw.Close(), "failed to close tar archive")
res, body, err := request.Post("/build", request.RawContent(ioutil.NopCloser(buffer)), request.ContentType("application/x-tar"))
assert.NilError(c, err)

View File

@ -689,7 +689,7 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *testing.T) {
body.Close()
}
//Issue 14230. daemon should return 500 for invalid port syntax
// Issue 14230. daemon should return 500 for invalid port syntax
func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *testing.T) {
config := `{
"Image": "busybox",

View File

@ -44,7 +44,7 @@ func (s *DockerSuite) TestAPIImagesFilter(c *testing.T) {
return images
}
//incorrect number of matches returned
// incorrect number of matches returned
images := getImages("utest*/*")
assert.Equal(c, len(images[0].RepoTags), 2)

View File

@ -356,7 +356,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
node := daemons[0].GetNode(c, task.NodeID)
assert.Equal(c, node.Spec.Role, swarm.NodeRoleWorker)
}
//remove service
// remove service
daemons[0].RemoveService(c, id)
// create service
@ -370,7 +370,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
node := daemons[0].GetNode(c, task.NodeID)
assert.Equal(c, node.Spec.Role, swarm.NodeRoleManager)
}
//remove service
// remove service
daemons[0].RemoveService(c, id)
// create service
@ -423,7 +423,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
for _, task := range tasks {
assert.Assert(c, task.NodeID == nodes[0].ID)
}
//remove service
// remove service
daemons[0].RemoveService(c, id)
// create service
@ -436,7 +436,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
for _, task := range tasks {
assert.Assert(c, task.NodeID != nodes[0].ID)
}
//remove service
// remove service
daemons[0].RemoveService(c, id)
constraints = []string{"node.labels.security==medium"}
@ -450,7 +450,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
for _, task := range tasks {
assert.Assert(c, task.NodeID == "")
}
//remove service
// remove service
daemons[0].RemoveService(c, id)
// multiple constraints

View File

@ -175,7 +175,7 @@ func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *testing.T) {
// try to inspect again - it should error this time
_, err = inspectFieldWithError(imageReference, "Id")
//unexpected nil err trying to inspect what should be a non-existent image
// unexpected nil err trying to inspect what should be a non-existent image
assert.ErrorContains(c, err, "No such object")
}
@ -255,8 +255,7 @@ func (s *DockerRegistrySuite) TestListImagesWithDigests(c *testing.T) {
assert.Assert(c, re1.MatchString(out), "expected %q: %s", re1.String(), out)
// setup image2
digest2, err := setupImageWithTag(c, "tag2")
//error setting up image
assert.NilError(c, err)
assert.NilError(c, err, "error setting up image")
imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
c.Logf("imageReference2 = %s", imageReference2)

View File

@ -38,7 +38,7 @@ func (s *DockerSuite) TestCommitWithoutPause(c *testing.T) {
dockerCmd(c, "inspect", cleanedImageID)
}
//test commit a paused container should not unpause it after commit
// TestCommitPausedContainer tests that a paused container is not unpaused after being committed
func (s *DockerSuite) TestCommitPausedContainer(c *testing.T) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-i", "-d", "busybox")

View File

@ -213,7 +213,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *testing.T) {
s.d.Start(c)
oldBasesizeBytes := getBaseDeviceSize(c, s.d)
var newBasesizeBytes int64 = 1073741824 //1GB in bytes
var newBasesizeBytes int64 = 1073741824 // 1GB in bytes
if newBasesizeBytes < oldBasesizeBytes {
err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes))
@ -234,7 +234,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *testing.T)
oldBasesizeBytes := getBaseDeviceSize(c, s.d)
var newBasesizeBytes int64 = 53687091200 //50GB in bytes
var newBasesizeBytes int64 = 53687091200 // 50GB in bytes
if newBasesizeBytes < oldBasesizeBytes {
c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes))))
@ -572,16 +572,16 @@ func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *testing.T) {
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *testing.T) {
//attempt to start daemon with incorrect flags (we know -b and --bip conflict)
// attempt to start daemon with incorrect flags (we know -b and --bip conflict)
if err := s.d.StartWithError("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
//verify we got the right error
// verify we got the right error
if !strings.Contains(err.Error(), "daemon exited") {
c.Fatalf("Expected daemon not to start, got %v", err)
}
// look in the log and make sure we got the message that daemon is shutting down
icmd.RunCommand("grep", "failed to start daemon", s.d.LogFileName()).Assert(c, icmd.Success)
} else {
//if we didn't get an error and the daemon is running, this is a failure
// if we didn't get an error and the daemon is running, this is a failure
c.Fatal("Conflicting options should cause the daemon to error out with a failure")
}
}
@ -697,7 +697,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *testing.T) {
s.d.Start(c, "--bip", bridgeIP)
//check if the iptables contains new bridgeIP MASQUERADE rule
// check if the iptables contains new bridgeIP MASQUERADE rule
ipTablesSearchString := bridgeIPNet.String()
icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{
Out: ipTablesSearchString,
@ -1203,7 +1203,7 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *testing.T) {
c.Fatalf("Error Unmarshal: %s", err)
}
//replace config.Kid with the fake value
// replace config.Kid with the fake value
config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
// NEW Data-Struct to byte[]

View File

@ -48,7 +48,7 @@ func (s *DockerSuite) TestEventsTimestampFormats(c *testing.T) {
events = events[:len(events)-1]
nEvents := len(events)
assert.Assert(c, nEvents >= 5) //Missing expected event
assert.Assert(c, nEvents >= 5)
containerEvents := eventActionsByIDAndType(c, events, name, "container")
assert.Assert(c, is.DeepEqual(containerEvents, []string{"create", "attach", "start", "die", "destroy"}), out)
}
@ -99,7 +99,7 @@ func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *testing.T) {
events := strings.Split(out, "\n")
nEvents := len(events)
assert.Assert(c, nEvents >= 3) //Missing expected event
assert.Assert(c, nEvents >= 3)
matchedEvents := 0
for _, event := range events {
matches := eventstestutils.ScanMap(event)
@ -124,7 +124,7 @@ func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *testing.T) {
events = events[:len(events)-1]
nEvents := len(events)
assert.Assert(c, nEvents >= 5) //Missing expected event
assert.Assert(c, nEvents >= 5)
containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container")
assert.Assert(c, is.DeepEqual(containerEvents, []string{"create", "attach", "start", "die", "destroy"}), out)
}
@ -664,7 +664,7 @@ func (s *DockerSuite) TestEventsContainerRestart(c *testing.T) {
events := strings.Split(strings.TrimSpace(out), "\n")
nEvents := len(events)
assert.Assert(c, nEvents >= 1) //Missing expected event
assert.Assert(c, nEvents >= 1)
actions := eventActionsByIDAndType(c, events, "testEvent", "container")
for _, a := range actions {

View File

@ -243,7 +243,7 @@ func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *testing.T) {
assert.Assert(c, strings.Contains(netEvents[0], "disconnect"))
assert.Assert(c, strings.Contains(netEvents[1], "disconnect"))
//both networks appeared in the network event output
// both networks appeared in the network event output
assert.Assert(c, strings.Contains(out, "test-event-network-local-1"))
assert.Assert(c, strings.Contains(out, "test-event-network-local-2"))
}

View File

@ -242,10 +242,10 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *testing.T)
assert.Equal(c, strings.Count(out, imageID), 1)
out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false")
//dangling=false would not include dangling images
// dangling=false would not include dangling images
assert.Assert(c, !strings.Contains(out, imageID))
out, _ = dockerCmd(c, "images")
//docker images still include dangling images
// docker images still include dangling images
assert.Assert(c, strings.Contains(out, imageID))
}

View File

@ -42,8 +42,8 @@ func (s *DockerSuite) TestInspectInt64(c *testing.T) {
}
func (s *DockerSuite) TestInspectDefault(c *testing.T) {
//Both the container and image are named busybox. docker inspect will fetch the container JSON.
//If the container JSON is not available, it will go for the image JSON.
// Both the container and image are named busybox. docker inspect will fetch the container JSON.
// If the container JSON is not available, it will go for the image JSON.
out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
containerID := strings.TrimSpace(out)
@ -78,8 +78,8 @@ func (s *DockerSuite) TestInspectStatus(c *testing.T) {
}
func (s *DockerSuite) TestInspectTypeFlagContainer(c *testing.T) {
//Both the container and image are named busybox. docker inspect will fetch container
//JSON State.Running field. If the field is true, it's a container.
// Both the container and image are named busybox. docker inspect will fetch container
// JSON State.Running field. If the field is true, it's a container.
runSleepingContainer(c, "--name=busybox", "-d")
formatStr := "--format={{.State.Running}}"
@ -88,9 +88,9 @@ func (s *DockerSuite) TestInspectTypeFlagContainer(c *testing.T) {
}
func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *testing.T) {
//Run this test on an image named busybox. docker inspect will try to fetch container
//JSON. Since there is no container named busybox and --type=container, docker inspect will
//not try to get the image JSON. It will throw an error.
// Run this test on an image named busybox. docker inspect will try to fetch container
// JSON. Since there is no container named busybox and --type=container, docker inspect will
// not try to get the image JSON. It will throw an error.
dockerCmd(c, "run", "-d", "busybox", "true")
@ -100,9 +100,9 @@ func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *testing.T) {
}
func (s *DockerSuite) TestInspectTypeFlagWithImage(c *testing.T) {
//Both the container and image are named busybox. docker inspect will fetch image
//JSON as --type=image. if there is no image with name busybox, docker inspect
//will throw an error.
// Both the container and image are named busybox. docker inspect will fetch image
// JSON as --type=image. if there is no image with name busybox, docker inspect
// will throw an error.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
@ -112,8 +112,8 @@ func (s *DockerSuite) TestInspectTypeFlagWithImage(c *testing.T) {
}
func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *testing.T) {
//Both the container and image are named busybox. docker inspect will fail
//as --type=foobar is not a valid value for the flag.
// Both the container and image are named busybox. docker inspect will fail
// as --type=foobar is not a valid value for the flag.
dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
@ -295,9 +295,8 @@ func (s *DockerSuite) TestInspectLogConfigNoType(c *testing.T) {
}
func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *testing.T) {
//Both the container and image are named busybox. docker inspect will fetch container
//JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields.
// Both the container and image are named busybox. docker inspect will fetch container
// JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields.
runSleepingContainer(c, "--name=busybox", "-d")

View File

@ -755,7 +755,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {
// skip header
lines = lines[1:]
//ps output should have both the containers
// ps output should have both the containers
assert.Equal(c, len(RemoveLinesForExistingElements(lines, existing)), 2)
// Making sure onbridgenetwork and onnonenetwork is on the output

View File

@ -175,7 +175,7 @@ func (s *DockerSuite) TestRunWithoutNetworking(c *testing.T) {
}
}
//test --link use container name to link target
// test --link use container name to link target
func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *testing.T) {
// TODO Windows: This test cannot run on a Windows daemon as the networking
// settings are not populated back yet on inspect.
@ -190,7 +190,7 @@ func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *testing.T) {
}
}
//test --link use container id to link target
// test --link use container id to link target
func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *testing.T) {
// TODO Windows: This test cannot run on a Windows daemon as the networking
// settings are not populated back yet on inspect.
@ -1430,7 +1430,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
//take a copy of resolv.conf for restoring after test completes
// take a copy of resolv.conf for restoring after test completes
resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
c.Fatal(err)
@ -1447,14 +1447,14 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success)
}
//cleanup
// cleanup
defer func() {
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
}()
//1. test that a restarting container gets an updated resolv.conf
// 1. test that a restarting container gets an updated resolv.conf
dockerCmd(c, "run", "--name=first", "busybox", "true")
containerID1 := getIDByName(c, "first")
@ -1472,16 +1472,16 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
}
/* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
/* // make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
} */
//2. test that a restarting container does not receive resolv.conf updates
// 2. test that a restarting container does not receive resolv.conf updates
// if it modified the container copy of the starting point resolv.conf
dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
containerID2 := getIDByName(c, "second")
//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
// make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
@ -1495,7 +1495,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
}
//3. test that a running container's resolv.conf is not modified while running
// 3. test that a running container's resolv.conf is not modified while running
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
runningContainerID := strings.TrimSpace(out)
@ -1510,7 +1510,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
}
//4. test that a running container's resolv.conf is updated upon restart
// 4. test that a running container's resolv.conf is updated upon restart
// (the above container is still running..)
dockerCmd(c, "restart", runningContainerID)
@ -1520,7 +1520,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(tmpResolvConf), string(containerResolv))
}
//5. test that additions of a localhost resolver are cleaned from
// 5. test that additions of a localhost resolver are cleaned from
// host resolv.conf before updating container's resolv.conf copies
// replace resolv.conf with a localhost-only nameserver copy
@ -1539,7 +1539,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
}
//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
// 6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
// of containers' resolv.conf.
// Restore the original resolv.conf
@ -1570,7 +1570,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
}
//cleanup, restore original resolv.conf happens in defer func()
// cleanup, restore original resolv.conf happens in defer func()
}
func (s *DockerSuite) TestRunAddHost(c *testing.T) {
@ -1958,7 +1958,7 @@ func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *testing.T) {
}
// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
// sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
// TestRunCidFile tests that run --cidfile returns the longid
func (s *DockerSuite) TestRunCidFileCheckIDLength(c *testing.T) {
tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
@ -2016,7 +2016,7 @@ func (s *DockerSuite) TestRunInspectMacAddress(c *testing.T) {
// test docker run use an invalid mac address
func (s *DockerSuite) TestRunWithInvalidMacAddress(c *testing.T) {
out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
//use an invalid mac address should with an error out
// use an invalid mac address should with an error out
if err == nil || !strings.Contains(out, "is not a valid mac address") {
c.Fatalf("run with an invalid --mac-address should with error out")
}
@ -2148,7 +2148,7 @@ func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *testing.T) {
dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
}
//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
// GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
func (s *DockerSuite) TestRunCreateVolumeEtc(c *testing.T) {
// While Windows supports volumes, it does not support --add-host hence
// this test is not applicable on Windows.

View File

@ -66,7 +66,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *testing.T) {
buf := make([]byte, 1024)
n, err := pty.Read(buf)
assert.NilError(c, err) //could not read tty output
assert.NilError(c, err, "could not read tty output")
assert.Assert(c, strings.Contains(string(buf[:n]), "cowardly refusing"), "help output is not being yielded")
}

View File

@ -36,19 +36,19 @@ func (s *DockerSuite) TestSearchCmdOptions(c *testing.T) {
outSearchCmd, _ := dockerCmd(c, "search", "busybox")
assert.Assert(c, strings.Count(outSearchCmd, "\n") > 3, outSearchCmd)
outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") // The busybox is a busybox base image, not an AUTOMATED image.
outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
for i := range outSearchCmdautomatedSlice {
assert.Assert(c, !strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), "The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)
}
outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image.
outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") // The busybox is a busybox base image, official image.
outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n")
for i := range outSearchCmdNotOfficialSlice {
assert.Assert(c, !strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), "The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)
}
outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image.
outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") // The busybox is a busybox base image, official image.
outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n")
assert.Equal(c, len(outSearchCmdOfficialSlice), 3) // 1 header, 1 line, 1 carriage return
assert.Assert(c, strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), "The busybox is an OFFICIAL image: %s", outSearchCmdOfficial)

View File

@ -227,7 +227,7 @@ func TestServiceUpdateNetwork(t *testing.T) {
assert.NilError(t, err)
assert.Assert(t, len(netInfo.Containers) == 2, "Expected 2 endpoints, one for container and one for LB Sandbox")
//Remove network from service
// Remove network from service
service.Spec.TaskTemplate.Networks = []swarmtypes.NetworkAttachmentConfig{}
_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)

View File

@ -117,42 +117,42 @@ func (c *client) Version(ctx context.Context) (containerd.Version, error) {
// Isolation=Process example:
//
// {
// "SystemType": "Container",
// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
// "Owner": "docker",
// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
// "IgnoreFlushesDuringBoot": true,
// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
// "Layers": [{
// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
// }],
// "HostName": "5e0055c814a6",
// "MappedDirectories": [],
// "HvPartition": false,
// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//}
// "SystemType": "Container",
// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
// "Owner": "docker",
// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
// "IgnoreFlushesDuringBoot": true,
// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
// "Layers": [{
// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
// }],
// "HostName": "5e0055c814a6",
// "MappedDirectories": [],
// "HvPartition": false,
// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
// }
//
// Isolation=Hyper-V example:
//
//{
// "SystemType": "Container",
// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
// "Owner": "docker",
// "IgnoreFlushesDuringBoot": true,
// "Layers": [{
// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
// }],
// "HostName": "475c2c58933b",
// "MappedDirectories": [],
// "HvPartition": true,
// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
// "DNSSearchList": "a.com,b.com,c.com",
// "HvRuntime": {
// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
// },
//}
// {
// "SystemType": "Container",
// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
// "Owner": "docker",
// "IgnoreFlushesDuringBoot": true,
// "Layers": [{
// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
// }],
// "HostName": "475c2c58933b",
// "MappedDirectories": [],
// "HvPartition": true,
// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
// "DNSSearchList": "a.com,b.com,c.com",
// "HvRuntime": {
// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
// },
// }
func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
if ctr := c.getContainer(id); ctr != nil {
return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))

View File

@ -14,7 +14,7 @@ func TestSerialization(t *testing.T) {
)
q.Append("aaa", func() {
//simulate a long time task
// simulate a long time task
time.Sleep(10 * time.Millisecond)
assert.Equal(t, serialization, 1)
serialization = 2

View File

@ -442,7 +442,7 @@ func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownO
}
// canonicalTarName provides a platform-independent and consistent posix-style
//path for files and directories to be archived regardless of the platform.
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) string {
name = CanonicalTarNameForPath(name)
@ -495,13 +495,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
//check whether the file is overlayfs whiteout
//if yes, skip re-mapping container ID mappings.
// check whether the file is overlayfs whiteout
// if yes, skip re-mapping container ID mappings.
isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
//handle re-mapping container ID mappings back to host ID mappings before
//writing tar headers/files. We skip whiteout files because they were written
//by the kernel and already have proper ownership relative to the host
// handle re-mapping container ID mappings back to host ID mappings before
// writing tar headers/files. We skip whiteout files because they were written
// by the kernel and already have proper ownership relative to the host
if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
fileIDPair, err := getFileUIDGID(fi.Sys())
if err != nil {

View File

@ -282,31 +282,31 @@ func TestCopyInfoDestinationPathSymlink(t *testing.T) {
}
testData := []FileTestData{
//Create a directory: /tmp/archive-copy-test*/dir1
//Test will "copy" file1 to dir1
// Create a directory: /tmp/archive-copy-test*/dir1
// Test will "copy" file1 to dir1
{resource: FileData{filetype: Dir, path: "dir1", permissions: 0740}, file: "file1", expected: CopyInfo{Path: root + "dir1/file1", Exists: false, IsDir: false}},
//Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1
//Test will "copy" file2 to dirSymlink
// Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1
// Test will "copy" file2 to dirSymlink
{resource: FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}},
//Create a file in tmp directory: /tmp/archive-copy-test*/file1
//Test to cover when the full file path already exists.
// Create a file in tmp directory: /tmp/archive-copy-test*/file1
// Test to cover when the full file path already exists.
{resource: FileData{filetype: Regular, path: "file1", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}},
//Create a directory: /tmp/archive-copy*/dir2
//Test to cover when the full directory path already exists
// Create a directory: /tmp/archive-copy*/dir2
// Test to cover when the full directory path already exists
{resource: FileData{filetype: Dir, path: "dir2", permissions: 0740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}},
//Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget
//Negative test to cover symlinking to a target that does not exit
// Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget
// Negative test to cover symlinking to a target that does not exit
{resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}},
//Create a file in tmp directory for next test: /tmp/existingfile
// Create a file in tmp directory for next test: /tmp/existingfile
{resource: FileData{filetype: Regular, path: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
//Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile
//Test to cover when the parent directory of a new file is a symlink
// Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile
// Test to cover when the parent directory of a new file is a symlink
{resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
}

View File

@ -31,7 +31,7 @@ func CanonicalTarNameForPath(p string) string {
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows

View File

@ -96,8 +96,8 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) {
t.Fatal(err)
}
options := &archive.TarOptions{}
//65534 entries of 64-byte strings ~= 4MB of environment space which should overflow
//on most systems when passed via environment or command line arguments
// 65534 entries of 64-byte strings ~= 4MB of environment space which should overflow
// on most systems when passed via environment or command line arguments
excludes := make([]string, 65534)
for i := 0; i < 65534; i++ {
excludes[i] = strings.Repeat(string(i), 64)

View File

@ -28,7 +28,7 @@ func untar() {
var options archive.TarOptions
//read the options from the pipe "ExtraFiles"
// read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
@ -100,7 +100,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
//write the options to the pipe for the untar exec to read
// write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("Untar json encode to pipe failed: %v", err)

View File

@ -194,7 +194,7 @@ func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows

View File

@ -18,8 +18,8 @@ func resolveBinary(binname string) (string, error) {
if err != nil {
return "", err
}
//only return no error if the final resolved binary basename
//matches what was searched for
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}

View File

@ -178,7 +178,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
clearLine(out)
endl = "\r"
fmt.Fprint(out, endl)
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
} else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {

View File

@ -13,7 +13,7 @@ import (
"unsafe"
)
//parseMountTable returns information about mounted filesystems
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
var rawEntries *C.struct_statfs

View File

@ -34,7 +34,7 @@ func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action stri
func (p *Reader) Read(buf []byte) (n int, err error) {
read, err := p.in.Read(buf)
p.current += int64(read)
updateEvery := int64(1024 * 512) //512kB
updateEvery := int64(1024 * 512) // 512kB
if p.size > 0 {
// Update progress for every 1% read if 1% < 512kB
if increment := int64(0.01 * float64(p.size)); increment < updateEvery {

View File

@ -61,7 +61,7 @@ func Trap(cleanup func(), logger interface {
DumpStacks("")
logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
}
//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
// for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
os.Exit(128 + int(sig.(syscall.Signal)))
}(sig)
}

View File

@ -6,9 +6,9 @@ import (
"time"
)
//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
// setCTime will set the create time on a file. On Unix, the create
// time is updated as a side effect of setting the modified time, so
// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}

View File

@ -6,8 +6,8 @@ import (
"golang.org/x/sys/windows"
)
//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
// setCTime will set the create time on a file. On Windows, this requires
// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := windows.NsecToTimespec(ctime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path)

View File

@ -235,7 +235,7 @@ func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle,
createmode = windows.OPEN_EXISTING
}
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
return h, e

View File

@ -18,7 +18,7 @@ var eol = []byte("\n")
// ErrNonPositiveLinesNumber is an error returned if the lines number was negative.
var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
//TailFile returns last n lines of the passed in file.
// TailFile returns last n lines of the passed in file.
func TailFile(f *os.File, n int) ([][]byte, error) {
size, err := f.Seek(0, io.SeekEnd)
if err != nil {

View File

@ -268,7 +268,7 @@ func requiresAuth(w http.ResponseWriter, r *http.Request) bool {
value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())
cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600}
http.SetCookie(w, cookie)
//FIXME(sam): this should be sent only on Index routes
// FIXME(sam): this should be sent only on Index routes
value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano())
w.Header().Add("X-Docker-Token", value)
}