fix some typos from module contrib to man

Signed-off-by: Aaron.L.Xu <likexu@harmonycloud.cn>
Aaron.L.Xu 2017-02-16 20:08:57 +08:00
parent d26cf30a60
commit e0577d5fe8
18 changed files with 19 additions and 19 deletions

View File

@@ -20,7 +20,7 @@ static int child_exec(void *stuff)
{
struct clone_args *args = (struct clone_args *)stuff;
if (execvp(args->argv[0], args->argv) != 0) {
fprintf(stderr, "failed to execvp argments %s\n",
fprintf(stderr, "failed to execvp arguments %s\n",
strerror(errno));
exit(-1);
}

View File

@@ -20,7 +20,7 @@ static int child_exec(void *stuff)
{
struct clone_args *args = (struct clone_args *)stuff;
if (execvp(args->argv[0], args->argv) != 0) {
fprintf(stderr, "failed to execvp argments %s\n",
fprintf(stderr, "failed to execvp arguments %s\n",
strerror(errno));
exit(-1);
}

View File

@@ -10,7 +10,7 @@ import (
gogotypes "github.com/gogo/protobuf/types"
)
-func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
+func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
if na != nil {
return types.NetworkAttachment{
Network: networkFromGRPC(na.Network),

View File

@@ -179,7 +179,7 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
case types.UpdateFailureActionContinue:
failureAction = swarmapi.UpdateConfig_CONTINUE
default:
return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction)
return swarmapi.ServiceSpec{}, fmt.Errorf("unrecognized update failure action %s", s.UpdateConfig.FailureAction)
}
spec.Update = &swarmapi.UpdateConfig{
Parallelism: s.UpdateConfig.Parallelism,

View File

@@ -60,7 +60,7 @@ func TaskFromGRPC(t swarmapi.Task) types.Task {
// NetworksAttachments
for _, na := range t.Networks {
-task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na))
+task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na))
}
if t.Status.PortStatus == nil {

View File

@@ -22,7 +22,7 @@ func NewRefCounter(c Checker) *RefCounter {
}
}
-// Increment increaes the ref count for the given id and returns the current count
+// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
m := c.counts[path]

View File

@@ -30,7 +30,7 @@ var (
// ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported")
-// ErrPrerequisites retuned when driver does not meet prerequisites.
+// ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")

View File

@@ -171,7 +171,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
}
if withExtraAttrs {
-// lazyly init variables
+// lazily init variables
if imagesMap == nil {
allContainers = daemon.List()
allLayers = daemon.layerStore.Map()

View File

@@ -87,7 +87,7 @@ func initGCP() {
// These will fail on instances if the metadata service is
// down or the client is compiled with an API version that
// has been removed. Since these are not vital, let's ignore
-// them and make their fields in the dockeLogEntry ,omitempty
+// them and make their fields in the dockerLogEntry ,omitempty
projectID, _ = metadata.ProjectID()
zone, _ = metadata.Zone()
instanceName, _ = metadata.InstanceName()
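
The comment fixed above refers to marking the corresponding dockerLogEntry fields as `,omitempty`, which is standard `encoding/json` behaviour. The following is a minimal, hypothetical sketch, not part of this commit: the `instanceMetadata` struct and its field names are invented for illustration rather than taken from the gcplogs driver's real types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// instanceMetadata is a made-up stand-in for the driver's log entry
// metadata; the field names here are illustrative only.
type instanceMetadata struct {
	ProjectID    string `json:"projectId,omitempty"`
	Zone         string `json:"zone,omitempty"`
	InstanceName string `json:"instanceName,omitempty"`
}

func main() {
	// If the metadata calls fail and the values stay empty, the
	// omitempty tags drop those fields from the marshaled JSON.
	b, _ := json.Marshal(instanceMetadata{ProjectID: "my-project"})
	fmt.Println(string(b)) // prints {"projectId":"my-project"}
}
```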
@@ -111,7 +111,7 @@ func New(info logger.Info) (logger.Logger, error) {
project = projectID
}
if project == "" {
return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project")
return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. Please specify a project")
}
// Issue #29344: gcplogs segfaults (static binary)

View File

@@ -104,7 +104,7 @@ func TestRingClose(t *testing.T) {
t.Fatal("expected empty queue")
}
if m, err := r.Dequeue(); err == nil || m != nil {
t.Fatal("exepcted err on Dequeue after close")
t.Fatal("expected err on Dequeue after close")
}
ls := r.Drain()

View File

@@ -74,7 +74,7 @@ func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content
}
// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances
-// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximium
+// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum
// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution)
func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00")

View File

@@ -253,7 +253,7 @@ func backportMountSpec(container *container.Container) error {
m.Type = mounttypes.TypeVolume
m.Spec.Type = mounttypes.TypeVolume
-// make sure this is not an anyonmous volume before setting the spec source
+// make sure this is not an anonymous volume before setting the spec source
if _, exists := container.Config.Volumes[target]; !exists {
m.Spec.Source = m.Name
}

View File

@@ -12,7 +12,7 @@ import (
// setupMounts configures the mount points for a container by appending each
// of the configured mounts on the container to the OCI mount structure
// which will ultimately be passed into the oci runtime during container creation.
-// It also ensures each of the mounts are lexographically sorted.
+// It also ensures each of the mounts are lexicographically sorted.
// BUGBUG TODO Windows containerd. This would be much better if it returned
// an array of runtime spec mounts, not container mounts. Then no need to

View File

@@ -585,7 +585,7 @@ func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, ma
}
// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
-// array is sorted from youngest to oldest. If requireReigstryMatch is true, the resulting array will contain
+// array is sorted from youngest to oldest. If requireRegistryMatch is true, the resulting array will contain
// only metadata entries having registry part of SourceRepository matching the part of repoInfo.
func getRepositoryMountCandidates(
repoInfo reference.Named,

View File

@@ -679,7 +679,7 @@ Use `df <source-dir>` to figure out the source mount and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation
properties of source mount. If `findmnt` utility is not available, then one
can look at mount entry for source mount point in `/proc/self/mountinfo`. Look
-at `optional fields` and see if any propagaion properties are specified.
+at `optional fields` and see if any propagation properties are specified.
`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if
nothing is there that means mount is `private`.
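
For readers following the `/proc/self/mountinfo` procedure the corrected passage describes, here is a minimal, hypothetical Go sketch (not part of this commit; the `propagationOf` helper and program layout are invented for illustration) that applies the same `optional fields` rule: `shared:X` means shared, `master:X` means slave, and neither means private.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// propagationOf reports the propagation mode of the given mount point by
// scanning /proc/self/mountinfo. The optional fields sit between field 6
// and the single "-" separator on each line.
func propagationOf(mountPoint string) (string, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return "", err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		// Field 5 (index 4) is the mount point.
		if len(fields) < 7 || fields[4] != mountPoint {
			continue
		}
		var shared, slave bool
		for _, opt := range fields[6:] {
			if opt == "-" {
				break
			}
			shared = shared || strings.HasPrefix(opt, "shared:")
			slave = slave || strings.HasPrefix(opt, "master:")
		}
		switch {
		case shared && slave:
			return "shared and slave", nil
		case shared:
			return "shared", nil
		case slave:
			return "slave", nil
		default:
			return "private", nil
		}
	}
	return "", fmt.Errorf("mount point %s not found", mountPoint)
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: propagation <mount-point>")
		os.Exit(1)
	}
	mode, err := propagationOf(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(mode)
}
```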

View File

@@ -31,5 +31,5 @@ docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image
Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`
the required device when it is added.
-NOTE: initially present devices still need to be explicitely added to
+NOTE: initially present devices still need to be explicitly added to
the create/run command

View File

@@ -60,7 +60,7 @@ Use `df <source-dir>` to figure out the source mount and then use
`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation
properties of source mount. If `findmnt` utility is not available, then one
can look at mount entry for source mount point in `/proc/self/mountinfo`. Look
-at `optional fields` and see if any propagaion properties are specified.
+at `optional fields` and see if any propagation properties are specified.
`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if
nothing is there that means mount is `private`.

View File

@@ -36,7 +36,7 @@ Filter output based on these conditions:
ID NAME DESCRIPTION ENABLED
869080b57404 tiborvass/sample-volume-plugin:latest A sample volume plugin for Docker true
-## Display plguins with `volumedriver` capability
+## Display plugins with `volumedriver` capability
$ docker plugin ls --filter capability=volumedriver --format "table {{.ID}}\t{{.Name}}"
ID Name