linting changes
Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com>
commit 2f6e4fdb29 (parent e69df2589c)
11 changed files with 48 additions and 27 deletions
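
Most of the hunks below apply the same few golint conventions: a comment on an exported identifier must begin with that identifier's name, an else that follows a terminating return is dropped and its body outdented, x -= 1 becomes x--, and format strings lose trailing punctuation. A minimal, hypothetical sketch of the doc-comment rule (Greet is not part of this commit):

package greeting

// Greet returns a greeting for the given name.
// golint requires the comment on an exported identifier to start with the
// identifier itself ("Greet ..."), which is the pattern applied to the
// exported Docker helpers in the hunks below.
func Greet(name string) string {
    return "Hello, " + name
}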
@@ -11,7 +11,7 @@ import (
 	"github.com/docker/docker/pkg/system"
 )
 
-// canonicalTarNameForPath returns platform-specific filepath
+// CanonicalTarNameForPath returns platform-specific filepath
 // to canonical posix-style path for tar archival. p is relative
 // path.
 func CanonicalTarNameForPath(p string) (string, error) {

@@ -133,7 +133,7 @@ func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
 		helloStat.Size() != fi.Size() ||
 		!bytes.Equal(helloData, b) {
 		// codepath taken if hello has been modified
-		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
+		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
 	}
 
 	// Check that nothing in dest/ has the same content as victim/hello.

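The only change here is the dropped period after the final %#v verb, presumably to satisfy the Go style convention that error and log messages should not end in punctuation, since they are often wrapped into longer messages. A tiny, hypothetical illustration:

package main

import (
    "errors"
    "fmt"
)

func main() {
    // Discouraged: the trailing period reads badly once the error is
    // wrapped into a longer message.
    bad := errors.New("archive breakout detected.")

    // Preferred, matching the change in the hunk above.
    good := errors.New("archive breakout detected")

    fmt.Println(bad, good)
}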
@@ -58,6 +58,10 @@ func untar() {
 	os.Exit(0)
 }
 
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
 	if tarArchive == nil {
 		return fmt.Errorf("Empty archive")

@@ -133,17 +137,18 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
 			return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
 		}
 		return nil
-	} else {
-		cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
-		out, err := cmd.CombinedOutput()
-		if err != nil {
-			return fmt.Errorf("Untar %s %s", err, out)
-		}
-		return nil
+	}
+	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Untar %s %s", err, out)
+	}
+	return nil
+
 }
 
 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
 // If either Tar or Untar fails, TarUntar aborts and returns the error.
 func TarUntar(src, dst string) error {
 	return chrootArchiver.TarUntar(src, dst)
 }

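For context, here is a hedged sketch of calling the newly documented Untar entry point. The pkg/chrootarchive import path and the file name are assumptions; the signature and the accepted compression formats come from the hunks above.

package main

import (
    "log"
    "os"

    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/chrootarchive" // assumed import path
)

func main() {
    // The stream may be identity, gzip, bzip2 or xz compressed,
    // per the doc comment added above. The file name is illustrative.
    f, err := os.Open("layer.tar")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Unpack into the destination directory with default options.
    if err := chrootarchive.Untar(f, "/tmp/rootfs", &archive.TarOptions{}); err != nil {
        log.Fatal(err)
    }
}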
@@ -69,6 +69,9 @@ func applyLayer() {
 	os.Exit(0)
 }
 
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
 func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
 	dest = filepath.Clean(dest)
 	decompressed, err := archive.DecompressStream(layer)

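A similar hedged sketch for the documented ApplyLayer; the use of DecompressStream in the hunk suggests archive.ArchiveReader accepts an ordinary reader, and the paths are illustrative.

package main

import (
    "log"
    "os"

    "github.com/docker/docker/pkg/chrootarchive" // assumed import path
)

func main() {
    // A layer diff in the standard tar layer format (illustrative path).
    layer, err := os.Open("layer.tar")
    if err != nil {
        log.Fatal(err)
    }
    defer layer.Close()

    // Per the comment added above, ApplyLayer unpacks the diff into dest
    // and reports the size in bytes of the layer contents.
    size, err := chrootarchive.ApplyLayer("/tmp/rootfs", layer)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("applied %d bytes", size)
}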
@@ -20,7 +20,7 @@ func Empty(pattern string) bool {
 	return pattern == ""
 }
 
-// Cleanpatterns takes a slice of patterns returns a new
+// CleanPatterns takes a slice of patterns returns a new
 // slice of patterns cleaned with filepath.Clean, stripped
 // of any empty patterns and lets the caller know whether the
 // slice contains any exception patterns (prefixed with !).

@@ -73,7 +73,7 @@ func Matches(file string, patterns []string) (bool, error) {
 	return OptimizedMatches(file, patterns, patDirs)
 }
 
-// Matches is basically the same as fileutils.Matches() but optimized for archive.go.
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
 // It will assume that the inputs have been preprocessed and therefore the function
 // doen't need to do as much error checking and clean-up. This was done to avoid
 // repeating these steps on each file being checked during the archive process.

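A short usage sketch for the exclusion helpers documented above, using the Matches signature from the hunk header; the import path and patterns are assumptions.

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/fileutils" // assumed import path
)

func main() {
    // .dockerignore-style patterns; "!" marks an exception pattern,
    // as described by the CleanPatterns comment above.
    patterns := []string{"vendor/*", "!vendor/keep.txt"}

    excluded, err := fileutils.Matches("vendor/extra.txt", patterns)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("excluded:", excluded) // expected: true for this pattern set
}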
@@ -8,6 +8,8 @@ import (
 	_ "code.google.com/p/gosqlite/sqlite3" // registers sqlite
 )
 
+// NewSqliteConn opens a connection to a sqlite
+// database.
 func NewSqliteConn(root string) (*Database, error) {
 	conn, err := sql.Open("sqlite3", root)
 	if err != nil {

@@ -41,17 +41,25 @@ type Edge struct {
 	ParentID string
 }
 
+// Entities stores the list of entities
 type Entities map[string]*Entity
+
+// Edges stores the relationships between entities
 type Edges []*Edge
 
+// WalkFunc is a function invoked to process an individual entity
 type WalkFunc func(fullPath string, entity *Entity) error
 
-// Graph database for storing entities and their relationships
+// Database is a graph database for storing entities and their relationships
 type Database struct {
 	conn *sql.DB
 	mux  sync.RWMutex
 }
 
+// IsNonUniqueNameError processes the error to check if it's caused by
+// a constraint violation.
+// This is necessary because the error isn't the same across various
+// sqlite versions.
 func IsNonUniqueNameError(err error) bool {
 	str := err.Error()
 	// sqlite 3.7.17-1ubuntu1 returns:

@@ -72,7 +80,7 @@ func IsNonUniqueNameError(err error) bool {
 	return false
 }
 
-// Create a new graph database initialized with a root entity
+// NewDatabase creates a new graph database initialized with a root entity
 func NewDatabase(conn *sql.DB) (*Database, error) {
 	if conn == nil {
 		return nil, fmt.Errorf("Database connection cannot be nil")

@@ -163,7 +171,7 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
 	return e, nil
 }
 
-// Return true if a name already exists in the database
+// Exists returns true if a name already exists in the database
 func (db *Database) Exists(name string) bool {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -190,14 +198,14 @@ func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) erro
 	return nil
 }
 
-// Return the root "/" entity for the database
+// RootEntity returns the root "/" entity for the database
 func (db *Database) RootEntity() *Entity {
 	return &Entity{
 		id: "0",
 	}
 }
 
-// Return the entity for a given path
+// Get returns the entity for a given path
 func (db *Database) Get(name string) *Entity {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -274,7 +282,7 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
 	return nil
 }
 
-// Return the children of the specified entity
+// Children returns the children of the specified entity
 func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -287,7 +295,7 @@ func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
 	return db.children(e, name, depth, nil)
 }
 
-// Return the parents of a specified entity
+// Parents returns the parents of a specified entity
 func (db *Database) Parents(name string) ([]string, error) {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -299,7 +307,7 @@ func (db *Database) Parents(name string) ([]string, error) {
 	return db.parents(e)
 }
 
-// Return the refrence count for a specified id
+// Refs returns the refrence count for a specified id
 func (db *Database) Refs(id string) int {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -311,7 +319,7 @@ func (db *Database) Refs(id string) int {
 	return count
 }
 
-// Return all the id's path references
+// RefPaths returns all the id's path references
 func (db *Database) RefPaths(id string) Edges {
 	db.mux.RLock()
 	defer db.mux.RUnlock()

@@ -360,7 +368,7 @@ func (db *Database) Delete(name string) error {
 	return nil
 }
 
-// Remove the entity with the specified id
+// Purge removes the entity with the specified id
 // Walk the graph to make sure all references to the entity
 // are removed and return the number of references removed
 func (db *Database) Purge(id string) (int, error) {

@@ -480,7 +488,7 @@ func (db *Database) children(e *Entity, name string, depth int, entities []WalkM
 		if depth != 0 {
 			nDepth := depth
 			if depth != -1 {
-				nDepth -= 1
+				nDepth--
 			}
 			entities, err = db.children(child, meta.FullPath, nDepth, entities)
 			if err != nil {

@@ -523,12 +531,12 @@ func (db *Database) child(parent *Entity, name string) *Entity {
 	return &Entity{id}
 }
 
-// Return the id used to reference this entity
+// ID returns the id used to reference this entity
 func (e *Entity) ID() string {
 	return e.id
 }
 
-// Return the paths sorted by depth
+// Paths returns the paths sorted by depth
 func (e Entities) Paths() []string {
 	out := make([]string, len(e))
 	var i int

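Taken together, the graphdb hunks document a small path-indexed entity store backed by sqlite. A hedged sketch of how the documented pieces fit together; the import path and the database file are assumptions, while the signatures (NewSqliteConn, Set, Exists, Walk, WalkFunc, Entity.ID) are taken from the hunks above.

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/graphdb" // assumed import path
)

func main() {
    // NewSqliteConn opens the backing sqlite database and returns a
    // *Database, per the comment added above (illustrative file path).
    db, err := graphdb.NewSqliteConn("/tmp/links.db")
    if err != nil {
        log.Fatal(err)
    }

    // Set associates a path with an entity id; Exists looks it up.
    if _, err := db.Set("/web", "c0ffee"); err != nil {
        log.Fatal(err)
    }
    fmt.Println("exists:", db.Exists("/web")) // expected: true

    // Walk visits entities below a path; per the children() hunk above,
    // a depth of -1 appears to disable the depth limit.
    err = db.Walk("/", func(fullPath string, entity *graphdb.Entity) error {
        fmt.Println(fullPath, entity.ID())
        return nil
    }, -1)
    if err != nil {
        log.Fatal(err)
    }
}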
@@ -10,7 +10,7 @@ func split(p string) []string {
 	return strings.Split(p, "/")
 }
 
-// Returns the depth or number of / in a given path
+// PathDepth returns the depth or number of / in a given path
 func PathDepth(p string) int {
 	parts := split(p)
 	if len(parts) == 2 && parts[1] == "" {

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/docker/pkg/jsonmessage"
 )
 
-// Request a given URL and return an io.Reader
+// Download requests a given URL and returns an io.Reader
 func Download(url string) (resp *http.Response, err error) {
 	if resp, err = http.Get(url); err != nil {
 		return nil, err

@@ -18,6 +18,7 @@ func Download(url string) (resp *http.Response, err error) {
 	return resp, nil
 }
 
+// NewHTTPRequestError returns a JSON response error
 func NewHTTPRequestError(msg string, res *http.Response) error {
 	return &jsonmessage.JSONError{
 		Message: msg,

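A hedged sketch combining the two documented helpers in this file: Download wraps the HTTP GET shown in the first hunk, and NewHTTPRequestError builds the jsonmessage error documented in the second. The import path and URL are assumptions.

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/httputils" // assumed import path
)

func main() {
    resp, err := httputils.Download("https://example.com/index.html")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        // Wrap the failure in the JSON response error documented above.
        log.Fatal(httputils.NewHTTPRequestError(
            fmt.Sprintf("unexpected status %s", resp.Status), resp))
    }
    fmt.Println("content type:", resp.Header.Get("Content-Type"))
}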
@@ -26,6 +26,8 @@ func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, tot
 	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
 }
 
+// ResumableRequestReaderWithInitialResponse makes it possible to resume
+// reading the body of an already initiated request.
 func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
 	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
 }

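The hunk header shows ResumableRequestReader taking the same client/request/maxfail/totalsize arguments as its new ...WithInitialResponse sibling, which returns an io.ReadCloser; the sketch below assumes the plain constructor does too. The import path and values are illustrative.

package main

import (
    "io"
    "log"
    "net/http"
    "os"

    "github.com/docker/docker/pkg/httputils" // assumed import path
)

func main() {
    req, err := http.NewRequest("GET", "https://example.com/big.tar", nil)
    if err != nil {
        log.Fatal(err)
    }

    // Wrap the request so the body can be resumed after transient failures;
    // maxfail (5) and totalsize (0, unknown) are illustrative values.
    body := httputils.ResumableRequestReader(http.DefaultClient, req, 5, 0)
    defer body.Close()

    if _, err := io.Copy(os.Stdout, body); err != nil {
        log.Fatal(err)
    }
}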
@@ -1,5 +1,5 @@
 /*
-listenbuffer uses the kernel's listening backlog functionality to queue
+Package listenbuffer uses the kernel's listening backlog functionality to queue
 connections, allowing applications to start listening immediately and handle
 connections later. This is signaled by closing the activation channel passed to
 the constructor.

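The package comment describes a pattern rather than an API: keep the socket listening so the kernel backlog queues connections, and only start handing them to the application once an activation channel is closed. The sketch below is purely illustrative of that idea; bufferedListener is a hypothetical type, not the package's actual constructor.

package main

import (
    "log"
    "net"
)

// bufferedListener is a hypothetical wrapper illustrating the idea from the
// package comment: the socket is already listening (so the kernel queues
// incoming connections in its backlog), but Accept does not hand them to
// the application until the activation channel is closed.
type bufferedListener struct {
    net.Listener
    activate <-chan struct{}
}

func (l *bufferedListener) Accept() (net.Conn, error) {
    <-l.activate // block until the owner signals readiness
    return l.Listener.Accept()
}

func main() {
    inner, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        log.Fatal(err)
    }
    activate := make(chan struct{})
    l := &bufferedListener{Listener: inner, activate: activate}

    // Connections are already being queued by the kernel; once the
    // application has finished initializing, closing the channel lets
    // Accept start draining them.
    close(activate)
    _ = l // hand l to an HTTP server, etc.
    inner.Close()
}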