
Merge pull request #13961 from unclejack/linting_changes

linting changes
David Calavera 10 years ago
parent commit b6531d4ec3

+ 1 - 1
pkg/archive/archive_unix.go

@@ -11,7 +11,7 @@ import (
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/system"
 )
 )
 
 
-// canonicalTarNameForPath returns platform-specific filepath
+// CanonicalTarNameForPath returns platform-specific filepath
 // to canonical posix-style path for tar archival. p is relative
 // to canonical posix-style path for tar archival. p is relative
 // path.
 // path.
 func CanonicalTarNameForPath(p string) (string, error) {
 func CanonicalTarNameForPath(p string) (string, error) {
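Since the rename makes CanonicalTarNameForPath part of the package's exported surface, a minimal calling sketch may help; the input path is purely illustrative and error handling is abbreviated:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Convert a relative, platform-specific path into the canonical
	// POSIX-style name stored in a tar header.
	name, err := archive.CanonicalTarNameForPath("dir/file.txt") // illustrative input
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tar entry name:", name)
}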

+ 1 - 1
pkg/archive/utils_test.go

@@ -133,7 +133,7 @@ func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
 		helloStat.Size() != fi.Size() ||
 		!bytes.Equal(helloData, b) {
 		// codepath taken if hello has been modified
-		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
+		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
 	}
 
 	// Check that nothing in dest/ has the same content as victim/hello.

+ 12 - 7
pkg/chrootarchive/archive.go

@@ -58,6 +58,10 @@ func untar() {
 	os.Exit(0)
 }
 
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//  identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
 	if tarArchive == nil {
 		return fmt.Errorf("Empty archive")
@@ -133,17 +137,18 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
 			return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
 		}
 		return nil
-	} else {
-		cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
-		out, err := cmd.CombinedOutput()
-		if err != nil {
-			return fmt.Errorf("Untar %s %s", err, out)
-		}
-		return nil
 	}
+	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Untar %s %s", err, out)
+	}
+	return nil
 
 }
 
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
 func TarUntar(src, dst string) error {
 	return chrootArchiver.TarUntar(src, dst)
 }
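As the new doc comments spell out the exported entry points, here is a minimal usage sketch; the tarball and destination paths are hypothetical and error handling is kept short:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	// Unpack a (possibly compressed) tar stream into /tmp/dest.
	f, err := os.Open("/tmp/layer.tar.gz") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := chrootarchive.Untar(f, "/tmp/dest", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}

	// Copy a directory tree by piping Tar into Untar.
	if err := chrootarchive.TarUntar("/tmp/dest", "/tmp/copy"); err != nil {
		log.Fatal(err)
	}
}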

+ 3 - 0
pkg/chrootarchive/diff.go

@@ -69,6 +69,9 @@ func applyLayer() {
 	os.Exit(0)
 }
 
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
 func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
 	dest = filepath.Clean(dest)
 	decompressed, err := archive.DecompressStream(layer)
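A short sketch of calling the newly documented ApplyLayer; the layer file and rootfs paths are hypothetical:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar") // hypothetical layer diff
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// The function decompresses the stream itself (see DecompressStream above)
	// and reports the unpacked size in bytes.
	size, err := chrootarchive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied layer: %d bytes", size)
}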

+ 2 - 2
pkg/fileutils/fileutils.go

@@ -20,7 +20,7 @@ func Empty(pattern string) bool {
 	return pattern == ""
 }
 
-// Cleanpatterns takes a slice of patterns returns a new
+// CleanPatterns takes a slice of patterns returns a new
 // slice of patterns cleaned with filepath.Clean, stripped
 // of any empty patterns and lets the caller know whether the
 // slice contains any exception patterns (prefixed with !).
@@ -73,7 +73,7 @@ func Matches(file string, patterns []string) (bool, error) {
 	return OptimizedMatches(file, patterns, patDirs)
 }
 
-// Matches is basically the same as fileutils.Matches() but optimized for archive.go.
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
 // It will assume that the inputs have been preprocessed and therefore the function
 // doen't need to do as much error checking and clean-up. This was done to avoid
 // repeating these steps on each file being checked during the archive process.
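Matches (whose signature appears in the hunk header above) delegates to the renamed OptimizedMatches. A small sketch of the pattern matching, with illustrative file names and patterns; "!" marks an exception pattern as described in the CleanPatterns comment:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"*.md", "!README.md"} // illustrative patterns

	for _, f := range []string{"NOTES.md", "README.md", "main.go"} {
		matched, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s matched=%v\n", f, matched)
	}
}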

+ 2 - 0
pkg/graphdb/conn_sqlite3.go

@@ -8,6 +8,8 @@ import (
 	_ "code.google.com/p/gosqlite/sqlite3" // registers sqlite
 	_ "code.google.com/p/gosqlite/sqlite3" // registers sqlite
 )
 )
 
 
+// NewSqliteConn opens a connection to a sqlite
+// database.
 func NewSqliteConn(root string) (*Database, error) {
 func NewSqliteConn(root string) (*Database, error) {
 	conn, err := sql.Open("sqlite3", root)
 	conn, err := sql.Open("sqlite3", root)
 	if err != nil {
 	if err != nil {

+ 21 - 13
pkg/graphdb/graphdb.go

@@ -41,17 +41,25 @@ type Edge struct {
 	ParentID string
 }
 
+// Entities stores the list of entities
 type Entities map[string]*Entity
+
+// Edges stores the relationships between entities
 type Edges []*Edge
 
+// WalkFunc is a function invoked to process an individual entity
 type WalkFunc func(fullPath string, entity *Entity) error
 
-// Graph database for storing entities and their relationships
+// Database is a graph database for storing entities and their relationships
 type Database struct {
 	conn *sql.DB
 	mux  sync.RWMutex
 }
 
+// IsNonUniqueNameError processes the error to check if it's caused by
+// a constraint violation.
+// This is necessary because the error isn't the same across various
+// sqlite versions.
 func IsNonUniqueNameError(err error) bool {
 	str := err.Error()
 	// sqlite 3.7.17-1ubuntu1 returns:
@@ -72,7 +80,7 @@ func IsNonUniqueNameError(err error) bool {
 	return false
 }
 
-// Create a new graph database initialized with a root entity
+// NewDatabase creates a new graph database initialized with a root entity
 func NewDatabase(conn *sql.DB) (*Database, error) {
 	if conn == nil {
 		return nil, fmt.Errorf("Database connection cannot be nil")
@@ -163,7 +171,7 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
 	return e, nil
 }
 
-// Return true if a name already exists in the database
+// Exists returns true if a name already exists in the database
 func (db *Database) Exists(name string) bool {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -190,14 +198,14 @@ func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) erro
 	return nil
 }
 
-// Return the root "/" entity for the database
+// RootEntity returns the root "/" entity for the database
 func (db *Database) RootEntity() *Entity {
 	return &Entity{
 		id: "0",
 	}
 }
 
-// Return the entity for a given path
+// Get returns the entity for a given path
 func (db *Database) Get(name string) *Entity {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -274,7 +282,7 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
 	return nil
 }
 
-// Return the children of the specified entity
+// Children returns the children of the specified entity
 func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -287,7 +295,7 @@ func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
 	return db.children(e, name, depth, nil)
 }
 
-// Return the parents of a specified entity
+// Parents returns the parents of a specified entity
 func (db *Database) Parents(name string) ([]string, error) {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -299,7 +307,7 @@ func (db *Database) Parents(name string) ([]string, error) {
 	return db.parents(e)
 }
 
-// Return the refrence count for a specified id
+// Refs returns the refrence count for a specified id
 func (db *Database) Refs(id string) int {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -311,7 +319,7 @@ func (db *Database) Refs(id string) int {
 	return count
 }
 
-// Return all the id's path references
+// RefPaths returns all the id's path references
 func (db *Database) RefPaths(id string) Edges {
 	db.mux.RLock()
 	defer db.mux.RUnlock()
@@ -360,7 +368,7 @@ func (db *Database) Delete(name string) error {
 	return nil
 }
 
-// Remove the entity with the specified id
+// Purge removes the entity with the specified id
 // Walk the graph to make sure all references to the entity
 // are removed and return the number of references removed
 func (db *Database) Purge(id string) (int, error) {
@@ -480,7 +488,7 @@ func (db *Database) children(e *Entity, name string, depth int, entities []WalkM
 		if depth != 0 {
 			nDepth := depth
 			if depth != -1 {
-				nDepth -= 1
+				nDepth--
			}
 			entities, err = db.children(child, meta.FullPath, nDepth, entities)
 			if err != nil {
@@ -523,12 +531,12 @@ func (db *Database) child(parent *Entity, name string) *Entity {
 	return &Entity{id}
 }
 
-// Return the id used to reference this entity
+// ID returns the id used to reference this entity
 func (e *Entity) ID() string {
 	return e.id
 }
 
-// Return the paths sorted by depth
+// Paths returns the paths sorted by depth
 func (e Entities) Paths() []string {
 	out := make([]string, len(e))
 	var i int
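Taken together, the renamed comments describe graphdb's public surface: a path-to-entity store backed by sqlite. A hedged usage sketch combining NewSqliteConn from the previous file with the methods documented here; the database path and entity id are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/graphdb"
)

func main() {
	// NewSqliteConn opens (or creates) the sqlite database backing the graph.
	db, err := graphdb.NewSqliteConn("/tmp/graph.db") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	// Register an entity under a path, then query it back.
	if _, err := db.Set("/containers/web", "abc123"); err != nil {
		if graphdb.IsNonUniqueNameError(err) {
			log.Fatal("name already taken")
		}
		log.Fatal(err)
	}

	fmt.Println("exists:", db.Exists("/containers/web"))
	if e := db.Get("/containers/web"); e != nil {
		fmt.Println("id:", e.ID(), "refs:", db.Refs(e.ID()))
	}
}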

+ 1 - 1
pkg/graphdb/utils.go

@@ -10,7 +10,7 @@ func split(p string) []string {
 	return strings.Split(p, "/")
 }
 
-// Returns the depth or number of / in a given path
+// PathDepth returns the depth or number of / in a given path
 func PathDepth(p string) int {
 	parts := split(p)
 	if len(parts) == 2 && parts[1] == "" {

+ 2 - 1
pkg/httputils/httputils.go

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/jsonmessage"
 )
 )
 
 
-// Request a given URL and return an io.Reader
+// Download requests a given URL and returns an io.Reader
 func Download(url string) (resp *http.Response, err error) {
 func Download(url string) (resp *http.Response, err error) {
 	if resp, err = http.Get(url); err != nil {
 	if resp, err = http.Get(url); err != nil {
 		return nil, err
 		return nil, err
@@ -18,6 +18,7 @@ func Download(url string) (resp *http.Response, err error) {
 	return resp, nil
 	return resp, nil
 }
 }
 
 
+// NewHTTPRequestError returns a JSON response error
 func NewHTTPRequestError(msg string, res *http.Response) error {
 func NewHTTPRequestError(msg string, res *http.Response) error {
 	return &jsonmessage.JSONError{
 	return &jsonmessage.JSONError{
 		Message: msg,
 		Message: msg,
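A brief sketch of the two httputils helpers documented above; the URL is hypothetical and the wrapped error is only built for the read-failure case:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/httputils"
)

func main() {
	resp, err := httputils.Download("https://example.com/archive.tgz") // hypothetical URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		// Wrap the failure together with the HTTP response metadata.
		log.Fatal(httputils.NewHTTPRequestError("could not read body", resp))
	}
}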

+ 2 - 0
pkg/httputils/resumablerequestreader.go

@@ -26,6 +26,8 @@ func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, tot
 	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
 }
 
+// ResumableRequestReaderWithInitialResponse makes it possible to resume
+// reading the body of an already initiated request.
 func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
 	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
 }
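The newly documented ResumableRequestReaderWithInitialResponse complements ResumableRequestReader, whose signature is partially visible in the hunk header. A hedged sketch of the plain variant, assuming it returns an io.ReadCloser like the variant above; the URL, retry limit, and total size are illustrative:

package main

import (
	"io"
	"log"
	"net/http"
	"os"

	"github.com/docker/docker/pkg/httputils"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/big-file", nil) // hypothetical URL
	if err != nil {
		log.Fatal(err)
	}

	// maxfail=5 retries and a placeholder total size (both illustrative values).
	body := httputils.ResumableRequestReader(http.DefaultClient, req, 5, 0)
	defer body.Close()

	if _, err := io.Copy(os.Stdout, body); err != nil {
		log.Fatal(err)
	}
}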

+ 1 - 1
pkg/listenbuffer/buffer.go

@@ -1,5 +1,5 @@
 /*
-listenbuffer uses the kernel's listening backlog functionality to queue
+Package listenbuffer uses the kernel's listening backlog functionality to queue
 connections, allowing applications to start listening immediately and handle
 connections later. This is signaled by closing the activation channel passed to
 the constructor.
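The rewritten package comment describes the mechanism: connections queue in the kernel's listen backlog until an activation channel is closed, after which the application starts accepting. Below is a self-contained sketch of that pattern using only the standard library; it illustrates the behaviour described, not the package's actual implementation:

package main

import (
	"fmt"
	"net"
)

// bufferedListener wraps a net.Listener and blocks Accept until the
// activation channel is closed, mirroring the behaviour described above.
type bufferedListener struct {
	net.Listener
	activate <-chan struct{}
}

func (l *bufferedListener) Accept() (net.Conn, error) {
	<-l.activate // connections wait in the kernel backlog until activation
	return l.Listener.Accept()
}

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	activate := make(chan struct{})
	ln := &bufferedListener{Listener: inner, activate: activate}

	// A client can connect immediately; it simply isn't served yet.
	go func() {
		if c, err := net.Dial("tcp", inner.Addr().String()); err == nil {
			c.Close()
		}
	}()

	// ... finish application start-up, then signal readiness:
	close(activate)

	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	fmt.Println("accepted:", conn.RemoteAddr())
	conn.Close()
}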