.: remove trailing white spaces
blame tibor this one ;-)

```
find . -type f -not -name '*.png' -not -name '*.go' -not -name '*.md' -not -name '*.tar' -not -name '*.pem' -not -path './vendor/*' -not -path './.git/*' -not -path '*/testdata/*' -not -path './docs/*images*' -not -path '*/testfiles/*' -not -path './bundles/*' -not -path './docs/*static*/*' -not -path './docs/*article-img/*' -exec grep -HnEl '[[:space:]]$' {} \; | xargs sed -iE 's/[[:space:]]*$//'
```

Signed-off-by: Vincent Batts <vbatts@redhat.com>
parent a0cfe83435
commit 7617ec176d
20 changed files with 109 additions and 109 deletions
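A quick way to confirm the cleanup took effect is to re-run just the search half of the pipeline. A minimal sketch, assuming GNU grep and an abbreviated exclusion list (a real check would mirror the `find` filters in the commit message above):

```
# Sketch of a follow-up check (not part of the original commit): list any
# remaining lines that still end in whitespace, skipping vendored code and
# the .git directory.
if grep -rnIE --exclude-dir=.git --exclude-dir=vendor '[[:space:]]+$' .; then
    echo 'trailing whitespace found in the files listed above' >&2
    exit 1
fi
echo 'no trailing whitespace found'
```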
MAINTAINERS (26 changes)
@@ -113,7 +113,7 @@ It is the responsibility of the subsystem maintainers to process patches affecti
manner.

* If the change affects areas of the code which are not part of a subsystem,
or if subsystem maintainers are unable to reach a timely decision, it must be approved by
the core maintainers.

* If the change affects the UI or public APIs, or if it represents a major change in architecture,
@@ -200,11 +200,11 @@ for each.
2-code-review = "requires more code changes"
1-design-review = "raises design concerns"
4-merge = "general case"

# Docs approval
[Rules.review.docs-approval]
# Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers.
# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR).

# Merge
[Rules.review.states.4-merge]
@@ -268,7 +268,7 @@ made through a pull request.

# The chief architect is responsible for the overall integrity of the technical architecture
# across all subsystems, and the consistency of APIs and UI.
#
# Changes to UI, public APIs and overall architecture (for example a plugin system) must
# be approved by the chief architect.
"Chief Architect" = "shykes"
@@ -314,7 +314,7 @@ made through a pull request.
]

# The chief maintainer is responsible for all aspects of quality for the project including
# code reviews, usability, stability, security, performance, etc.
# The most important function of the chief maintainer is to lead by example. On the first
# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
# be fine".
@@ -359,9 +359,9 @@ made through a pull request.
# has a dedicated group of maintainers, which are dedicated to that subsytem and responsible
# for its quality.
# This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
#
# The maintainers of each subsytem are responsible for:
#
# 1. Exposing a clear road map for improving their subsystem.
# 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem.
# 3. Be available to anyone with questions, bug reports, criticism etc.
@@ -371,9 +371,9 @@ made through a pull request.
# road map of the project.
#
# #### How to review patches to your subsystem
#
# Accepting pull requests:
#
# - If the pull request appears to be ready to merge, give it a `LGTM`, which
# stands for "Looks Good To Me".
# - If the pull request has some small problems that need to be changed, make
@@ -384,9 +384,9 @@ made through a pull request.
# - If the PR only needs a few changes before being merged, any MAINTAINER can
# make a replacement PR that incorporates the existing commits and fixes the
# problems before a fast track merge.
#
# Closing pull requests:
#
# - If a PR appears to be abandoned, after having attempted to contact the
# original contributor, then a replacement PR may be made. Once the
# replacement PR is made, any contributor may close the original one.
@@ -584,12 +584,12 @@ made through a pull request.
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"

[people.spf13]
Name = "Steve Francia"
Email = "steve.francia@gmail.com"
GitHub = "spf13"

[people.sven]
Name = "Sven Dowideit"
Email = "SvenDowideit@home.org.au"
NOTICE (4 changes)
@@ -10,9 +10,9 @@ The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov


@@ -15,7 +15,7 @@ hello\\ | hello\
'hello\' | hello\
"''" | ''
$. | $.
$1 |
he$1x | hex
he$.x | he$.x
he$pwd. | he.

@@ -22,7 +22,7 @@
# must have access to the socket for the completions to function correctly
#
# Note for developers:
# Please arrange options sorted alphabetically by long name with the short
# options immediately following their corresponding long form.
# This order should be applied to lists, alternatives and code blocks.

@@ -257,8 +257,8 @@ _docker_build() {
;;
--file|-f)
_filedir
return
;;
esac

case "$cur" in

@@ -3,7 +3,7 @@
# AUTHOR: Jessica Frazelle <jess@docker.com>
# COMMENTS:
# This file describes how to build a gparted container with all
# dependencies installed. It uses native X11 unix socket.
# Tested on Debian Jessie
# USAGE:
# # Download gparted Dockerfile

@@ -41,39 +41,39 @@ while [ $# -gt 0 ]; do
[ "$imageId" != "$tag" ] || imageId=
[ "$tag" != "$imageTag" ] || tag='latest'
tag="${tag%@*}"

token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"

if [ -z "$imageId" ]; then
imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")"
imageId="${imageId//\"/}"
fi

ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")"
if [ "${ancestryJson:0:1}" != '[' ]; then
echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:"
echo >&2 " $ancestryJson"
exit 1
fi

IFS=','
ancestry=( ${ancestryJson//[\[\] \"]/} )
unset IFS

if [ -s "$dir/tags-$image.tmp" ]; then
echo -n ', ' >> "$dir/tags-$image.tmp"
else
images=( "${images[@]}" "$image" )
fi
echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"

echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
for imageId in "${ancestry[@]}"; do
mkdir -p "$dir/$imageId"
echo '1.0' > "$dir/$imageId/VERSION"

curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json"

# TODO figure out why "-C -" doesn't work here
# "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume."
# "HTTP/1.1 416 Requested Range Not Satisfiable"

@@ -10,11 +10,11 @@ cat <<-EOF
Description=$desc
Author=$auth
After=docker.service

[Service]
ExecStart=/usr/bin/docker start -a $cid
ExecStop=/usr/bin/docker stop -t 2 $cid

[Install]
WantedBy=local.target
EOF

@@ -1,5 +1,5 @@
# /etc/sysconfig/docker
#
# Other arguments to pass to the docker daemon process
# These will be parsed by the sysv initscript and appended
# to the arguments list passed to docker -d

@@ -14,9 +14,9 @@ justTar=

usage() {
echo >&2

echo >&2 "usage: $0 [options] repo suite [mirror]"

echo >&2
echo >&2 'options: (not recommended)'
echo >&2 " -p set an http_proxy for debootstrap"
@@ -26,20 +26,20 @@ usage() {
echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list"
echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"

echo >&2
echo >&2 " ie: $0 username/debian squeeze"
echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/"

echo >&2
echo >&2 " ie: $0 username/ubuntu precise"
echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"

echo >&2
echo >&2 " ie: $0 -t precise.tar.bz2 precise"
echo >&2 " $0 -t wheezy.tgz wheezy"
echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"

echo >&2
}

@@ -145,10 +145,10 @@ if [ -z "$strictDebootstrap" ]; then
sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
sudo ln -sf /bin/true sbin/initctl
# see https://github.com/docker/docker/issues/446#issuecomment-16953173

# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
sudo chroot . apt-get clean

if strings usr/bin/dpkg | grep -q unsafe-io; then
# while we're at it, apt is unnecessarily slow inside containers
# this forces dpkg not to call sync() after package extraction and speeds up install
@@ -159,7 +159,7 @@ if [ -z "$strictDebootstrap" ]; then
# (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
# and ubuntu lucid/10.04 only has 1.15.5.6
fi

# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
{
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
@@ -167,17 +167,17 @@ if [ -z "$strictDebootstrap" ]; then
echo "APT::Update::Post-Invoke { ${aptGetClean} };"
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null

# and remove the translations, too
echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null

# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
# rm /usr/sbin/policy-rc.d
# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
# rm /etc/apt/apt.conf.d/no-cache
# rm /etc/apt/apt.conf.d/no-languages

if [ -z "$skipDetection" ]; then
# see also rudimentary platform detection in hack/install.sh
lsbDist=''
@@ -187,14 +187,14 @@ if [ -z "$strictDebootstrap" ]; then
if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
lsbDist='Debian'
fi

case "$lsbDist" in
Debian)
# add the updates and security repositories
if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
# ${suite}-updates only applies to non-unstable
sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list

# same for security updates
echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
fi
@@ -220,7 +220,7 @@ if [ -z "$strictDebootstrap" ]; then
;;
esac
fi

# make sure our packages lists are as up to date as we can get them
sudo chroot . apt-get update
sudo chroot . apt-get dist-upgrade -y
@@ -229,23 +229,23 @@ fi
if [ "$justTar" ]; then
# create the tarball file so it has the right permissions (ie, not root)
touch "$repo"

# fill the tarball
sudo tar --numeric-owner -caf "$repo" .
else
# create the image (and tag $repo:$suite)
sudo tar --numeric-owner -c . | $docker import - $repo:$suite

# test the image
$docker run -i -t $repo:$suite echo success

if [ -z "$skipDetection" ]; then
case "$lsbDist" in
Debian)
if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
# tag latest
$docker tag $repo:$suite $repo:latest

if [ -r etc/debian_version ]; then
# tag the specific debian release version (which is only reasonable to tag on debian stable)
ver=$(cat etc/debian_version)

@@ -19,7 +19,7 @@ shift
chrootPath="$(type -P chroot)"
rootfs_chroot() {
# "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately!

# set PATH and chroot away!
PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \
"$chrootPath" "$rootfsDir" "$@"
@@ -220,13 +220,13 @@ fi

(
set -x

# make sure we're fully up-to-date
rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y'

# delete all the apt list files since they're big and get stale quickly
rm -rf "$rootfsDir/var/lib/apt/lists"/*
# this forces "apt-get update" in dependent images, which is also good

mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
)

@@ -29,41 +29,41 @@ function template() {
# this should always match the template from CONTRIBUTING.md
cat <<- EOM
Description of problem:


\`docker version\`:
`${DOCKER_COMMAND} -D version`


\`docker info\`:
`${DOCKER_COMMAND} -D info`


\`uname -a\`:
`uname -a`


Environment details (AWS, VirtualBox, physical, etc.):


How reproducible:


Steps to Reproduce:
1.
2.
3.


Actual Results:


Expected Results:


Additional info:


EOM
}

@@ -81,7 +81,7 @@ echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
read -r -n 1 use_sudo
echo ""

if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
export DOCKER_COMMAND="sudo ${DOCKER}"
fi

@@ -18,12 +18,12 @@
<key>0</key>
<dict>
<key>name</key>
<string>keyword.control.dockerfile</string>
</dict>
<key>1</key>
<dict>
<key>name</key>
<string>keyword.other.special-method.dockerfile</string>
</dict>
</dict>
</dict>
@@ -35,12 +35,12 @@
<key>0</key>
<dict>
<key>name</key>
<string>keyword.operator.dockerfile</string>
</dict>
<key>1</key>
<dict>
<key>name</key>
<string>keyword.other.special-method.dockerfile</string>
</dict>
</dict>
</dict>

@@ -176,15 +176,15 @@ pages:
# Project:
- ['project/index.md', '**HIDDEN**']
- ['project/who-written-for.md', 'Contributor Guide', 'README first']
- ['project/software-required.md', 'Contributor Guide', 'Get required software']
- ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing']
- ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container']
- ['project/test-and-docs.md', 'Contributor Guide', 'Run tests and test documentation']
- ['project/make-a-contribution.md', 'Contributor Guide', 'Understand contribution workflow']
- ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue']
- ['project/work-issue.md', 'Contributor Guide', 'Work on an issue']
- ['project/create-pr.md', 'Contributor Guide', 'Create a pull request']
- ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review']
- ['project/advanced-contributing.md', 'Contributor Guide', 'Advanced contributing']
- ['project/get-help.md', 'Contributor Guide', 'Where to get help']
- ['project/coding-style.md', 'Contributor Guide', 'Coding style guide']

@@ -5,7 +5,7 @@ set -o pipefail

usage() {
cat >&2 <<'EOF'
To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
and set the AWS_S3_BUCKET env var to the name of your bucket.

@@ -6,7 +6,7 @@ FROM ubuntu
MAINTAINER SvenDowideit@docker.com

# Add the PostgreSQL PGP key to verify their Debian packages.
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8

# Add PostgreSQL's repository. It contains the most recent stable release
@@ -33,7 +33,7 @@ RUN /etc/init.d/postgresql start &&\
createdb -O docker docker

# Adjust PostgreSQL configuration so that remote connections to the
# database are possible.
RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf

# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``

@@ -3,23 +3,23 @@
if [ -z "$VALIDATE_UPSTREAM" ]; then
# this is kind of an expensive check, so let's not do this twice if we
# are running more than one validate bundlescript

VALIDATE_REPO='https://github.com/docker/docker.git'
VALIDATE_BRANCH='master'

if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
VALIDATE_BRANCH="${TRAVIS_BRANCH}"
fi

VALIDATE_HEAD="$(git rev-parse --verify HEAD)"

git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"

VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"

validate_diff() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git diff "$VALIDATE_COMMIT_DIFF" "$@"

@@ -5,7 +5,7 @@ DEST=$1

if [ -z "$DOCKER_CLIENTONLY" ]; then
source "$(dirname "$BASH_SOURCE")/.dockerinit"

hash_files "$DEST/dockerinit-$VERSION"
else
# DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)

@@ -5,7 +5,7 @@ DEST=$1

if [ -z "$DOCKER_CLIENTONLY" ]; then
source "$(dirname "$BASH_SOURCE")/.dockerinit-gccgo"

hash_files "$DEST/dockerinit-$VERSION"
else
# DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)

@@ -18,17 +18,17 @@ for d in "$CROSS/"*/*; do
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
mkdir -p "$DEST/$GOOS/$GOARCH"
TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz"

mkdir -p "$DEST/build"

mkdir -p "$DEST/build/usr/local/bin"
cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION"

tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr

hash_files "$TGZ"

rm -rf "$DEST/build"

echo "Created tgz: $TGZ"
done

@@ -11,17 +11,17 @@ clone() {
vcs=$1
pkg=$2
rev=$3

pkg_url=https://$pkg
target_dir=src/$pkg

echo -n "$pkg @ $rev: "

if [ -d $target_dir ]; then
echo -n 'rm old, '
rm -fr $target_dir
fi

echo -n 'clone, '
case $vcs in
git)
@@ -32,10 +32,10 @@ clone() {
hg clone --quiet --updaterev $rev $pkg_url $target_dir
;;
esac

echo -n 'rm VCS, '
( cd $target_dir && rm -rf .{git,hg} )

echo done
}