.: remove trailing white spaces

blame tibor this one ;-)

```
find . -type f -not -name '*.png' -not -name '*.go' -not -name '*.md' -not -name '*.tar' -not -name '*.pem' \
	-not -path './vendor/*' -not -path './.git/*' -not -path '*/testdata/*' -not -path './docs/*images*' \
	-not -path '*/testfiles/*' -not -path './bundles/*' -not -path './docs/*static*/*' -not -path './docs/*article-img/*' \
	-exec grep -HnEl '[[:space:]]$' {} \; | xargs sed -iE 's/[[:space:]]*$//'
```

Signed-off-by: Vincent Batts <vbatts@redhat.com>
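For reference, the same cleanup can be wrapped in a small re-runnable script. This is a minimal sketch only: it assumes GNU grep and GNU sed, the `check-trailing-ws.sh` name is hypothetical, and the exclusion list is shortened for illustration rather than the full list used above.

```bash
#!/usr/bin/env bash
# check-trailing-ws.sh -- hypothetical helper, not part of the original commit.
# Lists files that still contain trailing whitespace; with --fix, strips it in place.
# Assumes GNU grep (-I skips binaries) and GNU sed (-i in place, -E extended regex).
set -e

# Shortened exclusion list for illustration; extend with the same -not -name /
# -not -path filters used in the commit message as needed.
files="$(find . -type f \
	-not -name '*.png' -not -name '*.go' -not -name '*.md' \
	-not -path './vendor/*' -not -path './.git/*' \
	-exec grep -IlE '[[:space:]]+$' {} \; || true)"

if [ -z "$files" ]; then
	echo 'No trailing whitespace found.'
	exit 0
fi

if [ "${1:-}" = '--fix' ]; then
	# Note: plain xargs breaks on paths containing spaces; fine for a quick sketch.
	echo "$files" | xargs sed -i -E 's/[[:space:]]+$//'
else
	echo "$files"
	exit 1
fi
```

Run without arguments (for example in CI) to fail when trailing whitespace creeps back in, or with `--fix` locally to strip it.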
parent a0cfe83435
commit 7617ec176d

20 changed files with 109 additions and 109 deletions
MAINTAINERS — 26 changed lines

```diff
@@ -113,7 +113,7 @@ It is the responsibility of the subsystem maintainers to process patches affecting
 manner.
 
 * If the change affects areas of the code which are not part of a subsystem,
-or if subsystem maintainers are unable to reach a timely decision, it must be approved by
+or if subsystem maintainers are unable to reach a timely decision, it must be approved by
 the core maintainers.
 
 * If the change affects the UI or public APIs, or if it represents a major change in architecture,
@@ -200,11 +200,11 @@ for each.
 2-code-review = "requires more code changes"
 1-design-review = "raises design concerns"
 4-merge = "general case"
-
+
 # Docs approval
 [Rules.review.docs-approval]
 # Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers.
-# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR).
+# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR).
 
 # Merge
 [Rules.review.states.4-merge]
@@ -268,7 +268,7 @@ made through a pull request.
 
 # The chief architect is responsible for the overall integrity of the technical architecture
 # across all subsystems, and the consistency of APIs and UI.
-#
+#
 # Changes to UI, public APIs and overall architecture (for example a plugin system) must
 # be approved by the chief architect.
 "Chief Architect" = "shykes"
@@ -314,7 +314,7 @@ made through a pull request.
 ]
 
 # The chief maintainer is responsible for all aspects of quality for the project including
-# code reviews, usability, stability, security, performance, etc.
+# code reviews, usability, stability, security, performance, etc.
 # The most important function of the chief maintainer is to lead by example. On the first
 # day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
 # be fine".
@@ -359,9 +359,9 @@ made through a pull request.
 # has a dedicated group of maintainers, which are dedicated to that subsytem and responsible
 # for its quality.
 # This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
-#
+#
 # The maintainers of each subsytem are responsible for:
-#
+#
 # 1. Exposing a clear road map for improving their subsystem.
 # 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem.
 # 3. Be available to anyone with questions, bug reports, criticism etc.
@@ -371,9 +371,9 @@ made through a pull request.
 # road map of the project.
 #
 # #### How to review patches to your subsystem
-#
+#
 # Accepting pull requests:
-#
+#
 # - If the pull request appears to be ready to merge, give it a `LGTM`, which
 # stands for "Looks Good To Me".
 # - If the pull request has some small problems that need to be changed, make
@@ -384,9 +384,9 @@ made through a pull request.
 # - If the PR only needs a few changes before being merged, any MAINTAINER can
 # make a replacement PR that incorporates the existing commits and fixes the
 # problems before a fast track merge.
-#
+#
 # Closing pull requests:
-#
+#
 # - If a PR appears to be abandoned, after having attempted to contact the
 # original contributor, then a replacement PR may be made. Once the
 # replacement PR is made, any contributor may close the original one.
@@ -584,12 +584,12 @@ made through a pull request.
 Name = "Solomon Hykes"
 Email = "solomon@docker.com"
 GitHub = "shykes"
-
+
 [people.spf13]
 Name = "Steve Francia"
 Email = "steve.francia@gmail.com"
 GitHub = "spf13"
-
+
 [people.sven]
 Name = "Sven Dowideit"
 Email = "SvenDowideit@home.org.au"
```
NOTICE — 4 changed lines

```diff
@@ -10,9 +10,9 @@ The following is courtesy of our legal counsel:
 
 
 Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
+United States and other governments.
 It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
+violate applicable laws.
 
 For more information, please see http://www.bis.doc.gov
 
```
```diff
@@ -15,7 +15,7 @@ hello\\ | hello\
 'hello\' | hello\
 "''" | ''
 $. | $.
-$1 |
+$1 |
 he$1x | hex
 he$.x | he$.x
 he$pwd. | he.
```

```diff
@@ -22,7 +22,7 @@
 # must have access to the socket for the completions to function correctly
 #
 # Note for developers:
-# Please arrange options sorted alphabetically by long name with the short
+# Please arrange options sorted alphabetically by long name with the short
 # options immediately following their corresponding long form.
 # This order should be applied to lists, alternatives and code blocks.
 
@@ -257,8 +257,8 @@ _docker_build() {
 ;;
 --file|-f)
 _filedir
-return
-;;
+return
+;;
 esac
 
 case "$cur" in
```

```diff
@@ -3,7 +3,7 @@
 # AUTHOR: Jessica Frazelle <jess@docker.com>
 # COMMENTS:
 # This file describes how to build a gparted container with all
-# dependencies installed. It uses native X11 unix socket.
+# dependencies installed. It uses native X11 unix socket.
 # Tested on Debian Jessie
 # USAGE:
 # # Download gparted Dockerfile
```
```diff
@@ -41,39 +41,39 @@ while [ $# -gt 0 ]; do
 [ "$imageId" != "$tag" ] || imageId=
 [ "$tag" != "$imageTag" ] || tag='latest'
 tag="${tag%@*}"
-
+
 token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
-
+
 if [ -z "$imageId" ]; then
 imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")"
 imageId="${imageId//\"/}"
 fi
-
+
 ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")"
 if [ "${ancestryJson:0:1}" != '[' ]; then
 echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:"
 echo >&2 " $ancestryJson"
 exit 1
 fi
-
+
 IFS=','
 ancestry=( ${ancestryJson//[\[\] \"]/} )
 unset IFS
-
+
 if [ -s "$dir/tags-$image.tmp" ]; then
 echo -n ', ' >> "$dir/tags-$image.tmp"
 else
 images=( "${images[@]}" "$image" )
 fi
 echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"
-
+
 echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
 for imageId in "${ancestry[@]}"; do
 mkdir -p "$dir/$imageId"
 echo '1.0' > "$dir/$imageId/VERSION"
-
+
 curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json"
-
+
 # TODO figure out why "-C -" doesn't work here
 # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume."
 # "HTTP/1.1 416 Requested Range Not Satisfiable"
```

```diff
@@ -10,11 +10,11 @@ cat <<-EOF
 Description=$desc
 Author=$auth
 After=docker.service
-
+
 [Service]
 ExecStart=/usr/bin/docker start -a $cid
 ExecStop=/usr/bin/docker stop -t 2 $cid
-
+
 [Install]
 WantedBy=local.target
 EOF
```

```diff
@@ -1,5 +1,5 @@
 # /etc/sysconfig/docker
-#
+#
 # Other arguments to pass to the docker daemon process
 # These will be parsed by the sysv initscript and appended
 # to the arguments list passed to docker -d
```
```diff
@@ -14,9 +14,9 @@ justTar=
 
 usage() {
 echo >&2
-
+
 echo >&2 "usage: $0 [options] repo suite [mirror]"
-
+
 echo >&2
 echo >&2 'options: (not recommended)'
 echo >&2 " -p set an http_proxy for debootstrap"
@@ -26,20 +26,20 @@ usage() {
 echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
 echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list"
 echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
-
+
 echo >&2
 echo >&2 " ie: $0 username/debian squeeze"
 echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
-
+
 echo >&2
 echo >&2 " ie: $0 username/ubuntu precise"
 echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
-
+
 echo >&2
 echo >&2 " ie: $0 -t precise.tar.bz2 precise"
 echo >&2 " $0 -t wheezy.tgz wheezy"
 echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
-
+
 echo >&2
 }
 
@@ -145,10 +145,10 @@ if [ -z "$strictDebootstrap" ]; then
 sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
 sudo ln -sf /bin/true sbin/initctl
 # see https://github.com/docker/docker/issues/446#issuecomment-16953173
-
+
 # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
 sudo chroot . apt-get clean
-
+
 if strings usr/bin/dpkg | grep -q unsafe-io; then
 # while we're at it, apt is unnecessarily slow inside containers
 # this forces dpkg not to call sync() after package extraction and speeds up install
@@ -159,7 +159,7 @@ if [ -z "$strictDebootstrap" ]; then
 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
 # and ubuntu lucid/10.04 only has 1.15.5.6
 fi
-
+
 # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
 {
 aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
@@ -167,17 +167,17 @@ if [ -z "$strictDebootstrap" ]; then
 echo "APT::Update::Post-Invoke { ${aptGetClean} };"
 echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
 } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
-
+
 # and remove the translations, too
 echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
-
+
 # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
 # rm /usr/sbin/policy-rc.d
 # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
 # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
 # rm /etc/apt/apt.conf.d/no-cache
 # rm /etc/apt/apt.conf.d/no-languages
-
+
 if [ -z "$skipDetection" ]; then
 # see also rudimentary platform detection in hack/install.sh
 lsbDist=''
@@ -187,14 +187,14 @@ if [ -z "$strictDebootstrap" ]; then
 if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
 lsbDist='Debian'
 fi
-
+
 case "$lsbDist" in
 Debian)
 # add the updates and security repositories
 if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
 # ${suite}-updates only applies to non-unstable
 sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
-
+
 # same for security updates
 echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
 fi
@@ -220,7 +220,7 @@ if [ -z "$strictDebootstrap" ]; then
 ;;
 esac
 fi
-
+
 # make sure our packages lists are as up to date as we can get them
 sudo chroot . apt-get update
 sudo chroot . apt-get dist-upgrade -y
@@ -229,23 +229,23 @@ fi
 if [ "$justTar" ]; then
 # create the tarball file so it has the right permissions (ie, not root)
 touch "$repo"
-
+
 # fill the tarball
 sudo tar --numeric-owner -caf "$repo" .
 else
 # create the image (and tag $repo:$suite)
 sudo tar --numeric-owner -c . | $docker import - $repo:$suite
-
+
 # test the image
 $docker run -i -t $repo:$suite echo success
-
+
 if [ -z "$skipDetection" ]; then
 case "$lsbDist" in
 Debian)
 if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
 # tag latest
 $docker tag $repo:$suite $repo:latest
-
+
 if [ -r etc/debian_version ]; then
 # tag the specific debian release version (which is only reasonable to tag on debian stable)
 ver=$(cat etc/debian_version)
```

```diff
@@ -19,7 +19,7 @@ shift
 chrootPath="$(type -P chroot)"
 rootfs_chroot() {
 # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately!
-
+
 # set PATH and chroot away!
 PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \
 "$chrootPath" "$rootfsDir" "$@"
@@ -220,13 +220,13 @@ fi
 
 (
 set -x
-
+
 # make sure we're fully up-to-date
 rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y'
-
+
 # delete all the apt list files since they're big and get stale quickly
 rm -rf "$rootfsDir/var/lib/apt/lists"/*
 # this forces "apt-get update" in dependent images, which is also good
-
+
 mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
 )
```
```diff
@@ -29,41 +29,41 @@ function template() {
 # this should always match the template from CONTRIBUTING.md
 cat <<- EOM
 Description of problem:
-
-
+
+
 \`docker version\`:
 `${DOCKER_COMMAND} -D version`
-
-
+
+
 \`docker info\`:
 `${DOCKER_COMMAND} -D info`
-
-
+
+
 \`uname -a\`:
 `uname -a`
-
-
+
+
 Environment details (AWS, VirtualBox, physical, etc.):
-
-
+
+
 How reproducible:
-
-
+
+
 Steps to Reproduce:
 1.
 2.
 3.
-
-
+
+
 Actual Results:
-
-
+
+
 Expected Results:
-
-
+
+
 Additional info:
-
-
+
+
 EOM
 }
 
@@ -81,7 +81,7 @@ echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
 read -r -n 1 use_sudo
 echo ""
 
-if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
+if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
 export DOCKER_COMMAND="sudo ${DOCKER}"
 fi
 
```
```diff
@@ -18,12 +18,12 @@
 <key>0</key>
 <dict>
 <key>name</key>
-<string>keyword.control.dockerfile</string>
+<string>keyword.control.dockerfile</string>
 </dict>
 <key>1</key>
 <dict>
 <key>name</key>
-<string>keyword.other.special-method.dockerfile</string>
+<string>keyword.other.special-method.dockerfile</string>
 </dict>
 </dict>
 </dict>
@@ -35,12 +35,12 @@
 <key>0</key>
 <dict>
 <key>name</key>
-<string>keyword.operator.dockerfile</string>
+<string>keyword.operator.dockerfile</string>
 </dict>
 <key>1</key>
 <dict>
 <key>name</key>
-<string>keyword.other.special-method.dockerfile</string>
+<string>keyword.other.special-method.dockerfile</string>
 </dict>
 </dict>
 </dict>
```
```diff
@@ -176,15 +176,15 @@ pages:
 # Project:
 - ['project/index.md', '**HIDDEN**']
 - ['project/who-written-for.md', 'Contributor Guide', 'README first']
-- ['project/software-required.md', 'Contributor Guide', 'Get required software']
-- ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing']
-- ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container']
+- ['project/software-required.md', 'Contributor Guide', 'Get required software']
+- ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing']
+- ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container']
 - ['project/test-and-docs.md', 'Contributor Guide', 'Run tests and test documentation']
 - ['project/make-a-contribution.md', 'Contributor Guide', 'Understand contribution workflow']
-- ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue']
-- ['project/work-issue.md', 'Contributor Guide', 'Work on an issue']
-- ['project/create-pr.md', 'Contributor Guide', 'Create a pull request']
-- ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review']
+- ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue']
+- ['project/work-issue.md', 'Contributor Guide', 'Work on an issue']
+- ['project/create-pr.md', 'Contributor Guide', 'Create a pull request']
+- ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review']
 - ['project/advanced-contributing.md', 'Contributor Guide', 'Advanced contributing']
 - ['project/get-help.md', 'Contributor Guide', 'Where to get help']
 - ['project/coding-style.md', 'Contributor Guide', 'Coding style guide']
```
```diff
@@ -5,7 +5,7 @@ set -o pipefail
 
 usage() {
 cat >&2 <<'EOF'
-To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
+To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
 (with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
 and set the AWS_S3_BUCKET env var to the name of your bucket.
 
```

```diff
@@ -6,7 +6,7 @@ FROM ubuntu
 MAINTAINER SvenDowideit@docker.com
 
 # Add the PostgreSQL PGP key to verify their Debian packages.
-# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
+# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
 RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
 
 # Add PostgreSQL's repository. It contains the most recent stable release
@@ -33,7 +33,7 @@ RUN /etc/init.d/postgresql start &&\
 createdb -O docker docker
 
 # Adjust PostgreSQL configuration so that remote connections to the
-# database are possible.
+# database are possible.
 RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
 
 # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
```
```diff
@@ -3,23 +3,23 @@
 if [ -z "$VALIDATE_UPSTREAM" ]; then
 # this is kind of an expensive check, so let's not do this twice if we
 # are running more than one validate bundlescript
-
+
 VALIDATE_REPO='https://github.com/docker/docker.git'
 VALIDATE_BRANCH='master'
-
+
 if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
 VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
 VALIDATE_BRANCH="${TRAVIS_BRANCH}"
 fi
-
+
 VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
-
+
 git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
 VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
-
+
 VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
 VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
-
+
 validate_diff() {
 if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
 git diff "$VALIDATE_COMMIT_DIFF" "$@"
```

```diff
@@ -5,7 +5,7 @@ DEST=$1
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then
 source "$(dirname "$BASH_SOURCE")/.dockerinit"
-
+
 hash_files "$DEST/dockerinit-$VERSION"
 else
 # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)
```

```diff
@@ -5,7 +5,7 @@ DEST=$1
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then
 source "$(dirname "$BASH_SOURCE")/.dockerinit-gccgo"
-
+
 hash_files "$DEST/dockerinit-$VERSION"
 else
 # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)
```
```diff
@@ -18,17 +18,17 @@ for d in "$CROSS/"*/*; do
 BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
 mkdir -p "$DEST/$GOOS/$GOARCH"
 TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz"
-
+
 mkdir -p "$DEST/build"
-
+
 mkdir -p "$DEST/build/usr/local/bin"
 cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION"
-
+
 tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr
-
+
 hash_files "$TGZ"
-
+
 rm -rf "$DEST/build"
-
+
 echo "Created tgz: $TGZ"
 done
```
```diff
@@ -11,17 +11,17 @@ clone() {
 vcs=$1
 pkg=$2
 rev=$3
-
+
 pkg_url=https://$pkg
 target_dir=src/$pkg
-
+
 echo -n "$pkg @ $rev: "
-
+
 if [ -d $target_dir ]; then
 echo -n 'rm old, '
 rm -fr $target_dir
 fi
-
+
 echo -n 'clone, '
 case $vcs in
 git)
@@ -32,10 +32,10 @@ clone() {
 hg clone --quiet --updaterev $rev $pkg_url $target_dir
 ;;
 esac
-
+
 echo -n 'rm VCS, '
 ( cd $target_dir && rm -rf .{git,hg} )
-
+
 echo done
 }
 
```