Przeglądaj źródła

Merge pull request #2263 from tianon/hack-release

Update hack/release.sh process to automatically invoke hack/make.sh and bail on building/testing issues
Tianon Gravi 11 lat temu
rodzic
commit
f0dbdba5c0
5 zmienionych plików z 95 dodań i 21 usunięć
  1. 1 0
      .gitignore
  2. 29 3
      hack/dind
  3. 4 3
      hack/make.sh
  4. 1 0
      hack/make/test
  5. 60 15
      hack/release.sh

+ 1 - 0
.gitignore

@@ -17,3 +17,4 @@ docs/_templates
 bundles/
 .hg/
 .git/
+vendor/pkg/

+ 29 - 3
hack/dind

@@ -27,6 +27,31 @@ do
 	[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
 	mountpoint -q $CGROUP/$SUBSYS || 
 		mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+	# The two following sections address a bug which manifests itself
+	# by a cryptic "lxc-start: no ns_cgroup option specified" when
+	# trying to start containers within a container.
+	# The bug seems to appear when the cgroup hierarchies are not
+	# mounted on the exact same directories in the host, and in the
+	# container.
+
+	# Named, control-less cgroups are mounted with "-o name=foo"
+	# (and appear as such under /proc/<pid>/cgroup) but are usually
+	# mounted on a directory named "foo" (without the "name=" prefix).
+	# Systemd and OpenRC (and possibly others) both create such a
+	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
+	# "name=foo". This shouldn't have any adverse effect.
+	echo $SUBSYS | grep -q ^name= && {
+		NAME=$(echo $SUBSYS | sed s/^name=//)
+		ln -s $SUBSYS $CGROUP/$NAME
+	}
+
+	# Likewise, on at least one system, it has been reported that
+	# systemd would mount the CPU and CPU accounting controllers
+	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+	# but on a directory called "cpu,cpuacct" (note the inversion
+	# in the order of the groups). This tries to work around it.
+	[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
 done
 
 # Note: as I write those lines, the LXC userland tools cannot setup
@@ -38,7 +63,7 @@ grep -qw devices /proc/1/cgroup ||
 	echo "WARNING: it looks like the 'devices' cgroup is not mounted."
 
 # Now, close extraneous file descriptors.
-pushd /proc/self/fd
+pushd /proc/self/fd >/dev/null
 for FD in *
 do
 	case "$FD" in
@@ -51,9 +76,10 @@ do
 		;;
 	esac
 done
-popd
+popd >/dev/null
 
 # Mount /tmp
 mount -t tmpfs none /tmp
 
-exec $*
+[ "$1" ] && exec "$@"
+echo "You probably want to run hack/make.sh, or maybe a shell?"

+ 4 - 3
hack/make.sh

@@ -1,4 +1,5 @@
 #!/bin/bash
+set -e
 
 # This script builds various binary artifacts from a checkout of the docker
 # source code.
@@ -19,7 +20,7 @@
 #   "docker run hack/make.sh" in the resulting container image.
 #
 
-set -e
+set -o pipefail
 
 # We're a nice, sexy, little shell script, and people might try to run us;
 # but really, they shouldn't. We want to be in a container!
@@ -32,8 +33,8 @@ grep -q "$RESOLVCONF" /proc/mounts || {
 
 # List of bundles to create when no argument is passed
 DEFAULT_BUNDLES=(
-	test
 	binary
+	test
 	ubuntu
 )
 
@@ -66,7 +67,7 @@ main() {
 	fi
 	SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 	if [ $# -lt 1 ]; then
-		bundles=($DEFAULT_BUNDLES)
+		bundles=(${DEFAULT_BUNDLES[@]})
 	else
 		bundles=($@)
 	fi

+ 1 - 0
hack/make/test

@@ -14,6 +14,7 @@ bundle_test() {
 		for test_dir in $(find_test_dirs); do (
 			set -x
 			cd $test_dir
+			go test -i
 			go test -v -ldflags "$LDFLAGS" $TESTFLAGS
 		)  done
 	} 2>&1 | tee $DEST/test.log

+ 60 - 15
hack/release.sh

@@ -1,4 +1,5 @@
-#!/bin/sh
+#!/bin/bash
+set -e
 
 # This script looks for bundles built by make.sh, and releases them on a
 # public S3 bucket.
@@ -9,11 +10,11 @@
 # official Dockerfile at the root of the Docker source code. The Dockerfile,
 # make.sh and release.sh should all be from the same source code revision.
 
-set -e
+set -o pipefail
 
 # Print a usage message and exit.
 usage() {
-	cat <<EOF
+	cat >&2 <<'EOF'
 To run, I need:
 - to be in a container generated by the Dockerfile at the top of the Docker
   repository;
@@ -26,11 +27,12 @@ To run, I need:
 - a generous amount of good will and nice manners.
 The canonical way to run me is to run the image produced by the Dockerfile: e.g.:
 
-docker run -e AWS_S3_BUCKET=get-staging.docker.io \\
-              AWS_ACCESS_KEY=AKI1234... \\
-              AWS_SECRET_KEY=sEs4mE... \\
-              GPG_PASSPHRASE=m0resEs4mE... \\
-              f0058411
+docker run -e AWS_S3_BUCKET=get-staging.docker.io \
+           -e AWS_ACCESS_KEY=AKI1234... \
+           -e AWS_SECRET_KEY=sEs4mE... \
+           -e GPG_PASSPHRASE=m0resEs4mE... \
+           -i -t -privileged \
+           docker ./hack/release.sh
 EOF
 	exit 1
 }
@@ -39,8 +41,41 @@ EOF
 [ "$AWS_ACCESS_KEY" ] || usage
 [ "$AWS_SECRET_KEY" ] || usage
 [ "$GPG_PASSPHRASE" ] || usage
-[ -d /go/src/github.com/dotcloud/docker/ ] || usage
-cd /go/src/github.com/dotcloud/docker/ 
+[ -d /go/src/github.com/dotcloud/docker ] || usage
+cd /go/src/github.com/dotcloud/docker
+[ -x hack/make.sh ] || usage
+
+RELEASE_BUNDLES=(
+	binary
+	ubuntu
+)
+
+if [ "$1" != '--release-regardless-of-test-failure' ]; then
+	RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" )
+fi
+
+if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
+	echo >&2
+	echo >&2 'The build or tests appear to have failed.'
+	echo >&2
+	echo >&2 'You, as the release  maintainer, now have a couple options:'
+	echo >&2 '- delay release and fix issues'
+	echo >&2 '- delay release and fix issues'
+	echo >&2 '- did we mention how important this is?  issues need fixing :)'
+	echo >&2
+	echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
+	echo >&2 ' really knows all the hairy problems at hand with the current release'
+	echo >&2 ' issues) may bypass this checking by running this script again with the'
+	echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
+	echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
+	echo >&2 ' avoid using this if at all possible.'
+	echo >&2
+	echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
+	echo >&2 ' should be used.  If there are release issues, we should always err on the'
+	echo >&2 ' side of caution.'
+	echo >&2
+	exit 1
+fi
 
 VERSION=$(cat VERSION)
 BUCKET=$AWS_S3_BUCKET
@@ -81,11 +116,15 @@ s3_url() {
 # 1. A full APT repository is published at $BUCKET/ubuntu/
 # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
 release_ubuntu() {
+	[ -e bundles/$VERSION/ubuntu ] || {
+		echo >&2 './hack/make.sh must be run before release_ubuntu'
+		exit 1
+	}
 	# Make sure that we have our keys
 	mkdir -p /.gnupg/
 	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
 	gpg --list-keys releasedocker >/dev/null || {
-		gpg --gen-key --batch <<EOF   
+		gpg --gen-key --batch <<EOF
 Key-Type: RSA
 Key-Length: 2048
 Passphrase: $GPG_PASSPHRASE
@@ -99,7 +138,7 @@ EOF
 
 	# Sign our packages
 	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
-		 --sign builder bundles/$VERSION/ubuntu/*.deb
+		--sign builder bundles/$VERSION/ubuntu/*.deb
 
 	# Setup the APT repo
 	APTDIR=bundles/$VERSION/ubuntu/apt
@@ -116,8 +155,7 @@ EOF
 	reprepro -b $APTDIR includedeb docker $DEBFILE
 
 	# Sign
-	for F in $(find $APTDIR -name Release)
-	do
+	for F in $(find $APTDIR -name Release); do
 		gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
 			--armor --sign --detach-sign \
 			--output $F.gpg $F
@@ -137,13 +175,20 @@ echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
 curl $(s3_url)/gpg | apt-key add -
 # Install docker
 apt-get update ; apt-get install -y lxc-docker
+
+#
+# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
+#
 EOF
 	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
 }
 
 # Upload a static binary to S3
 release_binary() {
-	[ -e bundles/$VERSION ]
+	[ -e bundles/$VERSION/binary/docker-$VERSION ] || {
+		echo >&2 './hack/make.sh must be run before release_binary'
+		exit 1
+	}
 	S3DIR=s3://$BUCKET/builds/Linux/x86_64
 	s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
 	cat <<EOF | write_to_s3 s3://$BUCKET/builds/info