
Fix release.sh script

Use aws instead of s3cmd.
Make sure the release script works with the docker-prefixed binary
names.

Signed-off-by: Tibor Vass <tibor@docker.com>
Tibor Vass · commit dd51e85c05

2 changed files with 24 additions and 29 deletions:
  1. hack/make.sh (+1 -1)
  2. hack/release.sh (+23 -28)

hack/make.sh (+1 -1)

@@ -296,7 +296,7 @@ copy_containerd() {
     # OS/ARCH of the build target
     if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
         (set -x
-        if [ -x /usr/local/bin/runc ]; then
+        if [ -x /usr/local/bin/docker-runc ]; then
             echo "Copying nested executables into $dir"
             for file in containerd containerd-shim containerd-ctr runc; do
                 cp "/usr/local/bin/docker-$file" "$dir/"

hack/release.sh (+23 -28)

@@ -21,26 +21,30 @@ To run, I need:
 - to be provided with the location of an S3 bucket and path, in
   environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
 - to be provided with AWS credentials for this S3 bucket, in environment
-  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
+  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
 - a generous amount of good will and nice manners.
 The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"

 docker run -e AWS_S3_BUCKET=test.docker.com \
-           -e AWS_ACCESS_KEY=... \
-           -e AWS_SECRET_KEY=... \
-           -i -t --privileged \
+           -e AWS_ACCESS_KEY_ID     \
+           -e AWS_SECRET_ACCESS_KEY \
+           -e AWS_DEFAULT_REGION    \
+           -it --privileged         \
            docker ./hack/release.sh
 EOF
 	exit 1
 }

 [ "$AWS_S3_BUCKET" ] || usage
-[ "$AWS_ACCESS_KEY" ] || usage
-[ "$AWS_SECRET_KEY" ] || usage
+[ "$AWS_ACCESS_KEY_ID" ] || usage
+[ "$AWS_SECRET_ACCESS_KEY" ] || usage
 [ -d /go/src/github.com/docker/docker ] || usage
 cd /go/src/github.com/docker/docker
 [ -x hack/make.sh ] || usage

+export AWS_DEFAULT_REGION
+: ${AWS_DEFAULT_REGION:=us-west-2}
+
 RELEASE_BUNDLES=(
 	binary
 	cross
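
Two idioms in this hunk are worth spelling out: `docker run -e VAR` with no `=value` forwards VAR from the invoking environment, so the credentials never appear on the command line, and `: ${VAR:=default}` assigns a default only when VAR is unset or empty. A minimal illustration (the busybox image is used purely as an example):

    export AWS_DEFAULT_REGION=eu-west-1
    docker run --rm -e AWS_DEFAULT_REGION busybox env | grep AWS
    # AWS_DEFAULT_REGION=eu-west-1

    unset AWS_DEFAULT_REGION
    : ${AWS_DEFAULT_REGION:=us-west-2}
    echo "$AWS_DEFAULT_REGION"    # us-west-2
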
@@ -75,16 +79,14 @@ fi

 setup_s3() {
 	echo "Setting up S3"
+	# TODO: Move to Dockerfile
+	pip install awscli==1.10.15
 	# Try creating the bucket. Ignore errors (it might already exist).
-	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
+	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
 	# Check access to the bucket.
-	# s3cmd has no useful exit status, so we cannot check that.
-	# Instead, we check if it outputs anything on standard output.
-	# (When there are problems, it uses standard error instead.)
-	# NOTE: for some reason on debian:jessie `s3cmd info ... | grep -q .` results in a broken pipe
-	s3cmd info "s3://$BUCKET" | grep . >/dev/null
+	aws s3 ls "s3://$BUCKET" >/dev/null
 	# Make the bucket accessible through website endpoints.
-	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
+	aws s3 website --index-document index --error-document error "s3://$BUCKET"
 }

 # write_to_s3 uploads the contents of standard input to the specified S3 url.
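
For reference, the rough s3cmd-to-awscli mapping this commit applies (flag spellings as used here, against the pinned awscli 1.10.15; note s3cmd's --preserve has no aws s3 cp counterpart and is simply dropped):

    s3cmd mb s3://B                                   ->  aws s3 mb s3://B
    s3cmd info s3://B | grep .                        ->  aws s3 ls s3://B   # exit status is now meaningful
    s3cmd ws-create --ws-index i --ws-error e s3://B  ->  aws s3 website --index-document i --error-document e s3://B
    s3cmd put --acl-public SRC DST                    ->  aws s3 cp --acl public-read SRC DST
    s3cmd cp --acl-public SRC DST                     ->  aws s3 cp --acl public-read SRC DST
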
@@ -92,7 +94,7 @@ write_to_s3() {
 	DEST=$1
 	F=`mktemp`
 	cat > "$F"
-	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
+	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
 	rm -f "$F"
 }
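
write_to_s3 is stdin-driven, which is why it spools input to a mktemp file before uploading (streaming stdin straight into aws s3 cp is not assumed to work with the awscli version pinned above). A minimal usage sketch with a hypothetical key:

    echo 'hello' | write_to_s3 "s3://$BUCKET_PATH/hello"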
 
 
@@ -102,6 +104,7 @@ s3_url() {
 			echo "https://$BUCKET_PATH"
 			echo "https://$BUCKET_PATH"
 			;;
 			;;
 		*)
 		*)
+			# TODO: remove s3cmd dependency
 			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
 			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
 			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
 			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
 				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
 				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
@@ -147,12 +150,12 @@ upload_release_build() {
 	echo "Uploading $src"
 	echo "Uploading $src"
 	echo "  to $dst"
 	echo "  to $dst"
 	echo
 	echo
-	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
+	aws s3 cp --follow-symlinks --acl public-read "$src" "$dst"
 	if [ "$latest" ]; then
 	if [ "$latest" ]; then
 		echo
 		echo
 		echo "Copying to $latest"
 		echo "Copying to $latest"
 		echo
 		echo
-		s3cmd --acl-public cp "$dst" "$latest"
+		aws s3 cp --acl public-read "$dst" "$latest"
 	fi
 	fi
 
 
 	# get hash files too (see hash_files() in hack/make.sh)
 	# get hash files too (see hash_files() in hack/make.sh)
@@ -162,12 +165,12 @@ upload_release_build() {
 			echo "Uploading $src.$hashAlgo"
 			echo "Uploading $src.$hashAlgo"
 			echo "  to $dst.$hashAlgo"
 			echo "  to $dst.$hashAlgo"
 			echo
 			echo
-			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
+			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
 			if [ "$latest" ]; then
 			if [ "$latest" ]; then
 				echo
 				echo
 				echo "Copying to $latest.$hashAlgo"
 				echo "Copying to $latest.$hashAlgo"
 				echo
 				echo
-				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
+				aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
 			fi
 			fi
 		fi
 		fi
 	done
 	done
@@ -268,14 +271,14 @@ release_binaries() {

 	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
 # To install, run the following command as root:
-curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
+curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && sudo tar zxf docker-$VERSION.tgz -C /
 # Then start docker in daemon mode:
 sudo /usr/local/bin/docker daemon
 EOF

 	# Add redirect at /builds/info for URL-backwards-compatibility
 	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
+	aws s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

 	if [ -z "$NOLATEST" ]; then
 		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
@@ -290,19 +293,11 @@ release_index() {
 	write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
 	write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
 }
 }
 
 
-release_test() {
-	echo "Releasing tests"
-	if [ -e "bundles/$VERSION/test" ]; then
-		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET_PATH/test/"
-	fi
-}
-
 main() {
 main() {
 	build_all
 	build_all
 	setup_s3
 	setup_s3
 	release_binaries
 	release_binaries
 	release_index
 	release_index
-	release_test
 }
 }
 
 
 main
 main