فهرست منبع

Add support for longer S3 bucket paths

Signed-off-by: Mike Dougherty <mike.dougherty@docker.com>
Mike Dougherty 10 سال پیش
والد
کامیت
b46c15e772
1 فایل‌های تغییر یافته به همراه 18 افزوده شده و 11 حذف شده
  1. 18 11
      hack/release.sh

+ 18 - 11
hack/release.sh

@@ -18,8 +18,8 @@ usage() {
 To run, I need:
 - to be in a container generated by the Dockerfile at the top of the Docker
   repository;
-- to be provided with the name of an S3 bucket, in environment variable
-  AWS_S3_BUCKET;
+- to be provided with the location of an S3 bucket and path, in
+  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
 - to be provided with AWS credentials for this S3 bucket, in environment
   variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
 - the passphrase to unlock the GPG key which will sign the deb packages
@@ -62,6 +62,8 @@ fi
 
 VERSION=$(< VERSION)
 BUCKET=$AWS_S3_BUCKET
+BUCKET_PATH=$BUCKET
+[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
 
 # These are the 2 keys we've used to sign the deb's
 #   release (get.docker.com)
@@ -94,10 +96,15 @@ write_to_s3() {
 s3_url() {
 	case "$BUCKET" in
 		get.docker.com|test.docker.com|experimental.docker.com)
-			echo "https://$BUCKET"
+			echo "https://$BUCKET_PATH"
 			;;
 		*)
-			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
+			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
+			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
+				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
+			else
+				echo "$BASE_URL"
+			fi
 			;;
 	esac
 }
@@ -224,7 +231,7 @@ release_build() {
 			;;
 	esac
 
-	s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
+	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
 	latest=
 	latestTgz=
 	if [ "$latestBase" ]; then
@@ -323,7 +330,7 @@ release_binaries() {
 
 	# TODO create redirect from builds/*/i686 to builds/*/i386
 
-	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
+	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
 # To install, run the following command as root:
 curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
 # Then start docker in daemon mode:
@@ -332,24 +339,24 @@ EOF
 
 	# Add redirect at /builds/info for URL-backwards-compatibility
 	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info"
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
 
 	if [ -z "$NOLATEST" ]; then
-		echo "Advertising $VERSION on $BUCKET as most recent version"
-		echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
+		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
+		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
 	fi
 }
 
 # Upload the index script
 release_index() {
 	echo "Releasing index"
-	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
+	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET_PATH/index"
 }
 
 release_test() {
 	echo "Releasing tests"
 	if [ -e "bundles/$VERSION/test" ]; then
-		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
+		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET_PATH/test/"
 	fi
 }