
Merge pull request #9495 from SvenDowideit/aws-cli-exclude-still-broken

Something changed, broke the docs release script, and it seems that --ex...
James Turnbull 10 years ago
parent
commit
603fe40661
2 changed files with 8 additions and 29 deletions
  1. docs/README.md: 4 additions, 2 deletions
  2. docs/release.sh: 4 additions, 27 deletions

+ 4 - 2
docs/README.md

@@ -131,8 +131,8 @@ Once the PR has the needed `LGTM`s, merge it, then publish to our beta server
 to test:

     git fetch upstream
-    git checkout post-1.2.0-docs-update-1
-    git reset --hard upstream/post-1.2.0-docs-update-1
+    git checkout docs
+    git reset --hard upstream/docs
     make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release

 Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
@@ -141,6 +141,8 @@ to view your results and make sure what you published is what you wanted.
 When you're happy with it, publish the docs to our live site:

     make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release
+
+Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/

 Note that the new docs will not appear live on the site until the cache (a complex,
 distributed CDN system) is flushed. This requires someone with S3 keys. Contact Docker
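
Taken end to end, the beta-then-live flow described in the README above amounts to the following sequence. This is a non-normative sketch assembled only from the commands and bucket names shown in the diff; the final curl spot-check of the uncached live site is added here for illustration and is not part of the PR:

    #!/bin/sh
    # Sketch of the docs publish flow from docs/README.md: beta first, then live.
    set -e

    git fetch upstream
    git checkout docs
    git reset --hard upstream/docs

    # Publish to the beta bucket, then review the result manually at
    # http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
    make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release

    # Once the beta output looks right, publish to the live bucket.
    make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release

    # Spot-check the uncached live copy; the CDN-cached site may lag until flushed.
    curl -fsS http://docs.docker.com.s3-website-us-east-1.amazonaws.com/ >/dev/null && echo "uncached live docs responding"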

+ 4 - 27
docs/release.sh

@@ -88,36 +88,13 @@ upload_current_documentation() {
 	# a really complicated way to send only the files we want
 	# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
 	#  versions.html_fragment
-	endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment )
-	for i in ${endings[@]}; do
-		include=""
-		for j in ${endings[@]}; do
-			if [ "$i" != "$j" ];then
-				include="$include --exclude *.$j"
-			fi
-		done
-		include="--include *.$i $include"
+		include="--recursive --include \"*.$i\" "
 		echo "uploading *.$i"
-		run="aws s3 sync $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \
-			$include \
-			--exclude *.text* \
-			--exclude *.*~ \
-			--exclude *Dockerfile \
-			--exclude *.DS_Store \
-			--exclude *.psd \
-			--exclude *.ai \
-			--exclude *.eot \
-			--exclude *.otf \
-			--exclude *.rej \
-			--exclude *.rst \
-			--exclude *.orig \
-			--exclude *.py \
-			$src $dst"
+		run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read $include"
+		echo "======================="
+		echo "$run"
 		echo "======================="
-		#echo "$run"
-		#echo "======================="
 		$run
-	done
 }

 if [ "$OPTIONS" != "--dryrun" ]; then
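
The heart of the release.sh change is replacing a single "aws s3 sync" invocation carrying a long --exclude list with an "aws s3 cp --recursive" upload filtered by --include, one file type at a time. Below is a minimal, self-contained sketch of that per-type pattern: the bucket name, the explicit loop over endings, and the --exclude "*" pairing (which aws-cli needs for --include to act as a filter) are illustration choices here rather than lines from this commit, and release.sh itself additionally passes --profile $BUCKET:

    #!/bin/sh
    # Sketch: upload generated docs one file type at a time with "aws s3 cp",
    # rather than one "aws s3 sync" carrying a long --exclude list.
    # $src, $dst and $OPTIONS mirror the variable names in docs/release.sh;
    # the values below are placeholders for illustration only.
    src=site/
    dst=s3://example-docs-bucket
    OPTIONS=--dryrun   # drop this to upload for real

    for ext in json txt html xml css js gif png JPG ttf svg woff html_fragment; do
        echo "uploading *.$ext"
        # --exclude "*" first, so that --include re-includes only this file type.
        aws s3 cp "$src" "$dst" $OPTIONS --recursive \
            --exclude "*" --include "*.$ext" \
            --cache-control "max-age=3600" --acl public-read
    done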