Merge pull request #11328 from jfrazelle/fix-docs-release-script

Fix clear cache docs release.
Jessie Frazelle, 10 years ago
commit 42f5c87425
3 changed files with 167 additions and 1 deletion:
  1. Makefile (+0 −1)
  2. docs/Dockerfile (+1 −0)
  3. docs/release.sh (+166 −0)

+ 0 - 1
Makefile

@@ -86,7 +86,6 @@ build: bundles
 	docker build -t "$(DOCKER_IMAGE)" .
 
 docs-build:
-	git fetch https://github.com/docker/docker.git docs && git diff --name-status FETCH_HEAD...HEAD -- docs > docs/changed-files
 	cp ./VERSION docs/VERSION
 	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
 #	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET

+ 1 - 0
docs/Dockerfile

@@ -21,6 +21,7 @@ COPY ./VERSION VERSION
 # TODO: don't do this - look at merging the yml file in build.sh
 COPY ./mkdocs.yml mkdocs.yml
 COPY ./s3_website.json s3_website.json
+COPY ./release.sh release.sh
 
 # Docker Swarm
 #ADD https://raw.githubusercontent.com/docker/swarm/master/docs/mkdocs.yml /docs/mkdocs-swarm.yml

+ 166 - 0
docs/release.sh

@@ -0,0 +1,166 @@
+#!/usr/bin/env bash
+set -e
+set -o pipefail
+
+usage() {
+	cat >&2 <<'EOF'
+To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file 
+(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
+and set the AWS_S3_BUCKET env var to the name of your bucket.
+
+If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`
+
+make AWS_S3_BUCKET=docs-stage.docker.com docs-release
+
+will then push the documentation site to your s3 bucket.
+
+ Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server
+EOF
+	exit 1
+}
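+
+# An awsconfig profile for a bucket might look like this (placeholder values,
+# one [profile ...] section per bucket):
+#
+#   [profile docs-stage.docker.com]
+#   aws_access_key_id = <access_key>
+#   aws_secret_access_key = <secret_key>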
+
+[ "$AWS_S3_BUCKET" ] || usage
+
+VERSION=$(cat VERSION)
+
+if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then
+	if [ "${VERSION%-dev}" != "$VERSION" ]; then
+		echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
+		exit 1
+	fi
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Allow: /
+EOF
+
+else
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Disallow: /
+EOF
+fi
+
+# Remove the last version - 1.0.2-dev -> 1.0
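+# (${VERSION%.*} strips the shortest suffix matching ".*", so VERSION=1.0.2-dev
+# yields MAJOR_MINOR=v1.0)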
+MAJOR_MINOR="v${VERSION%.*}"
+export MAJOR_MINOR
+
+export BUCKET=$AWS_S3_BUCKET
+
+export AWS_CONFIG_FILE=$(pwd)/awsconfig
+[ -e "$AWS_CONFIG_FILE" ] || usage
+export AWS_DEFAULT_PROFILE=$BUCKET
+
+echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"
+
+setup_s3() {
+	echo "Create $BUCKET"
+	# Try creating the bucket. Ignore errors (it might already exist).
+	aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true
+	# Check access to the bucket.
+	echo "test $BUCKET exists"
+	aws s3 --profile $BUCKET ls s3://$BUCKET
+	# Make the bucket accessible through website endpoints.
+	echo "make $BUCKET accessible as a website"
+	#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
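+	# s3_website.json appears to be a template: envsubst substitutes the exported
+	# variables it references ($BUCKET, $MAJOR_MINOR) before the put-bucket-website call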
+	s3conf=$(envsubst < s3_website.json)
+	echo
+	echo "$s3conf"
+	echo
+	aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
+}
+
+build_current_documentation() {
+	mkdocs build
+	cd site/
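+	# -9: best compression; -k: keep search_content.json alongside the .gz; -f: overwrite any existing .gz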
+	gzip -9k -f search_content.json
+	cd ..
+}
+
+upload_current_documentation() {
+	src=site/
+	dst=s3://$BUCKET$1
+
+	cache=max-age=3600
+	if [ "$NOCACHE" ]; then
+		cache=no-cache
+	fi
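+	# by default clients may cache pages for an hour; NOCACHE publishes with no-cache instead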
+
+	echo
+	echo "Uploading $src"
+	echo "  to $dst"
+	echo
+
+	# a really complicated way to send only the files we want
+	# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
+	#  versions.html_fragment
+	# upload one file-extension batch at a time (this list of endings is assumed)
+	endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment )
+	for i in "${endings[@]}"; do
+		echo "uploading *.$i"
+		run=( aws s3 cp "$src" "$dst" $OPTIONS --profile "$BUCKET" --cache-control "$cache" --acl public-read --recursive --exclude '*' --include "*.$i" )
+		echo "======================="
+		echo "${run[*]}"
+		echo "======================="
+		"${run[@]}"
+	done
+
+	# Make sure the search_content.json.gz file has the right content-encoding
+	aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst"
+}
+
+invalidate_cache() {
+	if [ "" == "$DISTRIBUTION_ID" ]; then
+		echo "Skipping Cloudfront cache invalidation"
+		return
+	fi
+
+	dst=${1:-/}  # path prefix; must end with "/"
+
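+	# CloudFront support was still a preview feature in aws-cli, so enable it explicitly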
+	aws configure set preview.cloudfront true
+
+	# collect every non-markdown file we uploaded (filenames are assumed to contain no whitespace)
+	files=( $(find site/ -not -name "*.md*" -type f | sed 's/site\///g') )
+
+	len=${#files[@]}
+
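+	# assemble a single "aws cloudfront create-invalidation" command whose JSON batch
+	# lists every uploaded path, then run it via sh below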
+	echo "aws cloudfront  create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '" > batchfile
+	echo "{\"Paths\":{\"Quantity\":$len," >> batchfile
+	echo "\"Items\": [" >> batchfile
+
+	for file in "${files[@]}"
+	do
+		if [ "$file" == "${files[${#files[@]}-1]}" ]; then
+			comma=""
+		else
+			comma=","
+		fi
+		echo "\"$dst$file\"$comma" >> batchfile
+	done
+
+	echo "]}, \"CallerReference\":" >> batchfile
+	echo "\"$(date)\"}'" >> batchfile
+
+
+	echo "-----"
+	cat batchfile
+	echo "-----"
+	sh batchfile
+	echo "-----"
+}
+
+
+if [ "$OPTIONS" != "--dryrun" ]; then
+	setup_s3
+fi
+
+# Default to building only the version-specific docs so we don't clobber the latest docs by accident with old versions
+if [ "$BUILD_ROOT" == "yes" ]; then
+	echo "Building root documentation"
+	build_current_documentation
+	upload_current_documentation
+	[ "$NOCACHE" ] || invalidate_cache
+fi
+
+# build again with the /$MAJOR_MINOR/ prefix (e.g. /v1.0/)
+sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
+echo "Building the /$MAJOR_MINOR/ documentation"
+build_current_documentation
+upload_current_documentation "/$MAJOR_MINOR/"
+[ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR"