release.sh

#!/bin/bash
set -e
set -o pipefail

usage() {
    cat >&2 <<'EOF'
To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
and set the AWS_S3_BUCKET env var to the name of your bucket.
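
For example, awsconfig might look something like this (placeholder values,
one profile section per bucket you publish to):

    [profile docs-stage.docker.com]
    aws_access_key_id = <your access key>
    aws_secret_access_key = <your secret key>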

If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`.

    make AWS_S3_BUCKET=docs-stage.docker.com docs-release

will then push the documentation site to your S3 bucket.

Note: you can add `OPTIONS=--dryrun` to see what would be done without sending anything to the server.
You can also add `NOCACHE=1` to publish without a cache, which is what we do for the master docs.
EOF
    exit 1
}
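
# Write a robots.txt into the docs sources that tells all crawlers to stay out
# of the published site.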
create_robots_txt() {
    cat > ./sources/robots.txt <<'EOF'
User-agent: *
Disallow: /
EOF
}
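
# Create the target bucket if it doesn't exist yet, check that we can reach it,
# and configure it to be served as a static website using s3_website.json.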
setup_s3() {
    # Try creating the bucket. Ignore errors (it might already exist).
    echo "create $BUCKET if it does not exist"
    aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true

    # Check access to the bucket.
    echo "test $BUCKET exists"
    aws s3 --profile $BUCKET ls s3://$BUCKET

    # Make the bucket accessible through website endpoints.
    echo "make $BUCKET accessible as a website"
    #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
    local s3conf=$(cat s3_website.json | envsubst)
    aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
}
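
# Build the static site with mkdocs and pre-compress the search index so it can
# be uploaded and served gzipped.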
build_current_documentation() {
    mkdocs build
    cd site/
    gzip -9k -f search_content.json
    cd ..
}
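
# Upload the generated site to s3://$BUCKET$1, where $1 is an optional path
# prefix such as /v1.0/ (empty when uploading the root documentation).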
upload_current_documentation() {
    src=site/
    dst=s3://$BUCKET$1

    cache=max-age=3600
    if [ "$NOCACHE" ]; then
        cache=no-cache
    fi

    printf "\nUploading $src to $dst\n"

    # Use a recursive `aws s3 cp` rather than `aws s3 sync`: with too many files
    # in any one set, sync seems to fall over with 2 files to go.
    #  versions.html_fragment
    include="--recursive"
    run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control $cache --acl public-read $include"
    printf "\n=====\n$run\n=====\n"
    $run

    # Make sure the search_content.json.gz file has the right content-encoding
    aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst"
}
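
# Invalidate the CloudFront cache for every generated file under the given path
# prefix ($1) so the freshly uploaded pages replace any cached copies. Skipped
# when DISTRIBUTION_ID is not set.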
invalidate_cache() {
    if [[ -z "$DISTRIBUTION_ID" ]]; then
        echo "Skipping CloudFront cache invalidation"
        return
    fi
    dst=$1
    aws configure set preview.cloudfront true

    # Collect all generated files (but not .md sources), strip the leading "site"
    # from each path, and URL-encode spaces as %20.
    files=( $(find site/ -not -name "*.md*" -type f | sed 's/^site//' | sed 's/ /%20/g') )
    len=${#files[@]}
    last_file=${files[$((len-1))]}
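
    # Assemble a small one-shot script ("batchfile") that calls create-invalidation
    # with a JSON Paths list covering every file, then execute it below.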
    echo "aws cloudfront create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '" > batchfile
    echo "{\"Paths\":{\"Quantity\":$len," >> batchfile
    echo "\"Items\": [" >> batchfile
    for file in "${files[@]}" ; do
        if [[ "$file" == "$last_file" ]]; then
            comma=""
        else
            comma=","
        fi
        echo "\"$dst$file\"$comma" >> batchfile
    done
    echo "]}, \"CallerReference\":\"$(date)\"}'" >> batchfile
    sh batchfile
}
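
# Entry point: validate the environment, prepare the bucket, build the docs, and
# upload them (the root site only when BUILD_ROOT=yes, always a copy under the
# /vMAJOR.MINOR/ prefix), invalidating the CloudFront cache unless NOCACHE is set.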
main() {
    [ "$AWS_S3_BUCKET" ] || usage

    # Make sure there is an awsconfig file
    export AWS_CONFIG_FILE=$(pwd)/awsconfig
    [ -f "$AWS_CONFIG_FILE" ] || usage

    # Get the version
    VERSION=$(cat VERSION)

    # Disallow pushing dev docs to master
    if [ "$AWS_S3_BUCKET" == "docs.docker.com" ] && [ "${VERSION%-dev}" != "$VERSION" ]; then
        echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
        exit 1
    fi

    # Clean version: 1.0.2-dev -> v1.0
    export MAJOR_MINOR="v${VERSION%.*}"

    export BUCKET=$AWS_S3_BUCKET
    export AWS_DEFAULT_PROFILE=$BUCKET

    # debug variables
    echo "bucket: $BUCKET, full version: $VERSION, major-minor: $MAJOR_MINOR"
    echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"

    # create the robots.txt
    create_robots_txt

    if [ "$OPTIONS" != "--dryrun" ]; then
        setup_s3
    fi

    # Default to only building the version-specific docs
    # so we don't clobber the latest by accident with old versions.
    if [ "$BUILD_ROOT" == "yes" ]; then
        echo "Building root documentation"
        build_current_documentation
        echo "Uploading root documentation"
        upload_current_documentation
        [ "$NOCACHE" ] || invalidate_cache
    fi

    # Build again with the version prefix (e.g. /v1.0/) as the site_url.
    sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
    echo "Building the /$MAJOR_MINOR/ documentation"
    build_current_documentation
    echo "Uploading the documentation"
    upload_current_documentation "/$MAJOR_MINOR/"

    # Invalidate the cache
    [ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR"
}

main