#!/usr/bin/env bash
set -e
set -o pipefail

usage() {
    cat >&2 <<'EOF'
To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
and set the AWS_S3_BUCKET env var to the name of your bucket.

If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`.

    make AWS_S3_BUCKET=docs-stage.docker.com docs-release

will then push the documentation site to your s3 bucket.

Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server.
EOF
    exit 1
}
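
# For illustration only: the docs/awsconfig file described in the usage text is
# a standard AWS CLI config file with one profile per bucket. A hypothetical
# entry for the docs-stage.docker.com bucket might look like this (placeholder
# credentials, not real keys):
#
#   [profile docs-stage.docker.com]
#   aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
#   aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   region = us-east-1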
[ "$AWS_S3_BUCKET" ] || usage

VERSION=$(cat VERSION)
if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then
    if [ "${VERSION%-dev}" != "$VERSION" ]; then
        echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
        exit 1
    fi
    cat > ./sources/robots.txt <<'EOF'
User-agent: *
Allow: /
EOF
else
    cat > ./sources/robots.txt <<'EOF'
User-agent: *
Disallow: /
EOF
fi

# Strip the patch level and any -dev suffix: 1.0.2-dev -> v1.0
MAJOR_MINOR="v${VERSION%.*}"
export MAJOR_MINOR

export BUCKET=$AWS_S3_BUCKET
export AWS_CONFIG_FILE=$(pwd)/awsconfig
[ -e "$AWS_CONFIG_FILE" ] || usage
export AWS_DEFAULT_PROFILE=$BUCKET
echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"

setup_s3() {
    echo "Create $BUCKET"
    # Try creating the bucket. Ignore errors (it might already exist).
    aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true

    # Check access to the bucket.
    echo "test $BUCKET exists"
    aws s3 --profile $BUCKET ls s3://$BUCKET

    # Make the bucket accessible through website endpoints.
    echo "make $BUCKET accessible as a website"
    #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
    s3conf=$(envsubst < s3_website.json)
    echo
    echo "$s3conf"
    echo
    aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
}
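
# For reference: s3_website.json (not included in this script) is expected to be
# an S3 website-configuration document, possibly containing environment
# placeholders that the envsubst call above fills in. A hypothetical minimal
# example, matching the commented-out `aws s3 website` line above, could be:
#
#   {
#     "IndexDocument": { "Suffix": "index.html" },
#     "ErrorDocument": { "Key": "jsearch/index.html" }
#   }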

build_current_documentation() {
    mkdocs build
    cd site/
    # Pre-compress the search index (-k keeps the uncompressed original);
    # the .gz copy is re-uploaded later with Content-Encoding: gzip.
    gzip -9k -f search_content.json
    cd ..
}

upload_current_documentation() {
    src=site/
    dst=s3://$BUCKET$1

    cache=max-age=3600
    if [ "$NOCACHE" ]; then
        cache=no-cache
    fi

    echo
    echo "Uploading $src"
    echo "       to $dst"
    echo

    # A really complicated way to send only the files we want:
    # if there are too many in any one set, `aws s3 sync` seems to fall over with
    # 2 files to go (e.g. versions.html_fragment), so upload one file extension at a time.
    for i in $(find "$src" -type f -name '*.*' | sed 's/.*\.//' | sort -u); do
        echo "uploading *.$i"
        echo "======================="
        echo "aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control $cache --acl public-read --recursive --exclude '*' --include '*.$i'"
        echo "======================="
        aws s3 cp "$src" "$dst" $OPTIONS --profile "$BUCKET" --cache-control "$cache" --acl public-read \
            --recursive --exclude "*" --include "*.$i"
    done

    # Make sure the search_content.json.gz file has the right content-encoding.
    aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst"
}

invalidate_cache() {
    if [ -z "$DISTRIBUTION_ID" ]; then
        echo "Skipping Cloudfront cache invalidation"
        return
    fi

    dst=$1

    aws configure set preview.cloudfront true

    # List every generated file (skipping markdown sources), keeping the leading "/"
    # so that "$dst$file" becomes a valid invalidation path such as /v1.0/index.html.
    files=( $(find site/ -not -name "*.md*" -type f | sed 's/^site//') )
    len=${#files[@]}

    # Build the invalidation request as a shell command in ./batchfile, then run it.
    echo "aws cloudfront create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '" > batchfile
    echo "{\"Paths\":{\"Quantity\":$len," >> batchfile
    echo "\"Items\": [" >> batchfile
    for file in "${files[@]}"; do
        if [ "$file" == "${files[${#files[@]}-1]}" ]; then
            comma=""
        else
            comma=","
        fi
        echo "\"$dst$file\"$comma" >> batchfile
    done
    echo "]}, \"CallerReference\":" >> batchfile
    echo "\"$(date)\"}'" >> batchfile

    echo "-----"
    cat batchfile
    echo "-----"
    sh batchfile
    echo "-----"
}
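
# For illustration only: the batchfile generated above is itself a shell command.
# With hypothetical values (DISTRIBUTION_ID=EXAMPLE123, dst="/v1.0", two files)
# it would look roughly like:
#
#   aws cloudfront create-invalidation --profile docs.docker.com --distribution-id EXAMPLE123 --invalidation-batch '
#   {"Paths":{"Quantity":2,
#   "Items": [
#   "/v1.0/index.html",
#   "/v1.0/search_content.json"
#   ]}, "CallerReference":
#   "Mon Jan  5 12:00:00 UTC 2015"}'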

if [ "$OPTIONS" != "--dryrun" ]; then
    setup_s3
fi

# Default to only building the version-specific docs so we don't clobber the latest by accident with old versions.
if [ "$BUILD_ROOT" == "yes" ]; then
    echo "Building root documentation"
    build_current_documentation
    upload_current_documentation
    [ "$NOCACHE" ] || invalidate_cache
fi

# Build again with the /$MAJOR_MINOR/ (e.g. /v1.0/) prefix baked into the site URLs.
sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
echo "Building the /$MAJOR_MINOR/ documentation"
build_current_documentation
upload_current_documentation "/$MAJOR_MINOR/"
[ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR"
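
# Example invocations, following the usage text at the top of this script
# (bucket name is illustrative):
#
#   make AWS_S3_BUCKET=docs-stage.docker.com docs-release                   # publish only the /vX.Y/ docs
#   make AWS_S3_BUCKET=docs-stage.docker.com BUILD_ROOT=yes docs-release    # also publish the site root
#   make AWS_S3_BUCKET=docs-stage.docker.com OPTIONS=--dryrun docs-release  # show what would be uploaded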