release.sh

#!/usr/bin/env bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
	-e AWS_ACCESS_KEY_ID \
	-e AWS_SECRET_ACCESS_KEY \
	-e AWS_DEFAULT_REGION \
	-it --privileged \
	docker ./hack/release.sh
EOF
	exit 1
}
  33. [ "$AWS_S3_BUCKET" ] || usage
  34. [ "$AWS_ACCESS_KEY_ID" ] || usage
  35. [ "$AWS_SECRET_ACCESS_KEY" ] || usage
  36. [ -d /go/src/github.com/docker/docker ] || usage
  37. cd /go/src/github.com/docker/docker
  38. [ -x hack/make.sh ] || usage
  39. export AWS_DEFAULT_REGION
  40. : ${AWS_DEFAULT_REGION:=us-west-1}
  41. AWS_CLI=${AWS_CLI:-'aws'}
  42. RELEASE_BUNDLES=(
  43. binary
  44. cross
  45. tgz
  46. )

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration
	)
fi
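
# Illustrative note: without the bypass flag above, RELEASE_BUNDLES expands to
# (test-unit binary cross tgz test-integration), so unit tests run before the
# build and integration tests after it.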

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the debs:
#   release (get.docker.com)
#     GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test (test.docker.com)
#     GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"

setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	$AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	$AWS_CLI s3 ls "s3://$BUCKET" >/dev/null
	# Make the bucket accessible through website endpoints.
	$AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET"
}

# write_to_s3 uploads the contents of standard input to the specified S3 url.
write_to_s3() {
	DEST=$1
	F=$(mktemp)
	cat > "$F"
	$AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
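
# Illustrative usage of write_to_s3 (mirrors the call made in release_binaries
# below): any text piped to it becomes a public, plain-text object, e.g.:
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"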

s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}
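
# Illustrative outputs of s3_url: BUCKET=get.docker.com yields
# "https://get.docker.com", while a hypothetical BUCKET=my-test-bucket in
# us-west-1 yields "http://my-test-bucket.s3-website-us-west-1.amazonaws.com".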

build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is? issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really knows all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
		echo >&2 ' should be used. If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}

upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo "  to $dst"
	echo
	$AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		$AWS_CLI s3 cp --acl public-read "$dst" "$latest"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo "  to $dst.$hashAlgo"
			echo
			$AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				$AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}
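
# Illustrative call (this is how release_build() invokes it below): uploads the
# tarball, optionally copies it to the "latest" alias, and also uploads any
# sibling .md5/.sha256 files produced by hash_files() in hack/make.sh:
#   upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"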

release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux
	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		solaris)
			echo skipping solaris release
			return 0
			;;
		windows)
			# this is Windows, so use the .zip and .exe extensions for the files.
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
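	# For example (illustrative): GOOS=linux GOARCH=amd64 maps to
	# "s3://$BUCKET_PATH/builds/Linux/x86_64", matching the download URLs
	# announced at the end of this script.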

	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now.
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi
	# disable binary uploads for now. Only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}

# Upload binaries and tgz files to S3
release_binaries() {
	[ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
# To install, run the following commands as root:
curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin

# Then start docker in daemon mode:
/usr/local/bin/dockerd
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	$AWS_CLI s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}

main() {
	[ "$SKIP_RELEASE_BUILD" = '1' ] || build_all
	setup_s3
	release_binaries
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo