#!/usr/bin/env bash
set -e
set -o pipefail

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string read from the VERSION
# file at the root of the repository.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.
# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in the
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in the environment
  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
           -e AWS_ACCESS_KEY_ID \
           -e AWS_SECRET_ACCESS_KEY \
           -e AWS_DEFAULT_REGION \
           -it --privileged \
           docker ./hack/release.sh
EOF
	exit 1
}
[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY_ID" ] || usage
[ "$AWS_SECRET_ACCESS_KEY" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

export AWS_DEFAULT_REGION
: "${AWS_DEFAULT_REGION:=us-west-1}"
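# `:` is a no-op, so the line above exists only for its parameter expansion:
# AWS_DEFAULT_REGION gets the default "us-west-1" only when it is unset or empty.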
RELEASE_BUNDLES=(
	binary
	cross
	tgz
)
if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi
VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
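# For example (illustrative values): AWS_S3_BUCKET=test.docker.com with
# AWS_S3_BUCKET_PATH=dry-run yields BUCKET_PATH=test.docker.com/dry-run.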
# Refuse to release from a work tree with uncommitted changes (checked only
# when git is available and we are inside a repository).
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi
# These are the 2 keys we've used to sign the debs:
#   release (get.docker.com)
#     GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test (test.docker.com)
#     GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	aws s3 ls "s3://$BUCKET" >/dev/null
	# Make the bucket accessible through website endpoints.
	aws s3 website --index-document index --error-document error "s3://$BUCKET"
}
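# With website hosting enabled, the bucket is also reachable at
# http://<bucket>.s3-website-<region>.amazonaws.com, which is the URL format
# that s3_url below builds for non-canonical buckets.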
# write_to_s3 uploads the contents of standard input to the specified S3 URL.
write_to_s3() {
	DEST=$1
	F=$(mktemp)
	cat > "$F"
	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
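# Usage example (as called from release_binaries below):
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"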
s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			if [[ -n "$AWS_S3_BUCKET_PATH" ]]; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}
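# For a hypothetical bucket "my-bucket" in us-west-1 with no bucket path,
# s3_url prints: http://my-bucket.s3-website-us-west-1.amazonaws.com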
build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple of options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is? issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really know all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough how sparingly this bypass'
		echo >&2 ' should be used. If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}
upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo "       to $dst"
	echo
	aws s3 cp --follow-symlinks --acl public-read "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		aws s3 cp --acl public-read "$dst" "$latest"
	fi

	# get the hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo "       to $dst.$hashAlgo"
			echo
			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}
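# Usage example (as called from release_build below):
#   upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"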
release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux
	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			# this is Windows: use the .zip and .exe extensions for the files
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac
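	# Example mappings produced by the two case statements above:
	#   GOOS=linux   GOARCH=amd64 -> Linux/x86_64
	#   GOOS=windows GOARCH=386   -> Windows/i386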
	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi

	# disable binary uploads for now, only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}
# Upload binaries and tgz files to S3
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done
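	# The loop above derives GOOS/GOARCH from the bundle directory layout,
	# e.g. bundles/$VERSION/cross/linux/amd64 -> GOOS=linux GOARCH=amd64.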
	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 "s3://$BUCKET_PATH/builds/index"
# To install, run the following commands as root:
curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin
# Then start docker in daemon mode:
/usr/local/bin/dockerd
EOF

	# Add a redirect at /builds/info for URL backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	aws s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}
# Upload the index script.
release_index() {
	echo "Releasing index"
	url="$(s3_url)/" hack/make.sh install-script
	write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
}
main() {
	build_all
	setup_s3
	release_binaries
	release_index
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo