  1. #!/usr/bin/env bash
  2. set -e
  3. # This script looks for bundles built by make.sh, and releases them on a
  4. # public S3 bucket.
  5. #
  6. # Bundles should be available for the VERSION string passed as argument.
  7. #
  8. # The correct way to call this script is inside a container built by the
  9. # official Dockerfile at the root of the Docker source code. The Dockerfile,
  10. # make.sh and release.sh should all be from the same source code revision.
  11. set -o pipefail
  12. # Print a usage message and exit.
  13. usage() {
  14. cat >&2 <<'EOF'
  15. To run, I need:
  16. - to be in a container generated by the Dockerfile at the top of the Docker
  17. repository;
  18. - to be provided with the name of an S3 bucket, in environment variable
  19. AWS_S3_BUCKET;
  20. - to be provided with AWS credentials for this S3 bucket, in environment
  21. variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
  22. - the passphrase to unlock the GPG key which will sign the deb packages
  23. (passed as environment variable GPG_PASSPHRASE);
  24. - a generous amount of good will and nice manners.
  25. The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
  26. docker run -e AWS_S3_BUCKET=test.docker.com \
  27. -e AWS_ACCESS_KEY=... \
  28. -e AWS_SECRET_KEY=... \
  29. -e GPG_PASSPHRASE=... \
  30. -i -t --privileged \
  31. docker ./hack/release.sh
  32. EOF
  33. exit 1
  34. }
  35. [ "$AWS_S3_BUCKET" ] || usage
  36. [ "$AWS_ACCESS_KEY" ] || usage
  37. [ "$AWS_SECRET_KEY" ] || usage
  38. [ "$GPG_PASSPHRASE" ] || usage
  39. [ -d /go/src/github.com/docker/docker ] || usage
  40. cd /go/src/github.com/docker/docker
  41. [ -x hack/make.sh ] || usage
  42. RELEASE_BUNDLES=(
  43. binary
  44. cross
  45. tgz
  46. ubuntu
  47. )
  48. if [ "$1" != '--release-regardless-of-test-failure' ]; then
  49. RELEASE_BUNDLES=(
  50. test-unit
  51. "${RELEASE_BUNDLES[@]}"
  52. test-integration-cli
  53. )
  54. fi
  55. VERSION=$(< VERSION)
  56. BUCKET=$AWS_S3_BUCKET
  57. # These are the 2 keys we've used to sign the deb's
  58. # release (get.docker.com)
  59. # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
  60. # test (test.docker.com)
  61. # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
  62. setup_s3() {
  63. # Try creating the bucket. Ignore errors (it might already exist).
  64. s3cmd mb "s3://$BUCKET" 2>/dev/null || true
  65. # Check access to the bucket.
  66. # s3cmd has no useful exit status, so we cannot check that.
  67. # Instead, we check if it outputs anything on standard output.
  68. # (When there are problems, it uses standard error instead.)
  69. s3cmd info "s3://$BUCKET" | grep -q .
  70. # Make the bucket accessible through website endpoints.
  71. s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
  72. }
  73. # write_to_s3 uploads the contents of standard input to the specified S3 url.
  74. write_to_s3() {
  75. DEST=$1
  76. F=`mktemp`
  77. cat > "$F"
  78. s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
  79. rm -f "$F"
  80. }
  81. s3_url() {
  82. case "$BUCKET" in
  83. get.docker.com|test.docker.com)
  84. echo "https://$BUCKET"
  85. ;;
  86. *)
  87. s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
  88. ;;
  89. esac
  90. }
  91. build_all() {
  92. if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
  93. echo >&2
  94. echo >&2 'The build or tests appear to have failed.'
  95. echo >&2
  96. echo >&2 'You, as the release maintainer, now have a couple options:'
  97. echo >&2 '- delay release and fix issues'
  98. echo >&2 '- delay release and fix issues'
  99. echo >&2 '- did we mention how important this is? issues need fixing :)'
  100. echo >&2
  101. echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
  102. echo >&2 ' really knows all the hairy problems at hand with the current release'
  103. echo >&2 ' issues) may bypass this checking by running this script again with the'
  104. echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
  105. echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
  106. echo >&2 ' avoid using this if at all possible.'
  107. echo >&2
  108. echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
  109. echo >&2 ' should be used. If there are release issues, we should always err on the'
  110. echo >&2 ' side of caution.'
  111. echo >&2
  112. exit 1
  113. fi
  114. }
  115. upload_release_build() {
  116. src="$1"
  117. dst="$2"
  118. latest="$3"
  119. echo
  120. echo "Uploading $src"
  121. echo " to $dst"
  122. echo
  123. s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
  124. if [ "$latest" ]; then
  125. echo
  126. echo "Copying to $latest"
  127. echo
  128. s3cmd --acl-public cp "$dst" "$latest"
  129. fi
  130. # get hash files too (see hash_files() in hack/make.sh)
  131. for hashAlgo in md5 sha256; do
  132. if [ -e "$src.$hashAlgo" ]; then
  133. echo
  134. echo "Uploading $src.$hashAlgo"
  135. echo " to $dst.$hashAlgo"
  136. echo
  137. s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
  138. if [ "$latest" ]; then
  139. echo
  140. echo "Copying to $latest.$hashAlgo"
  141. echo
  142. s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
  143. fi
  144. fi
  145. done
  146. }
  147. release_build() {
  148. GOOS=$1
  149. GOARCH=$2
  150. binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
  151. tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
  152. binary=docker-$VERSION
  153. tgz=docker-$VERSION.tgz
  154. latestBase=
  155. if [ -z "$NOLATEST" ]; then
  156. latestBase=docker-latest
  157. fi
  158. # we need to map our GOOS and GOARCH to uname values
  159. # see https://en.wikipedia.org/wiki/Uname
  160. # ie, GOOS=linux -> "uname -s"=Linux
  161. s3Os=$GOOS
  162. case "$s3Os" in
  163. darwin)
  164. s3Os=Darwin
  165. ;;
  166. freebsd)
  167. s3Os=FreeBSD
  168. ;;
  169. linux)
  170. s3Os=Linux
  171. ;;
  172. windows)
  173. s3Os=Windows
  174. binary+='.exe'
  175. if [ "$latestBase" ]; then
  176. latestBase+='.exe'
  177. fi
  178. ;;
  179. *)
  180. echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
  181. exit 1
  182. ;;
  183. esac
  184. s3Arch=$GOARCH
  185. case "$s3Arch" in
  186. amd64)
  187. s3Arch=x86_64
  188. ;;
  189. 386)
  190. s3Arch=i386
  191. ;;
  192. arm)
  193. s3Arch=armel
  194. # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
  195. ;;
  196. *)
  197. echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
  198. exit 1
  199. ;;
  200. esac
  201. s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
  202. latest=
  203. latestTgz=
  204. if [ "$latestBase" ]; then
  205. latest="$s3Dir/$latestBase"
  206. latestTgz="$s3Dir/$latestBase.tgz"
  207. fi
  208. if [ ! -x "$binDir/$binary" ]; then
  209. echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
  210. exit 1
  211. fi
  212. if [ ! -f "$tgzDir/$tgz" ]; then
  213. echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
  214. exit 1
  215. fi
  216. upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
  217. upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
  218. }
  219. # Upload the 'ubuntu' bundle to S3:
  220. # 1. A full APT repository is published at $BUCKET/ubuntu/
  221. # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
  222. release_ubuntu() {
  223. [ -e "bundles/$VERSION/ubuntu" ] || {
  224. echo >&2 './hack/make.sh must be run before release_ubuntu'
  225. exit 1
  226. }
  227. local debfiles=( "bundles/$VERSION/ubuntu/"*.deb )
  228. # Sign our packages
  229. dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker --sign builder "${debfiles[@]}"
  230. # Setup the APT repo
  231. APTDIR=bundles/$VERSION/ubuntu/apt
  232. mkdir -p "$APTDIR/conf" "$APTDIR/db"
  233. s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true
  234. cat > "$APTDIR/conf/distributions" <<EOF
  235. Codename: docker
  236. Components: main
  237. Architectures: amd64 i386
  238. EOF
  239. # Add the DEB package to the APT repo
  240. reprepro -b "$APTDIR" includedeb docker "${debfiles[@]}"
  241. # Sign
  242. for F in $(find $APTDIR -name Release); do
  243. gpg -u releasedocker --passphrase "$GPG_PASSPHRASE" \
  244. --armor --sign --detach-sign \
  245. --output "$F.gpg" "$F"
  246. done
  247. # Upload keys
  248. s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/"
  249. gpg --armor --export releasedocker > "bundles/$VERSION/ubuntu/gpg"
  250. s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg"
  251. local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
  252. if [[ $BUCKET == test* ]]; then
  253. gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6
  254. elif [[ $BUCKET == experimental* ]]; then
  255. gpgFingerprint=E33FF7BF5C91D50A6F91FFFD4CC38D40F9A96B49
  256. fi
  257. # Upload repo
  258. s3cmd --acl-public sync "$APTDIR/" "s3://$BUCKET/ubuntu/"
  259. cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
  260. # Check that HTTPS transport is available to APT
  261. if [ ! -e /usr/lib/apt/methods/https ]; then
  262. apt-get update
  263. apt-get install -y apt-transport-https
  264. fi
  265. # Add the repository to your APT sources
  266. echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
  267. # Then import the repository key
  268. apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys $gpgFingerprint
  269. # Install docker
  270. apt-get update
  271. apt-get install -y lxc-docker
  272. #
  273. # Alternatively, just use the curl-able install.sh script provided at $(s3_url)
  274. #
  275. EOF
  276. # Add redirect at /ubuntu/info for URL-backwards-compatibility
  277. rm -rf /tmp/emptyfile && touch /tmp/emptyfile
  278. s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/ubuntu/info"
  279. echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
  280. }
  281. # Upload binaries and tgz files to S3
  282. release_binaries() {
  283. [ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
  284. echo >&2 './hack/make.sh must be run before release_binaries'
  285. exit 1
  286. }
  287. for d in bundles/$VERSION/cross/*/*; do
  288. GOARCH="$(basename "$d")"
  289. GOOS="$(basename "$(dirname "$d")")"
  290. release_build "$GOOS" "$GOARCH"
  291. done
  292. # TODO create redirect from builds/*/i686 to builds/*/i386
  293. cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
  294. # To install, run the following command as root:
  295. curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
  296. # Then start docker in daemon mode:
  297. sudo /usr/local/bin/docker -d
  298. EOF
  299. # Add redirect at /builds/info for URL-backwards-compatibility
  300. rm -rf /tmp/emptyfile && touch /tmp/emptyfile
  301. s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info"
  302. if [ -z "$NOLATEST" ]; then
  303. echo "Advertising $VERSION on $BUCKET as most recent version"
  304. echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
  305. fi
  306. }
  307. # Upload the index script
  308. release_index() {
  309. sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
  310. }
  311. release_test() {
  312. if [ -e "bundles/$VERSION/test" ]; then
  313. s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
  314. fi
  315. }
  316. setup_gpg() {
  317. # Make sure that we have our keys
  318. mkdir -p "$HOME/.gnupg/"
  319. s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true
  320. gpg --list-keys releasedocker >/dev/null || {
  321. gpg --gen-key --batch <<EOF
  322. Key-Type: RSA
  323. Key-Length: 4096
  324. Passphrase: $GPG_PASSPHRASE
  325. Name-Real: Docker Release Tool
  326. Name-Email: docker@docker.com
  327. Name-Comment: releasedocker
  328. Expire-Date: 0
  329. %commit
  330. EOF
  331. }
  332. }
  333. main() {
  334. build_all
  335. setup_s3
  336. setup_gpg
  337. release_binaries
  338. release_ubuntu
  339. release_index
  340. release_test
  341. }
  342. main
  343. echo
  344. echo
  345. echo "Release complete; see $(s3_url)"
  346. echo