release.sh 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320
  1. #!/bin/bash
  2. set -e
  3. # This script looks for bundles built by make.sh, and releases them on a
  4. # public S3 bucket.
  5. #
  6. # Bundles should be available for the VERSION string passed as argument.
  7. #
  8. # The correct way to call this script is inside a container built by the
  9. # official Dockerfile at the root of the Docker source code. The Dockerfile,
  10. # make.sh and release.sh should all be from the same source code revision.
  11. set -o pipefail
  12. # Print a usage message and exit.
  13. usage() {
  14. cat >&2 <<'EOF'
  15. To run, I need:
  16. - to be in a container generated by the Dockerfile at the top of the Docker
  17. repository;
  18. - to be provided with the name of an S3 bucket, in environment variable
  19. AWS_S3_BUCKET;
  20. - to be provided with AWS credentials for this S3 bucket, in environment
  21. variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
  22. - the passphrase to unlock the GPG key which will sign the deb packages
  23. (passed as environment variable GPG_PASSPHRASE);
  24. - a generous amount of good will and nice manners.
  25. The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
  26. docker run -e AWS_S3_BUCKET=get-staging.docker.io \
  27. -e AWS_ACCESS_KEY=AKI1234... \
  28. -e AWS_SECRET_KEY=sEs4mE... \
  29. -e GPG_PASSPHRASE=m0resEs4mE... \
  30. -i -t -privileged \
  31. docker ./hack/release.sh
  32. EOF
  33. exit 1
  34. }
  35. [ "$AWS_S3_BUCKET" ] || usage
  36. [ "$AWS_ACCESS_KEY" ] || usage
  37. [ "$AWS_SECRET_KEY" ] || usage
  38. [ "$GPG_PASSPHRASE" ] || usage
  39. [ -d /go/src/github.com/dotcloud/docker ] || usage
  40. cd /go/src/github.com/dotcloud/docker
  41. [ -x hack/make.sh ] || usage
  42. RELEASE_BUNDLES=(
  43. binary
  44. cross
  45. tgz
  46. ubuntu
  47. )
  48. if [ "$1" != '--release-regardless-of-test-failure' ]; then
  49. RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" )
  50. fi
  51. if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
  52. echo >&2
  53. echo >&2 'The build or tests appear to have failed.'
  54. echo >&2
  55. echo >&2 'You, as the release maintainer, now have a couple options:'
  56. echo >&2 '- delay release and fix issues'
  57. echo >&2 '- delay release and fix issues'
  58. echo >&2 '- did we mention how important this is? issues need fixing :)'
  59. echo >&2
  60. echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
  61. echo >&2 ' really knows all the hairy problems at hand with the current release'
  62. echo >&2 ' issues) may bypass this checking by running this script again with the'
  63. echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
  64. echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
  65. echo >&2 ' avoid using this if at all possible.'
  66. echo >&2
  67. echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
  68. echo >&2 ' should be used. If there are release issues, we should always err on the'
  69. echo >&2 ' side of caution.'
  70. echo >&2
  71. exit 1
  72. fi
  73. VERSION=$(cat VERSION)
  74. BUCKET=$AWS_S3_BUCKET
  75. setup_s3() {
  76. # Try creating the bucket. Ignore errors (it might already exist).
  77. s3cmd mb s3://$BUCKET 2>/dev/null || true
  78. # Check access to the bucket.
  79. # s3cmd has no useful exit status, so we cannot check that.
  80. # Instead, we check if it outputs anything on standard output.
  81. # (When there are problems, it uses standard error instead.)
  82. s3cmd info s3://$BUCKET | grep -q .
  83. # Make the bucket accessible through website endpoints.
  84. s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
  85. }
  86. # write_to_s3 uploads the contents of standard input to the specified S3 url.
  87. write_to_s3() {
  88. DEST=$1
  89. F=`mktemp`
  90. cat > $F
  91. s3cmd --acl-public --mime-type='text/plain' put $F $DEST
  92. rm -f $F
  93. }
  94. s3_url() {
  95. case "$BUCKET" in
  96. get.docker.io|test.docker.io)
  97. echo "https://$BUCKET"
  98. ;;
  99. *)
  100. s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
  101. ;;
  102. esac
  103. }
  104. release_build() {
  105. GOOS=$1
  106. GOARCH=$2
  107. BINARY=bundles/$VERSION/cross/$GOOS/$GOARCH/docker-$VERSION
  108. TGZ=bundles/$VERSION/tgz/$GOOS/$GOARCH/docker-$VERSION.tgz
  109. # we need to map our GOOS and GOARCH to uname values
  110. # see https://en.wikipedia.org/wiki/Uname
  111. # ie, GOOS=linux -> "uname -s"=Linux
  112. S3OS=$GOOS
  113. case "$S3OS" in
  114. darwin)
  115. S3OS=Darwin
  116. ;;
  117. freebsd)
  118. S3OS=FreeBSD
  119. ;;
  120. linux)
  121. S3OS=Linux
  122. ;;
  123. *)
  124. echo >&2 "error: can't convert $S3OS to an appropriate value for 'uname -s'"
  125. exit 1
  126. ;;
  127. esac
  128. S3ARCH=$GOARCH
  129. case "$S3ARCH" in
  130. amd64)
  131. S3ARCH=x86_64
  132. ;;
  133. 386)
  134. S3ARCH=i386
  135. ;;
  136. arm)
  137. # GOARCH is fine
  138. ;;
  139. *)
  140. echo >&2 "error: can't convert $S3ARCH to an appropriate value for 'uname -m'"
  141. exit 1
  142. ;;
  143. esac
  144. S3DIR=s3://$BUCKET/builds/$S3OS/$S3ARCH
  145. if [ ! -x "$BINARY" ]; then
  146. echo >&2 "error: can't find $BINARY - was it compiled properly?"
  147. exit 1
  148. fi
  149. if [ ! -f "$TGZ" ]; then
  150. echo >&2 "error: can't find $TGZ - was it packaged properly?"
  151. exit 1
  152. fi
  153. echo "Uploading $BINARY to $S3OS/$S3ARCH/docker-$VERSION"
  154. s3cmd --follow-symlinks --preserve --acl-public put $BINARY $S3DIR/docker-$VERSION
  155. echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz"
  156. s3cmd --follow-symlinks --preserve --acl-public put $TGZ $S3DIR/docker-$VERSION.tgz
  157. if [ -z "$NOLATEST" ]; then
  158. echo "Copying $S3OS/$S3ARCH/docker-$VERSION to $S3OS/$S3ARCH/docker-latest"
  159. s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
  160. echo "Copying $S3OS/$S3ARCH/docker-$VERSION.tgz to $S3OS/$S3ARCH/docker-latest.tgz"
  161. s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz
  162. fi
  163. }
# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
# Reads globals $VERSION, $BUCKET and $GPG_PASSPHRASE. Persists the GPG
# keyring in the bucket itself so later runs reuse the same signing key.
release_ubuntu() {
[ -e bundles/$VERSION/ubuntu ] || {
echo >&2 './hack/make.sh must be run before release_ubuntu'
exit 1
}
# Make sure that we have our keys
mkdir -p /.gnupg/
s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
# Generate a signing key only on first use; later runs find the synced key.
gpg --list-keys releasedocker >/dev/null || {
gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 2048
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Email: docker@dotcloud.com
Name-Comment: releasedocker
Expire-Date: 0
%commit
EOF
}
# Sign our packages
# NOTE(review): the passphrase is passed on the command line, so it is
# briefly visible in 'ps' output — consider gpg-agent / --passphrase-fd.
dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
--sign builder bundles/$VERSION/ubuntu/*.deb
# Setup the APT repo
APTDIR=bundles/$VERSION/ubuntu/apt
mkdir -p $APTDIR/conf $APTDIR/db
# Reuse the previous reprepro database from the bucket, if any.
s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
cat > $APTDIR/conf/distributions <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF
# Add the DEB package to the APT repo
# NOTE: DEBFILE holds an unquoted glob on purpose; it must expand below.
DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
reprepro -b $APTDIR includedeb docker $DEBFILE
# Sign
# Detach-sign every Release file reprepro produced (same ps caveat as above).
for F in $(find $APTDIR -name Release); do
gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
--armor --sign --detach-sign \
--output $F.gpg $F
done
# Upload keys
s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg
# Upload repo
s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
# The '#' lines below are part of the uploaded instructions, not comments.
cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Install docker
apt-get update ; apt-get install -y lxc-docker
#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF
# Add redirect at /ubuntu/info for URL-backwards-compatibility
rm -rf /tmp/emptyfile && touch /tmp/emptyfile
s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}
  230. # Upload binaries and tgz files to S3
  231. release_binaries() {
  232. [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
  233. echo >&2 './hack/make.sh must be run before release_binaries'
  234. exit 1
  235. }
  236. for d in bundles/$VERSION/cross/*/*; do
  237. GOARCH="$(basename "$d")"
  238. GOOS="$(basename "$(dirname "$d")")"
  239. release_build "$GOOS" "$GOARCH"
  240. done
  241. # TODO create redirect from builds/*/i686 to builds/*/i386
  242. cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
  243. # To install, run the following command as root:
  244. curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
  245. # Then start docker in daemon mode:
  246. sudo /usr/local/bin/docker -d
  247. EOF
  248. # Add redirect at /builds/info for URL-backwards-compatibility
  249. rm -rf /tmp/emptyfile && touch /tmp/emptyfile
  250. s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
  251. if [ -z "$NOLATEST" ]; then
  252. echo "Advertising $VERSION on $BUCKET as most recent version"
  253. echo $VERSION | write_to_s3 s3://$BUCKET/latest
  254. fi
  255. }
  256. # Upload the index script
  257. release_index() {
  258. sed "s,https://get.docker.io/,$(s3_url)/," hack/install.sh | write_to_s3 s3://$BUCKET/index
  259. }
  260. release_test() {
  261. if [ -e "bundles/$VERSION/test" ]; then
  262. s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/
  263. fi
  264. }
# Run the full release pipeline. Order matters: the bucket must exist
# (setup_s3) before anything is uploaded into it, and release_index
# rewrites URLs that point at content published by the earlier steps.
main() {
setup_s3
release_binaries
release_ubuntu
release_index
release_test
}
main
  273. echo
  274. echo
  275. echo "Release complete; see $(s3_url)"
  276. echo