
testing, issue #1766: Add nightly release to docker-ci

Daniel Mizyrycki, 12 years ago
parent
commit
1dcdc3deb7

+ 56 - 2
hack/infrastructure/docker-ci.rst

@@ -1,5 +1,38 @@
-docker-ci github pull request
-=============================
+docker-ci
+=========
+
+docker-ci is our buildbot continuous integration server,
+building and testing docker, hosted on EC2 and reachable at
+http://docker-ci.dotcloud.com
+
+
+Deployment
+==========
+
+# Load AWS credentials
+export AWS_ACCESS_KEY_ID=''
+export AWS_SECRET_ACCESS_KEY=''
+export AWS_KEYPAIR_NAME=''
+export AWS_SSH_PRIVKEY=''
+
+# Load buildbot credentials and config
+export BUILDBOT_PWD=''
+export IRC_PWD=''
+export IRC_CHANNEL='docker-dev'
+export SMTP_USER=''
+export SMTP_PWD=''
+export EMAIL_RCP=''
+
+# Load registry test credentials
+export REGISTRY_USER=''
+export REGISTRY_PWD=''
+
+cd docker/testing
+vagrant up --provider=aws
+
+
+github pull request
+===================
 
 
 The entire docker pull request test workflow is event driven by github. Its
 usage is fully automatic and the results are logged in docker-ci.dotcloud.com
@@ -13,3 +46,24 @@ buildbot (0.8.7p1) was patched using ./testing/buildbot/github.py, so it
 can understand the PR data github sends to it. Originally PR #1603 (ee64e099e0)
 implemented this capability. Also we added a new scheduler to exclusively filter
 PRs, and the 'pullrequest' builder to rebase the PR on top of master and test it.
+
+
+nightly release
+================
+
+The nightly release is driven by buildbot: it runs a DinD (Docker-in-Docker)
+container that fetches the docker repository and builds the release container.
+The resulting docker binary is then tested, and if everything passes the
+release is published.
+
+Building the release DinD Container
+-----------------------------------
+
+# Log into docker-ci
+ssh ubuntu@docker-ci.dotcloud.com
+cd /data/docker/testing/nightlyrelease
+# Add release_credentials.json as specified in ./Dockerfile
+cat  > release_credentials.json << EOF
+EOF
+sudo docker build -t dockerbuilder .
+# Now that the container is built, release_credentials.json is not needed anymore
+git checkout release_credentials.json
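
Note: release_credentials.json (referenced above and specified in testing/nightlyrelease/Dockerfile) is a base64-encoded JSON object with four credential fields. A minimal sketch of producing it, assuming GNU coreutils base64 and using placeholder values rather than real credentials:

# Encode placeholder credentials into release_credentials.json (values are dummies)
cat << 'CREDS' | base64 -w0 > release_credentials.json
{ "AWS_ACCESS_KEY": "<s3_access_key_id>",
  "AWS_SECRET_KEY": "<s3_secret_key>",
  "GPG_PASSPHRASE": "<gpg_signing_passphrase>",
  "INDEX_AUTH": "<index_auth_value>" }
CREDS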

+ 24 - 12
testing/Vagrantfile

@@ -2,8 +2,8 @@
 # vi: set ft=ruby :
 
 BOX_NAME = "docker-ci"
-BOX_URI = "http://cloud-images.ubuntu.com/vagrant/raring/current/raring-server-cloudimg-amd64-vagrant-disk1.box"
-AWS_AMI = "ami-10314d79"
+BOX_URI = "http://files.vagrantup.com/precise64.box"
+AWS_AMI = "ami-d0f89fb9"
 DOCKER_PATH = "/data/docker"
 CFG_PATH = "#{DOCKER_PATH}/testing/buildbot"
 on_vbox = File.file?("#{File.dirname(__FILE__)}/.vagrant/machines/default/virtualbox/id") | \
@@ -23,16 +23,13 @@ Vagrant::Config.run do |config|
   if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
     # Add memory limitation capabilities
     pkg_cmd = 'sed -Ei \'s/^(GRUB_CMDLINE_LINUX_DEFAULT)=.+/\\1="cgroup_enable=memory swapaccount=1 quiet"/\' /etc/default/grub; '
-    # Adjust kernel
-    pkg_cmd << "apt-get update -qq; "
-    if on_vbox
-      pkg_cmd << "apt-get install -q -y linux-image-extra-`uname -r`; "
-    else
-      pkg_cmd << "apt-get install -q -y linux-image-generic; "
-    end
+    # Install new kernel
+    pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
 
 
     # Deploy buildbot CI
-    pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
+    pkg_cmd << "echo 'America/Los_Angeles' > /etc/timezone; " \
+      "dpkg-reconfigure --frontend noninteractive tzdata; " \
+      "apt-get install -q -y python-dev python-pip supervisor; " \
       "pip install -r #{CFG_PATH}/requirements.txt; " \
       "chown #{USER}.#{USER} /data; cd /data; " \
       "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH} #{ENV['BUILDBOT_PWD']} " \
@@ -40,12 +37,27 @@ Vagrant::Config.run do |config|
         "#{ENV['SMTP_PWD']} #{ENV['EMAIL_RCP']}; " \
       "#{CFG_PATH}/setup_credentials.sh #{USER} " \
         "#{ENV['REGISTRY_USER']} #{ENV['REGISTRY_PWD']}; "
-    # Install docker and testing dependencies
+    # Install docker
+    pkg_cmd << "mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker; " \
+      "wget -q -O - https://get.docker.io/gpg | apt-key add -; " \
+      "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list; " \
+      "apt-get update -qq; apt-get install -yq lxc-docker; "
+    # Enable docker host to run docker daemon in containers (dind)
+    pkg_cmd << "/sbin/stop docker; "
+    pkg_cmd << "DIND_CMD=\"  umount /sys/fs/cgroup/*; umount /sys/fs/cgroup\\n" \
+      "  mount -t tmpfs none /sys/fs/cgroup; cd /sys/fs/cgroup\\n" \
+      "  for C in \x5C\x24(awk '{print \x5C\x241}' < /proc/cgroups | grep -v subsys | grep -v memory);\\n" \
+      "    do mkdir \x5C\x24C; mount -t cgroup none -o \x5C\x24C \x5C\x24C; done;\\n" \
+      "  /usr/bin/docker -d\"; "
+    pkg_cmd << "sed -Ei 's~start on filesystem.+~start on filesystem and started lxc-net~' /etc/init/docker.conf; "
+    pkg_cmd << "sed -Ei 's~    /usr/bin/docker -d~'\"$DIND_CMD\"'~' /etc/init/docker.conf; "
+    # Install testing dependencies
     pkg_cmd << "curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | " \
       "  tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go; " \
       "curl -s https://phantomjs.googlecode.com/files/phantomjs-1.9.1-linux-x86_64.tar.bz2 | " \
       "  tar jx -C /usr/bin --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs; " \
-      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git mercurial aufs-tools make libfontconfig; " \
+      "DEBIAN_FRONTEND=noninteractive apt-get install -qy lxc git mercurial aufs-tools " \
+      " make libfontconfig libevent-dev; " \
       "export GOPATH=/data/docker-dependencies; go get -d github.com/dotcloud/docker; " \
       "rm -rf ${GOPATH}/src/github.com/dotcloud/docker; "
     # Activate new kernel options

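
Note: the DIND_CMD block above rewrites /etc/init/docker.conf so the host daemon starts with the cgroup hierarchy mounted the way Docker-in-Docker expects. Once the Ruby escapes resolve (\x5C\x24 becomes \$) and the two sed commands run, the upstart job should execute roughly the following in place of the original "/usr/bin/docker -d" line; this is an illustrative reconstruction of the intent, not the verbatim result:

  # remount cgroups as individual hierarchies, then start the daemon
  umount /sys/fs/cgroup/*; umount /sys/fs/cgroup
  mount -t tmpfs none /sys/fs/cgroup; cd /sys/fs/cgroup
  for C in $(awk '{print $1}' < /proc/cgroups | grep -v subsys | grep -v memory);
    do mkdir $C; mount -t cgroup none -o $C $C; done;
  /usr/bin/docker -d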
+ 10 - 2
testing/buildbot/master.cfg

@@ -45,14 +45,14 @@ c['slavePortnum'] = PORT_MASTER
 
 
 # Schedulers
 c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME,
-    'index','registry','coverage'])]
+    'index','registry','coverage','nightlyrelease'])]
 c['schedulers'] += [SingleBranchScheduler(name="all",
     change_filter=filter.ChangeFilter(branch='master'), treeStableTimer=None,
     builderNames=[BUILDER_NAME])]
 c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
     change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
     builderNames=['pullrequest'])]
-c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage'],
+c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage','nightlyrelease'],
     hour=0, minute=30)]
 c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
     hour=range(0,24,4), minute=15)]
@@ -109,6 +109,14 @@ factory.addStep(ShellCommand(description='index', logEnviron=False,
 c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
     factory=factory)]
 
 
+# Docker nightly release
+nightlyrelease_cmd = ('docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined"'
+    ' -e AWS_S3_BUCKET="test.docker.io" dockerbuilder')
+factory = BuildFactory()
+factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,usePTY=True,
+    command=nightlyrelease_cmd))
+c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
+    factory=factory)]
 
 
 # Status
 authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),

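
Note: the 'nightlyrelease' builder simply runs the dockerbuilder image, so a failed nightly can be reproduced by hand on the CI host with the same command the builder uses (a sketch; it assumes the dockerbuilder image from testing/nightlyrelease has already been built):

ssh ubuntu@docker-ci.dotcloud.com
sudo docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined" \
    -e AWS_S3_BUCKET="test.docker.io" dockerbuilder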
+ 47 - 0
testing/nightlyrelease/Dockerfile

@@ -0,0 +1,47 @@
+# VERSION:        1.0
+# DOCKER-VERSION  0.6.1
+# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# DESCRIPTION:    Build docker nightly release using Docker in Docker.
+# REFERENCES:     This code reuses the excellent implementation of docker in docker
+#                 made by Jerome Petazzoni.  https://github.com/jpetazzo/dind
+# COMMENTS:
+#   release_credentials.json is a base64-encoded JSON file containing:
+#       { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
+#         "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
+#         "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature",
+#         "INDEX_AUTH": "Encrypted_index_authentication" }
+#   When releasing:  Docker in Docker requires cgroups mounted the same way in
+#   the host and containers:
+#       stop docker
+#       umount /sys/fs/cgroup/*; umount /sys/fs/cgroup; mount -t tmpfs none /sys/fs/cgroup
+#       cd /sys/fs/cgroup; for C in $(awk '{print $1}' < /proc/cgroups | grep -v subsys | grep -v memory) ; do mkdir $C ; mount -t cgroup none -o $C $C ; done
+#       docker -d &
+# TO_BUILD:       docker build -t dockerbuilder .
+# TO_RELEASE:     docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined" -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
+
+from ubuntu:12.04
+maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+
+# Add docker dependencies
+run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+run apt-get update; apt-get install -y -q iptables ca-certificates bzip2 python lxc curl git mercurial
+run curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | tar -v -C /usr/local -xz
+run ln -s /usr/local/go/bin/go /usr/bin
+
+# Add production docker binary
+run curl http://get.docker.io/builds/Linux/x86_64/docker-latest >/usr/bin/docker; chmod +x /usr/bin/docker
+
+# Add proto docker builder
+add ./dockerbuild /usr/bin/dockerbuild
+run chmod +x /usr/bin/dockerbuild
+
+# Add release credentials
+add ./release_credentials.json /root/release_credentials.json
+
+# Make /tmp and /var/lib/docker inside the container addressable by other containers.
+# This is done to ensure /tmp and /var/lib/docker have the AUFS support needed by the inner docker server
+volume /tmp
+volume /var/lib/docker
+
+# Launch build process in a container
+cmd dockerbuild
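
Note: before baking release_credentials.json into the image, it may be worth checking that it decodes to valid JSON with the keys the dockerbuild script expects. A quick sanity check (a sketch, using the same Python 2 decoding approach as dockerbuild):

# Should print the four credential keys
python -c 'import json,base64; print sorted(json.loads(base64.b64decode(open("release_credentials.json").read())).keys())'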

+ 71 - 0
testing/nightlyrelease/dockerbuild

@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, GPG_PASSPHRASE and INDEX_AUTH
+# are decoded from /root/release_credentials.json and passed to the environment
+# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
+
+# Enable debugging
+set -x
+
+# Prepare container environment to run docker in docker
+# Mount cgroups
+mount -t tmpfs none /tmp; mount -t tmpfs none /sys/fs/cgroup; cd /sys/fs/cgroup
+for C in $(awk "{print \$1}" < /proc/cgroups | grep -v subsys | grep -v memory) ; do mkdir $C ; mount -t cgroup none -o $C $C ; done
+pushd /proc/self/fd >/dev/null; for FD in *; do case "$FD" in [012]) ;; *) eval exec "$FD>&-" ;; esac done; popd >/dev/null
+
+# Launch docker daemon inside the container
+docker -d &
+
+# fetch docker master branch
+export GOPATH=/go
+rm -rf $GOPATH; mkdir -p $GOPATH
+go get -d github.com/dotcloud/docker
+cd /go/src/github.com/dotcloud/docker
+
+# Add an uncommitted change to generate a timestamped release
+date > timestamp
+
+# Build the docker package and extract docker binary
+docker build -t releasedocker .
+docker run releasedocker sh -c 'cat /go/src/github.com/dotcloud/docker/bundles/*/binary/docker*'  >/docker
+chmod +x /docker
+
+# Swap docker production daemon with new docker binary for testing
+kill $(pgrep '^docker$')
+sleep 15
+mv /docker /usr/bin
+docker -d &
+sleep 15
+
+# Turn debug off to load credentials in the environment and
+# to authenticate to the index
+set +x
+eval $(cat /root/release_credentials.json  | python -c '
+import sys,json,base64;
+d=json.loads(base64.b64decode(sys.stdin.read()));
+exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
+echo '{"https://index.docker.io/v1/":{"auth":"'$INDEX_AUTH'","email":"engineering@dotcloud.com"}}' > /.dockercfg
+set -x
+
+# Test docker nightly
+# Generate unique image name
+export DIMAGE=testimage`date +'%Y%m%d%H%M%S'`
+
+# Simple docker version test
+docker version || exit 1
+
+# Containerized hello world
+docker run -cidfile=hello.cid busybox echo 'Hello world' | grep -q 'Hello world' || exit 1
+
+# Create an image based on the busybox container and test pushing it to the index
+docker commit `cat hello.cid` test/$DIMAGE
+docker images | grep -q test/$DIMAGE || exit 1
+docker push test/$DIMAGE
+
+# Verify the image was properly pushed to the index
+docker search $DIMAGE | grep -q $DIMAGE || exit 1
+
+# Push docker nightly
+echo docker run -i -t -e AWS_S3_BUCKET="$AWS_S3_BUCKET" -e AWS_ACCESS_KEY="XXXXX" -e AWS_SECRET_KEY="XXXXX" -e GPG_PASSPHRASE="XXXXX" releasedocker
+set +x
+docker run -i -t -e AWS_S3_BUCKET="$AWS_S3_BUCKET" -e AWS_ACCESS_KEY="$AWS_ACCESS_KEY" -e AWS_SECRET_KEY="$AWS_SECRET_KEY" -e GPG_PASSPHRASE="$GPG_PASSPHRASE" releasedocker
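
Note: the INDEX_AUTH value written into /.dockercfg above is the standard dockercfg 'auth' field. Assuming it is the usual base64 of "user:password" for the test index account (an assumption; the script itself only passes the value through), it can be generated from placeholder credentials like this:

# Assumption: INDEX_AUTH is base64("user:password") for the test index account
echo -n 'testuser:testpassword' | base64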

+ 1 - 0
testing/nightlyrelease/release_credentials.json

@@ -0,0 +1 @@
+eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=
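
Note: the committed release_credentials.json is only a placeholder; the base64 blob above decodes to a JSON object with the four expected keys and empty values:

{ "AWS_ACCESS_KEY": "",
  "AWS_SECRET_KEY": "",
  "GPG_PASSPHRASE": "",
  "INDEX_AUTH": "" }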