diff --git a/hack/make.sh b/hack/make.sh
index ee62fc5e82..b84a437202 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -296,7 +296,7 @@ copy_containerd() {
 	# OS/ARCH of the build target
 	if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
 		(set -x
-		if [ -x /usr/local/bin/runc ]; then
+		if [ -x /usr/local/bin/docker-runc ]; then
 			echo "Copying nested executables into $dir"
 			for file in containerd containerd-shim containerd-ctr runc; do
 				cp "/usr/local/bin/docker-$file" "$dir/"
diff --git a/hack/release.sh b/hack/release.sh
index 9acf5f5fc7..e9a239c1e0 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -21,26 +21,30 @@ To run, I need:
 - to be provided with the location of an S3 bucket and path, in
   environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
 - to be provided with AWS credentials for this S3 bucket, in environment
-  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
+  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
 - a generous amount of good will and nice manners.
 The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
 
 docker run -e AWS_S3_BUCKET=test.docker.com \
-           -e AWS_ACCESS_KEY=... \
-           -e AWS_SECRET_KEY=... \
-           -i -t --privileged \
+           -e AWS_ACCESS_KEY_ID \
+           -e AWS_SECRET_ACCESS_KEY \
+           -e AWS_DEFAULT_REGION \
+           -it --privileged \
            docker ./hack/release.sh
 EOF
 	exit 1
 }
 
 [ "$AWS_S3_BUCKET" ] || usage
-[ "$AWS_ACCESS_KEY" ] || usage
-[ "$AWS_SECRET_KEY" ] || usage
+[ "$AWS_ACCESS_KEY_ID" ] || usage
+[ "$AWS_SECRET_ACCESS_KEY" ] || usage
 [ -d /go/src/github.com/docker/docker ] || usage
 cd /go/src/github.com/docker/docker
 [ -x hack/make.sh ] || usage
 
+export AWS_DEFAULT_REGION
+: ${AWS_DEFAULT_REGION:=us-west-2}
+
 RELEASE_BUNDLES=(
 	binary
 	cross
@@ -75,16 +79,14 @@ fi
 
 setup_s3() {
 	echo "Setting up S3"
+	# TODO: Move to Dockerfile
+	pip install awscli==1.10.15
 	# Try creating the bucket. Ignore errors (it might already exist).
-	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
+	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
 	# Check access to the bucket.
-	# s3cmd has no useful exit status, so we cannot check that.
-	# Instead, we check if it outputs anything on standard output.
-	# (When there are problems, it uses standard error instead.)
-	# NOTE: for some reason on debian:jessie `s3cmd info ... | grep -q .` results in a broken pipe
-	s3cmd info "s3://$BUCKET" | grep . >/dev/null
+	aws s3 ls "s3://$BUCKET" >/dev/null
 	# Make the bucket accessible through website endpoints.
-	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
+	aws s3 website --index-document index --error-document error "s3://$BUCKET"
 }
 
 # write_to_s3 uploads the contents of standard input to the specified S3 url.
@@ -92,7 +94,7 @@ write_to_s3() {
 	DEST=$1
 	F=`mktemp`
 	cat > "$F"
-	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
+	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
 	rm -f "$F"
 }
 
@@ -102,6 +104,7 @@ s3_url() {
 		echo "https://$BUCKET_PATH"
 		;;
 	*)
+		# TODO: remove s3cmd dependency
 		BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
 		if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
 			echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
@@ -147,12 +150,12 @@ upload_release_build() {
 	echo "Uploading  $src"
 	echo "       to  $dst"
 	echo
-	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
+	aws s3 cp --follow-symlinks --acl public-read "$src" "$dst"
 	if [ "$latest" ]; then
 		echo
 		echo "Copying to  $latest"
 		echo
-		s3cmd --acl-public cp "$dst" "$latest"
+		aws s3 cp --acl public-read "$dst" "$latest"
 	fi
 
 	# get hash files too (see hash_files() in hack/make.sh)
@@ -162,12 +165,12 @@ upload_release_build() {
 			echo "Uploading  $src.$hashAlgo"
 			echo "       to  $dst.$hashAlgo"
 			echo
-			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
+			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to  $latest.$hashAlgo"
				echo
-				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
+				aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
@@ -268,14 +271,14 @@ release_binaries() {
 	cat <
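
A sketch of the two shell idioms this patch leans on, for reviewers unfamiliar
with them; the region value and bucket name below are illustrative (taken from
the usage text in the diff), not part of the patch itself:

    # Export first: the attribute sticks, so the later assignment is
    # inherited by child processes such as the aws CLI.
    export AWS_DEFAULT_REGION
    # ":" is the shell no-op; the ":=" expansion assigns us-west-2 only
    # when AWS_DEFAULT_REGION is unset or empty, so a caller-supplied
    # region always wins over the default.
    : ${AWS_DEFAULT_REGION:=us-west-2}

    # "docker run -e VAR" (no "=value") forwards VAR's value from the
    # calling environment, so the credentials never appear on the command
    # line or in shell history:
    docker run -e AWS_S3_BUCKET=test.docker.com \
               -e AWS_ACCESS_KEY_ID \
               -e AWS_SECRET_ACCESS_KEY \
               -e AWS_DEFAULT_REGION \
               -it --privileged \
               docker ./hack/release.sh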