diff --git a/api/client/commands.go b/api/client/commands.go
index 839676d276e55298156fea8bb259b0a2b3d56801..2b837596e8b7eb98c8a8ae4f98eaa01a1330e388 100644
--- a/api/client/commands.go
+++ b/api/client/commands.go
@@ -232,6 +232,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
return err
}
}
+
+ // windows: show error message about modified file permissions
+ // FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
+ if runtime.GOOS == "windows" {
+ log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+ }
+
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
diff --git a/api/server/server.go b/api/server/server.go
index d244d2a0ceff5744c9f5c1b0df9dadf18da8b5c4..5b84321ed2390cdee3b93080f590584b3001ec28 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -1578,7 +1578,15 @@ func ServeApi(job *engine.Job) engine.Status {
chErrors <- err
return
}
- chErrors <- srv.Serve()
+ job.Eng.OnShutdown(func() {
+ if err := srv.Close(); err != nil {
+ log.Error(err)
+ }
+ })
+ if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+ err = nil
+ }
+ chErrors <- err
}()
}
diff --git a/daemon/container.go b/daemon/container.go
index e9b360083c1232a33811c8cfba7314988f967407..0fff3238e9fcc1dcd6a36cc2db6e837aeaace2dc 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -1223,6 +1223,7 @@ func (container *Container) initializeNetworking() error {
if err != nil {
return err
}
+ container.HostnamePath = nc.HostnamePath
container.HostsPath = nc.HostsPath
container.ResolvConfPath = nc.ResolvConfPath
container.Config.Hostname = nc.Config.Hostname
diff --git a/daemon/create.go b/daemon/create.go
index e17b63636b1f6934cd4bda1324853f4d23a50894..49bc6a7de9487ba790ed86b53ac49eac27011720 100644
--- a/daemon/create.go
+++ b/daemon/create.go
@@ -33,7 +33,7 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
hostConfig.Memory = 0
}
- if hostConfig.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+ if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
hostConfig.MemorySwap = -1
}
diff --git a/docker/daemon.go b/docker/daemon.go
index e3bd06d901e2872d8802e9cd7bee9002f22767cd..b2a985b2210a7d4cf2b8eb3a991b52743814d8dc 100644
--- a/docker/daemon.go
+++ b/docker/daemon.go
@@ -186,8 +186,9 @@ func mainDaemon() {
errAPI := <-serveAPIWait
// If we have an error here it is unique to API (as daemonErr would have
// exited the daemon process above)
+ eng.Shutdown()
if errAPI != nil {
- log.Errorf("Shutting down due to ServeAPI error: %v", errAPI)
+ log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
}
- eng.Shutdown()
+
}
diff --git a/docs/README.md b/docs/README.md
index 72172112cecf609d8faa347aba92da523384812f..5feb496a736cefa7cde77b5cdcede58d45563021 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,156 +1,255 @@
# Docker Documentation
-The source for Docker documentation is here under `sources/` and uses extended
-Markdown, as implemented by [MkDocs](http://mkdocs.org).
+The source for Docker documentation is in this directory under `sources/`. Our
+documentation uses extended Markdown, as implemented by
+[MkDocs](http://mkdocs.org). The current release of the Docker documentation
+resides on [http://docs.docker.com](http://docs.docker.com).
-The HTML files are built and hosted on
-[http://docs.docker.com](http://docs.docker.com), and update automatically
-after each change to the `docs` branch of [Docker on
-GitHub](https://github.com/docker/docker) thanks to post-commit hooks.
+## Understanding the documentation branches and processes
-## Contributing
+Docker has two primary branches for documentation:
-Be sure to follow the [contribution guidelines](../CONTRIBUTING.md).
-In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
+| Branch | Description | URL (published via commit-hook) |
+|----------|--------------------------------|------------------------------------------------------------------------------|
+| `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) |
+| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
-## Getting Started
+Additions and updates to upcoming releases are made in a feature branch off of
+the `master` branch. The Docker maintainers also support a `docs` branch that
+contains the last release of documentation.
-Docker documentation builds are done in a Docker container, which installs all
-the required tools, adds the local `docs/` directory and builds the HTML docs.
-It then starts a HTTP server on port 8000 so that you can connect and see your
-changes.
+After a release, documentation updates are continually merged into `master` as
+they occur. This work includes new documentation for forthcoming features, bug
+fixes, and other updates. Docker's CI system automatically builds and updates
+the `master` documentation after each merge and posts it to
+[http://docs.master.dockerproject.com](http://docs.master.dockerproject.com).
-In the root of the `docker` source directory:
+Periodically, the Docker maintainers update `docs.docker.com` between official
+releases of Docker. They do this by cherry-picking commits from `master`,
+merging them into `docs`, and then publishing the result.
- $ make docs
- .... (lots of output) ....
- docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
- Running at: http://0.0.0.0:8000/
- Live reload enabled.
- Hold ctrl+c to quit.
+In the rare case where a change is not forward-compatible, changes may be made
+on other branches by special arrangement with the Docker maintainers.
-If you have any issues you need to debug, you can use `make docs-shell` and then
-run `mkdocs serve`
+### Quickstart for documentation contributors
+
+If you are a new or beginner contributor, we encourage you to read through our
+[detailed contributors
+guide](https://docs.docker.com/project/who-written-for/). The guide explains in
+detail, with examples, how to contribute. If you are an experienced contributor
+this quickstart should be enough to get you started.
+
+The following is the essential workflow for contributing to the documentation:
+
+1. Fork the `docker/docker` repository.
+
+2. Clone the repository to your local machine.
+
+3. Select an issue from `docker/docker` to work on or submit a proposal of your
+own.
+
+4. Create a feature branch from `master` in which to work.
+
+ By basing from `master` your work is automatically included in the next
+ release. It also allows docs maintainers to easily cherry-pick your changes
+ into the `docs` release branch.
+
+4. Modify existing or add new `.md` files to the `docs/sources` directory.
-## Testing the links
+ If you add a new document (`.md`) file, you must also add it to the
+ appropriate section of the `docs/mkdocs.yml` file in this repository.
-You can use `make docs-test` to generate a report of missing links that are referenced in
-the documentation - there should be none.
-## Adding a new document
+5. As you work, build the documentation site locally to see your changes.
-New document (`.md`) files are added to the documentation builds by adding them
-to the menu definition in the `docs/mkdocs.yml` file.
+ The `docker/docker` repository contains a `Dockerfile` and a `Makefile`.
+ Together, these create a development environment in which you can build and
+ run a container running the Docker documentation website. To build the
+ documentation site, enter `make docs` at the root of your `docker/docker`
+ fork:
+
+ $ make docs
+ .... (lots of output) ....
+ docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
+ Running at: http://0.0.0.0:8000/
+ Live reload enabled.
+ Hold ctrl+c to quit.
+
+
+ The build creates an image containing all the required tools, adds the local
+ `docs/` directory and generates the HTML files. Then, it runs a Docker
+ container with this image.
+
+ The container exposes port 8000 on the localhost so that you can connect and
+ see your changes. If you are running Boot2Docker, use the `boot2docker ip`
+ to get the address of your server.
+
+6. Check your writing for style and mechanical errors.
+
+ Use our [documentation style
+ guide](https://docs.docker.com/project/doc-style/) to check style. There are
+ several [good grammar and spelling online
+ checkers](http://www.hemingwayapp.com/) that can check your writing
+ mechanics.
+
+7. Squash your commits on your branch.
+
+8. Make a pull request from your fork back to Docker's `master` branch.
+
+9. Work with the reviewers until your change is approved and merged.
+
+### Debugging and testing
+
+If you have any issues you need to debug, you can use `make docs-shell` and then
+run `mkdocs serve`. You can use `make docs-test` to generate a report of missing
+links that are referenced in the documentation—there should be none.
## Style guide
-If you have questions about how to write for Docker's documentation (e.g.,
-questions about grammar, syntax, formatting, styling, language, or tone) please
-see the [style guide](sources/contributing/docs_style-guide.md). If something
-isn't clear in the guide, please submit a PR to help us improve it.
+If you have questions about how to write for Docker's documentation, please see
+the [style guide](sources/project/doc-style.md). The style guide provides
+guidance about grammar, syntax, formatting, styling, language, or tone. If
+something isn't clear in the guide, please submit an issue to let us know or
+submit a pull request to help us improve it.
-## Working using GitHub's file editor
-Alternatively, for small changes and typos you might want to use GitHub's built-
-in file editor. It allows you to preview your changes right on-line (though
-there can be some differences between GitHub Markdown and [MkDocs
-Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be
-careful not to create many commits. And you must still [sign your
-work!](../CONTRIBUTING.md#sign-your-work)
+## Publishing documentation (for Docker maintainers)
-## Branches
+To publish Docker's documentation you need to have Docker up and running on your
+machine. You'll also need a `docs/awsconfig` file containing the settings you
+need to access the AWS bucket you'll be deploying to.
-| Branch | Description | URL (published via commit-hook) |
-|----------|--------------------------------|------------------------------------------------------------------------------|
-| `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) |
-| `master` | Unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
+The process for publishing is to build first to an AWS bucket, verify the build,
+and then publish the final release.
-**There are two branches related to editing docs**: `master` and `docs`. You
-should always edit the documentation on a local branch of the `master` branch,
-and send a PR against `master`. That way your fixes will automatically get
-included in later releases, and docs maintainers can easily cherry-pick your
-changes into the `docs` release branch. In the rare case where your change is
-not forward-compatible, you may need to base your changes on the `docs` branch.
+1. Have Docker installed and running on your machine.
-Also, since there is a separate `docs` branch, we can keep
-[http://docs.docker.com](http://docs.docker.com) up to date with any bugs found
-between Docker code releases.
+2. Ask the core maintainers for the `awsconfig` file.
-## Publishing Documentation
+3. Copy the `awsconfig` file to the `docs/` directory.
-To publish a copy of the documentation you need to have Docker up and running on
-your machine. You'll also need a `docs/awsconfig` file containing the settings
-you need to access the AWS bucket you'll be deploying to.
+ The `awsconfig` file contains the profiles of the S3 buckets for our
+ documentation sites. (If needed, the release script creates an S3 bucket and
+ pushes the files to it.) Each profile has this format:
-The release script will create an s3 if needed, and will then push the files to it.
+ [profile dowideit-docs]
+ aws_access_key_id = IHOIUAHSIDH234rwf....
+ aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
+ region = ap-southeast-2
- [profile dowideit-docs]
- aws_access_key_id = IHOIUAHSIDH234rwf....
- aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
- region = ap-southeast-2
+ The `profile` name must be the same as the name of the bucket you are
+ deploying to.
-The `profile` name must be the same as the name of the bucket you are deploying
-to - which you call from the `docker` directory:
+4. Call `make` from the `docker` directory.
- make AWS_S3_BUCKET=dowideit-docs docs-release
+ $ make AWS_S3_BUCKET=dowideit-docs docs-release
-This will publish _only_ to the `http://bucket-url/v1.2/` version of the
-documentation.
+ This publishes _only_ to the `http://bucket-url/v1.2/` version of the
+ documentation.
-If you're publishing the current release's documentation, you need to
-also update the root docs pages by running
+5. If you're publishing the current release's documentation, you need to also
+update the root docs pages by running
- make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
+ $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
-> **Note:**
-> if you are using Boot2Docker on OSX and the above command returns an error,
-> `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
-> dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker
-> host. Run `eval "$(boot2docker shellinit)"` to see the correct variable to set. The command
-> will return the full `export` command, so you can just cut and paste.
+### Errors publishing using Boot2Docker
+
+Sometimes, in a Boot2Docker environment, the publishing procedure returns this
+error:
+
+ Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
+ dial unix /var/run/docker.sock: no such file or directory.
+
+If this happens, set the Docker host. Run the following command to set the
+variables in your shell:
+
+ $ eval "$(boot2docker shellinit)"
## Cherry-picking documentation changes to update an existing release.
-Whenever the core team makes a release, they publish the documentation based
-on the `release` branch (which is copied into the `docs` branch). The
-documentation team can make updates in the meantime, by cherry-picking changes
-from `master` into any of the docs branches.
+Whenever the core team makes a release, they publish the documentation based on
+the `release` branch. At that time, the `release` branch is copied into the
+`docs` branch. The documentation team makes updates between Docker releases by
+cherry-picking changes from `master` into any of the documentation branches.
+Typically, we cherry-pick into the `docs` branch.
+
+For example, to update the current release's docs, do the following:
+
+1. Go to your `docker/docker` fork and get the latest from master.
+
+ $ git fetch upstream
+
+2. Checkout a new branch based on `upstream/docs`.
+
+ You should give your new branch a descriptive name.
+
+ $ git checkout -b post-1.2.0-docs-update-1 upstream/docs
+
+3. In a browser window, open [https://github.com/docker/docker/commits/master].
+
+4. Locate the merges you want to publish.
+
+ You should only cherry-pick individual commits; do not cherry-pick merge
+ commits. To minimize merge conflicts, start with the oldest commit and work
+ your way forward in time.
+
+5. Copy the commit SHA from GitHub.
-For example, to update the current release's docs:
+6. Cherry-pick the commit.
+
+ $ git cherry-pick -x fe845c4
+
+7. Repeat until you have cherry-picked everything you want to merge.
- git fetch upstream
- git checkout -b post-1.2.0-docs-update-1 upstream/docs
- # Then go through the Merge commit linked to PR's (making sure they apply
- to that release)
- # see https://github.com/docker/docker/commits/master
- git cherry-pick -x fe845c4
- # Repeat until you have cherry picked everything you will propose to be merged
- git push upstream post-1.2.0-docs-update-1
+8. Push your changes to your fork.
-Then make a pull request to merge into the `docs` branch, __NOT__ into master.
+ $ git push origin post-1.2.0-docs-update-1
-Once the PR has the needed `LGTM`s, merge it, then publish to our beta server
-to test:
+9. Make a pull request to merge into the `docs` branch.
- git fetch upstream
- git checkout docs
- git reset --hard upstream/docs
- make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+ Do __NOT__ merge into `master`.
-Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
-to view your results and make sure what you published is what you wanted.
+10. Have maintainers review your pull request.
-When you're happy with it, publish the docs to our live site:
+11. Once the PR has the needed "LGTMs", merge it on GitHub.
- make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
+12. Return to your local fork and make sure you are still on the `docs` branch.
-Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/
+ $ git checkout docs
+
+13. Fetch your merged pull request from `docs`.
+
+    $ git fetch upstream docs
+
+14. Ensure your branch is clean and set to the latest.
+
+ $ git reset --hard upstream/docs
-Note that the new docs will not appear live on the site until the cache (a complex,
-distributed CDN system) is flushed. The `make docs-release` command will do this
-_if_ the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID (ask the meta
-team) - this will take at least 15 minutes to run and you can check its progress
-with the CDN Cloudfront Chrome addin.
+15. Copy the `awsconfig` file into the `docs` directory.
+
+16. Make the beta documentation
+
+ $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+
+17. Open [the beta
+website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/) site
+and make sure what you published is correct.
+
+18. When you're happy with your content, publish the docs to our live site:
+
+    $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes
+DISTRIBUTION_ID=C2K6......FL2F docs-release
+
+19. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/]
+
+
+### Caching and the docs
+
+New docs do not appear live on the site until the cache (a complex, distributed
+CDN system) is flushed. The `make docs-release` command flushes the cache _if_
+the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush
+can take at least 15 minutes to run and you can check its progress with the CDN
+Cloudfront Purge Tool Chrome app.
## Removing files from the docs.docker.com site
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 710359d3d633bb001dfffcc389aef5fb39ed3471..6e7be67d20ec61ff3babaa04aaa583db55291b82 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -46,6 +46,7 @@ pages:
- ['installation/gentoolinux.md', 'Installation', 'Gentoo']
- ['installation/softlayer.md', 'Installation', 'IBM Softlayer']
- ['installation/joyent.md', 'Installation', 'Joyent Compute Service']
+- ['installation/azure.md', 'Installation', 'Microsoft Azure']
- ['installation/rackspace.md', 'Installation', 'Rackspace Cloud']
- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
- ['installation/oracle.md', 'Installation', 'Oracle Linux']
diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md
index 164018e82770685820a5021acf5326a205bbbcc2..1613ad1d4b0e7b2bf08f5b29cb625e83e3cbead3 100644
--- a/docs/sources/docker-hub/builds.md
+++ b/docs/sources/docker-hub/builds.md
@@ -63,13 +63,15 @@ public or private GitHub repositories with a `Dockerfile`.
### GitHub Submodules
-If your GitHub repository contains links to private submodules, you'll
-need to add a deploy key from your Docker Hub repository.
+If your GitHub repository contains links to private submodules, you'll get an
+error message in your build.
-Your Docker Hub deploy key is located under the "Build Details"
-menu on the Automated Build's main page in the Hub. Add this key
-to your GitHub submodule by visiting the Settings page for the
-repository on GitHub and selecting "Deploy keys".
+Normally, the Docker Hub sets up a deploy key in your GitHub repository.
+Unfortunately, GitHub only allows a repository deploy key to access a single repository.
+
+To work around this, you need to create a dedicated user account in GitHub and attach
+the automated build's deploy key to that account. This dedicated build account
+can be limited to read-only access to just the repositories required to build.
@@ -82,15 +84,33 @@ repository on GitHub and selecting "Deploy keys".
1. |
-  |
- Your automated build's deploy key is in the "Build Details" menu
-under "Deploy keys". |
+  |
+ First, create the new account in GitHub. It should be given read-only
+ access to the main repository and all submodules that are needed. |
2. |
-  |
- In your GitHub submodule's repository Settings page, add the
-deploy key from your Docker Hub Automated Build. |
+  |
+ This can be accomplished by adding the account to a read-only team in
+ the organization(s) where the main GitHub repository and all submodule
+ repositories are kept. |
+
+
+ 3. |
+  |
+ Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section. |
+
+
+ 4. |
+  |
+ Your automated build's deploy key is in the "Build Details" menu
+ under "Deploy keys". |
+
+
+ 5. |
+  |
+ In your dedicated GitHub User account, add the deploy key from your
+ Docker Hub Automated Build. |
diff --git a/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png b/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d0092170f97ec2cb67089e54585dd120dcb5b3c
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png differ
diff --git a/docs/sources/docker-hub/hub-images/gh_org_members.png b/docs/sources/docker-hub/hub-images/gh_org_members.png
new file mode 100644
index 0000000000000000000000000000000000000000..465f5da565c8ca58b662eeeaba63ee21e63a8c53
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_org_members.png differ
diff --git a/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png b/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png
new file mode 100644
index 0000000000000000000000000000000000000000..983b5eec77f2360a2c179e722e2611917443a025
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png differ
diff --git a/docs/sources/docker-hub/hub-images/gh_team_members.png b/docs/sources/docker-hub/hub-images/gh_team_members.png
new file mode 100644
index 0000000000000000000000000000000000000000..3bdf4abd95c65e1b9b2b70862fc16968de1e2a65
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_team_members.png differ
diff --git a/docs/sources/docker-hub/hub-images/github_deploy_key.png b/docs/sources/docker-hub/hub-images/github_deploy_key.png
deleted file mode 100644
index a0ec6a918f93874f49fe571315b46ed7ebe11961..0000000000000000000000000000000000000000
Binary files a/docs/sources/docker-hub/hub-images/github_deploy_key.png and /dev/null differ
diff --git a/docs/sources/installation/azure.md b/docs/sources/installation/azure.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8e700fead838ef4cbb342136b42ff1ba9bf5a45
--- /dev/null
+++ b/docs/sources/installation/azure.md
@@ -0,0 +1,27 @@
+page_title: Installation on Microsoft Azure Platform
+page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform.
+page_keywords: Docker, Docker documentation, installation, azure, microsoft
+
+# Microsoft Azure
+
+
+## Creating a Docker host machine on Azure
+
+Please check out the following detailed tutorials on the [Microsoft Azure][0]
+website to find out different ways to create Docker-ready Linux virtual
+machines on Azure:
+
+* [Docker Virtual Machine Extensions on Azure][1]
+ * [How to use the Docker VM Extension from Azure Cross-Platform Interface][2]
+ * [How to use the Docker VM Extension with the Azure Portal][3]
+* [Using Docker Machine with Azure][4]
+
+## What next?
+
+Continue with the [User Guide](/userguide/).
+
+[0]: http://azure.microsoft.com/
+[1]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/
+[2]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-xplat-cli/
+[3]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-portal/
+[4]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/
\ No newline at end of file
diff --git a/docs/sources/project/create-pr.md b/docs/sources/project/create-pr.md
index 84de397090e51fd983198d5ee29437c6a5e59f5a..197aee849d0bfd3035d0c5982f1abae005dfb263 100644
--- a/docs/sources/project/create-pr.md
+++ b/docs/sources/project/create-pr.md
@@ -1,6 +1,6 @@
page_title: Create a pull request (PR)
page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Create a pull request (PR)
diff --git a/docs/sources/project/find-an-issue.md b/docs/sources/project/find-an-issue.md
index 39572d17a43ec0300adfd01d5043e1c9138bcfa3..0a36c8833a0b54c78dd54b45eb4b0992c04009ee 100644
--- a/docs/sources/project/find-an-issue.md
+++ b/docs/sources/project/find-an-issue.md
@@ -1,6 +1,6 @@
page_title: Make a project contribution
page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, expert, squash, commit
@@ -37,20 +51,44 @@ An existing issue is something reported by a Docker user. As issues come in,
our maintainers triage them. Triage is its own topic. For now, it is important
for you to know that triage includes ranking issues according to difficulty.
-Triaged issues have either a white-belt
-or black-belt label.
-A white-belt issue is considered
-an easier issue. Issues can have more than one label, for example,
-bug,
-improvement,
-project/doc, and so forth.
-These other labels are there for filtering purposes but you might also find
-them helpful.
-
-
-## Claim a white-belt issue
-
-In this section, you find and claim an open white-belt issue.
+Triaged issues have one of these labels:
+
+
+
+ Level |
+ Experience level guideline |
+
+
+ exp/beginner |
+ You have made less than 10 contributions in your life time to any open source project. |
+
+
+ exp/novice |
+ You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. |
+
+
+ exp/proficient |
+ You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. |
+
+
+ exp/expert |
+ You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. |
+
+
+ exp/master |
+ You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines. |
+
+
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert or exp/master level task.
+
+## Claim a beginner or novice issue
+
+In this section, you find and claim an open beginner issue.
1. Go to the `docker/docker` white-belt items on the list.
+3. Look for the exp/beginner items on the list.
-4. Click on the "labels" dropdown and select white-belt.
+4. Click on the "labels" dropdown and select exp/beginner.
- The system filters to show only open white-belt issues.
+ The system filters to show only open exp/beginner issues.
5. Open an issue that interests you.
@@ -75,21 +113,18 @@ In this section, you find and claim an open white-belt issue.
6. Make sure that no other user has chosen to work on the issue.
- We don't allow external contributors to assign issues to themselves, so you
- need to read the comments to find if a user claimed an issue by saying:
-
- - "I'd love to give this a try~"
- - "I'll work on this!"
- - "I'll take this."
-
- The community is very good about claiming issues explicitly.
+ We don't allow external contributors to assign issues to themselves. So, you
+ need to read the comments to find if a user claimed the issue by leaving a
+ `#dibs` comment on the issue.
-7. When you find an open issue that both interests you and is unclaimed, claim it yourself by adding a comment.
+7. When you find an open issue that both interests you and is unclaimed, add a
+`#dibs` comment.

This example uses issue 11038. Your issue # will be different depending on
- what you claimed.
+ what you claimed. After a moment, Gordon the Docker bot, changes the issue
+ status to claimed.
8. Make a note of the issue number; you'll need it later.
diff --git a/docs/sources/project/images/easy_issue.png b/docs/sources/project/images/easy_issue.png
index ac2ea6879ca156cac6cf19f5ad9663ef0eb28c63..de44b7826dc13cdf5d6b95f726dc519bc5cee14f 100644
Binary files a/docs/sources/project/images/easy_issue.png and b/docs/sources/project/images/easy_issue.png differ
diff --git a/docs/sources/project/make-a-contribution.md b/docs/sources/project/make-a-contribution.md
index b6fc4f34fa6f5622cc17b5071893f67e6e60ffa3..e0b4e897201808e726fdde97fd791235328ea9ab 100644
--- a/docs/sources/project/make-a-contribution.md
+++ b/docs/sources/project/make-a-contribution.md
@@ -16,7 +16,7 @@ process simple so you'll want to contribute frequently.
## The basic contribution workflow
In this guide, you work through Docker's basic contribution workflow by fixing a
-single *white-belt* issue in the `docker/docker` repository. The workflow
+single *beginner* issue in the `docker/docker` repository. The workflow
for fixing simple issues looks like this:

diff --git a/docs/sources/project/review-pr.md b/docs/sources/project/review-pr.md
index 44ad84f2a0eebd049e568d9376111ef2a56d7114..e8cb6c7c0432dfc5cf1038422504951c08a33077 100644
--- a/docs/sources/project/review-pr.md
+++ b/docs/sources/project/review-pr.md
@@ -1,6 +1,6 @@
page_title: Participate in the PR Review
page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Participate in the PR Review
@@ -117,8 +117,7 @@ see the GitHub help on deleting branches.
## Where to go next
At this point, you have completed all the basic tasks in our contributors guide.
-If you enjoyed contributing, let us know by completing another
-white-belt
+If you enjoyed contributing, let us know by completing another beginner
issue or two. We really appreciate the help.
If you are very experienced and want to make a major change, go on to
diff --git a/docs/sources/project/set-up-git.md b/docs/sources/project/set-up-git.md
index ba42c8100667d5e69e24b84852c56466069fa67e..2292d93b3c6c3f95ca4902d36fd2d73e6f3d0532 100644
--- a/docs/sources/project/set-up-git.md
+++ b/docs/sources/project/set-up-git.md
@@ -138,7 +138,7 @@ As you change code in your fork, you make your changes on a repository branch.
The branch name should reflect what you are working on. In this section, you
create a branch, make a change, and push it up to your fork.
-This branch is just for testing your config for this guide. The changes arepart
+This branch is just for testing your config for this guide. The changes are part
of a dry run so the branch name is going to be dry-run-test. To create an push
the branch to your fork on GitHub:
diff --git a/docs/sources/project/work-issue.md b/docs/sources/project/work-issue.md
index 68d2ed750fc04733526c3f6dea9485f49f580b93..190cec0557203a2c7842f8ba019f1cf7fc732619 100644
--- a/docs/sources/project/work-issue.md
+++ b/docs/sources/project/work-issue.md
@@ -1,6 +1,6 @@
page_title: Work on your issue
page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Work on your issue
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index 322f5f401e4cf314ec0b4da79729fd262dff957f..93be377baf91fdf56e9a6b802d58178b7f0cd1fc 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -1514,14 +1514,6 @@ just a specific mapping:
$ sudo docker port test 7890
0.0.0.0:4321
-## rename
-
- Usage: docker rename OLD_NAME NEW_NAME
-
- rename a existing container to a NEW_NAME
-
-The `docker rename` command allows the container to be renamed to a different name.
-
## ps
Usage: docker ps [OPTIONS]
@@ -1617,6 +1609,14 @@ use `docker pull`:
Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
registry or to a self-hosted one.
+## rename
+
+ Usage: docker rename OLD_NAME NEW_NAME
+
+ rename an existing container to a NEW_NAME
+
+The `docker rename` command allows the container to be renamed to a different name.
+
## restart
Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...]
diff --git a/engine/engine.go b/engine/engine.go
index e8286d89f7dc047706e3c94618c3ff98c908d995..84292ad2bd11234b9a574009742aa87becb6fbea 100644
--- a/engine/engine.go
+++ b/engine/engine.go
@@ -46,18 +46,19 @@ func unregister(name string) {
// It acts as a store for *containers*, and allows manipulation of these
// containers by executing *jobs*.
type Engine struct {
- handlers map[string]Handler
- catchall Handler
- hack Hack // data for temporary hackery (see hack.go)
- id string
- Stdout io.Writer
- Stderr io.Writer
- Stdin io.Reader
- Logging bool
- tasks sync.WaitGroup
- l sync.RWMutex // lock for shutdown
- shutdown bool
- onShutdown []func() // shutdown handlers
+ handlers map[string]Handler
+ catchall Handler
+ hack Hack // data for temporary hackery (see hack.go)
+ id string
+ Stdout io.Writer
+ Stderr io.Writer
+ Stdin io.Reader
+ Logging bool
+ tasks sync.WaitGroup
+ l sync.RWMutex // lock for shutdown
+ shutdownWait sync.WaitGroup
+ shutdown bool
+ onShutdown []func() // shutdown handlers
}
func (eng *Engine) Register(name string, handler Handler) error {
@@ -143,6 +144,7 @@ func (eng *Engine) Job(name string, args ...string) *Job {
func (eng *Engine) OnShutdown(h func()) {
eng.l.Lock()
eng.onShutdown = append(eng.onShutdown, h)
+ eng.shutdownWait.Add(1)
eng.l.Unlock()
}
@@ -156,6 +158,7 @@ func (eng *Engine) Shutdown() {
eng.l.Lock()
if eng.shutdown {
eng.l.Unlock()
+ eng.shutdownWait.Wait()
return
}
eng.shutdown = true
@@ -180,17 +183,15 @@ func (eng *Engine) Shutdown() {
// Call shutdown handlers, if any.
// Timeout after 10 seconds.
- var wg sync.WaitGroup
for _, h := range eng.onShutdown {
- wg.Add(1)
go func(h func()) {
- defer wg.Done()
h()
+ eng.shutdownWait.Done()
}(h)
}
done := make(chan struct{})
go func() {
- wg.Wait()
+ eng.shutdownWait.Wait()
close(done)
}()
select {
diff --git a/graph/pull.go b/graph/pull.go
index c01152a2485e65d70d4bc0338b7ada29b0ee803e..adad6f3239abf68d0eb60b8ab8455493f9c5d74d 100644
--- a/graph/pull.go
+++ b/graph/pull.go
@@ -74,7 +74,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
logName = utils.ImageReference(logName, tag)
}
- if len(repoInfo.Index.Mirrors) == 0 && ((repoInfo.Official && repoInfo.Index.Official) || endpoint.Version == registry.APIVersion2) {
+ if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
if repoInfo.Official {
j := job.Eng.Job("trust_update_base")
if err = j.Run(); err != nil {
diff --git a/graph/push.go b/graph/push.go
index 5a4f0d1de93b2f62b6f57636c7d057578feeeb74..f86df6d0b3c8cbcb5c319533f11f5785bb20a30f 100644
--- a/graph/push.go
+++ b/graph/push.go
@@ -539,7 +539,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
}
- if endpoint.Version == registry.APIVersion2 {
+ if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
if err == nil {
return engine.StatusOK
diff --git a/hack/vendor.sh b/hack/vendor.sh
index b3ba928a05aa44e6f09a7e6f12347179fb6c57b2..8732224bfc10570e4398805300cc020910618ae7 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -53,7 +53,7 @@ clone hg code.google.com/p/gosqlite 74691fb6f837
clone git github.com/docker/libtrust 230dfd18c232
-clone git github.com/Sirupsen/logrus v0.6.6
+clone git github.com/Sirupsen/logrus v0.7.1
clone git github.com/go-fsnotify/fsnotify v1.0.4
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 6a8359574686d542318af06fb1b845b03854a7e6..7f5a879b1f20da2588066142f16c80f7d450b13e 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -4623,8 +4623,19 @@ func TestBuildStderr(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if stderr != "" {
- t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
+
+ if runtime.GOOS == "windows" {
+ // stderr might contain a security warning on windows
+ lines := strings.Split(stderr, "\n")
+ for _, v := range lines {
+ if v != "" && !strings.Contains(v, "SECURITY WARNING:") {
+ t.Fatalf("Stderr contains unexpected output line: %q", v)
+ }
+ }
+ } else {
+ if stderr != "" {
+ t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
+ }
}
logDone("build - testing stderr")
}
@@ -5113,9 +5124,13 @@ func TestBuildSpaces(t *testing.T) {
t.Fatal("Build 2 was supposed to fail, but didn't")
}
+ removeLogTimestamps := func(s string) string {
+ return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
+ }
+
// Skip over the times
- e1 := err1.Error()[strings.Index(err1.Error(), `level=`):]
- e2 := err2.Error()[strings.Index(err1.Error(), `level=`):]
+ e1 := removeLogTimestamps(err1.Error())
+ e2 := removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what were verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@@ -5128,8 +5143,8 @@ func TestBuildSpaces(t *testing.T) {
}
// Skip over the times
- e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
- e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
+ e1 = removeLogTimestamps(err1.Error())
+ e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what were verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@@ -5142,8 +5157,8 @@ func TestBuildSpaces(t *testing.T) {
}
// Skip over the times
- e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
- e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
+ e1 = removeLogTimestamps(err1.Error())
+ e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what were verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 49b43c2f28d6153de4d148940b7dc2c2e31b4959..c515a637875006d6f21d1e77ab67706f417d7446 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -800,3 +800,31 @@ func TestDaemonDots(t *testing.T) {
logDone("daemon - test dots on INFO")
}
+
+func TestDaemonUnixSockCleanedUp(t *testing.T) {
+ d := NewDaemon(t)
+ dir, err := ioutil.TempDir("", "socket-cleanup-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ sockPath := filepath.Join(dir, "docker.sock")
+ if err := d.Start("--host", "unix://"+sockPath); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := os.Stat(sockPath); err != nil {
+ t.Fatal("socket does not exist")
+ }
+
+ if err := d.Stop(); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
+ t.Fatal("unix socket is not cleaned up")
+ }
+
+ logDone("daemon - unix socket is cleaned up")
+}
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
index 926e763434c443c5c140aa5994f03199c17bdeca..c55bd2e67350fce11af9e6a3918c34bcafae238e 100644
--- a/integration-cli/docker_cli_pull_test.go
+++ b/integration-cli/docker_cli_pull_test.go
@@ -55,6 +55,8 @@ func TestPullImageWithAliases(t *testing.T) {
// pulling library/hello-world should show verified message
func TestPullVerified(t *testing.T) {
+ t.Skip("problems verifying library/hello-world (to be fixed)")
+
// Image must be pulled from central repository to get verified message
// unless keychain is manually updated to contain the daemon's sign key.
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index 3cab284331e1858fb3fc89915950637fe1518d07..636ef36e12759d48450d318a1a816dd9dccd35b8 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -412,6 +412,31 @@ func TestRunLinkToContainerNetMode(t *testing.T) {
logDone("run - link to a container which net mode is container success")
}
+func TestRunModeNetContainerHostname(t *testing.T) {
+ defer deleteAllContainers()
+ cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top")
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ t.Fatalf("failed to run container: %v, output: %q", err, out)
+ }
+ cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname")
+ out, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ t.Fatalf("failed to exec command: %v, output: %q", err, out)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
+ out1, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ t.Fatalf("failed to run container: %v, output: %q", err, out1)
+ }
+ if out1 != out {
+ t.Fatal("containers with shared net namespace should have same hostname")
+ }
+
+ logDone("run - containers with shared net namespace have same hostname")
+}
+
// Regression test for #4741
func TestRunWithVolumesAsFiles(t *testing.T) {
defer deleteAllContainers()
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 6fe416f9d20eb5bf1991a41447a713173378899a..9327ac240a579da5e6f2757c81dd7e5c4cf1627d 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -109,23 +109,6 @@ func TestRunWithUlimits(t *testing.T) {
logDone("run - ulimits are set")
}
-func getCgroupPaths(test string) map[string]string {
- cgroupPaths := map[string]string{}
- for _, line := range strings.Split(test, "\n") {
- line = strings.TrimSpace(line)
- if line == "" {
- continue
- }
- parts := strings.Split(line, ":")
- if len(parts) != 3 {
- fmt.Printf("unexpected file format for /proc/self/cgroup - %q\n", line)
- continue
- }
- cgroupPaths[parts[1]] = parts[2]
- }
- return cgroupPaths
-}
-
func TestRunContainerWithCgroupParent(t *testing.T) {
testRequires(t, NativeExecDriver)
defer deleteAllContainers()
@@ -135,7 +118,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
if err != nil {
t.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
- selfCgroupPaths := getCgroupPaths(string(data))
+ selfCgroupPaths := parseCgroupPaths(string(data))
selfCpuCgroup, found := selfCgroupPaths["memory"]
if !found {
t.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
@@ -145,7 +128,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
if err != nil {
t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
- cgroupPaths := getCgroupPaths(string(out))
+ cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
t.Fatalf("unexpected output - %q", string(out))
}
@@ -173,7 +156,7 @@ func TestRunContainerWithCgroupParentAbsPath(t *testing.T) {
if err != nil {
t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
- cgroupPaths := getCgroupPaths(string(out))
+ cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
t.Fatalf("unexpected output - %q", string(out))
}
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
index 691402f35eeea1c880bcfa40d891d52c85cfc1bf..75608549892ffa8b2e21f68ace9a8ae873f1da4f 100644
--- a/integration-cli/utils.go
+++ b/integration-cli/utils.go
@@ -328,3 +328,17 @@ func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
}
}
}
+
+// Parses 'procCgroupData', which is the output of '/proc/<pid>/cgroup', and returns
+// a map with the cgroup subsystem name as key and its path as value.
+func parseCgroupPaths(procCgroupData string) map[string]string {
+ cgroupPaths := map[string]string{}
+ for _, line := range strings.Split(procCgroupData, "\n") {
+ parts := strings.Split(line, ":")
+ if len(parts) != 3 {
+ continue
+ }
+ cgroupPaths[parts[1]] = parts[2]
+ }
+ return cgroupPaths
+}
diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md
index bee7a89827866230e2ca0a103aeaa099e6f4e3ff..528dea9c6017eef30e85d81c04318f7430d89628 100644
--- a/project/ISSUE-TRIAGE.md
+++ b/project/ISSUE-TRIAGE.md
@@ -54,30 +54,51 @@ that the user can easily script and know the reason why the command failed.
### Step 3: Classify the Issue
Classifications help both to inform readers about an issue's priority and how to resolve it.
-This is also helpful for identifying new, critical issues. Classifications types are
-applied to the issue or pull request using labels.
+This is also helpful for identifying new, critical issues. "Kind" classifications are
+applied to the issue or pull request using labels. You can apply one or more labels.
-Types of classification:
+Kinds of classifications:
-| Type | Description |
-|-------------|---------------------------------------------------------------------------------------------------------------------------------|
-| improvement | improvements are not bugs or new features but can drastically improve usability. |
-| regression | regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. |
-| bug | bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. |
-| feature | features are new and shinny. They are things that the project does not currently support. |
+| Kind | Description |
+|------------------|---------------------------------------------------------------------------------------------------------------------------------|
+| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component. |
+| kind/cleanup | Refactoring code or otherwise clarifying documentation. |
+| kind/content | Content that is not documentation such as help or error messages. |
+| kind/graphics | Work involving graphics skill. |
+| kind/regression | Regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. |
+| kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. |
+| kind/feature | Functionality or other elements that the project does not currently support. Features are new and shiny. |
+| kind/question | Contains a user or contributor question requiring a response. |
+| kind/usecase | A description of a user or contributor situation requiring a response perhaps in code or documentation. |
+| kind/writing | Writing documentation, man pages, articles, blogs, or other significant word-driven task. |
+| kind/test | Tests or test infrastructure needs adding or updating. |
-### Step 4: Estimate the Difficulty
-Difficulty is a way for a contributor to find an issue based on their skill set. Difficulty types are
-applied to the issue or pull request using labels.
+Contributors can add labels by using a `+kind/bug` in an issue or pull request comment.
-Difficulty
+### Step 4: Estimate the experience level required
+
+Experience level is a way for a contributor to find an issue based on their
+skill set. Experience types are applied to the issue or pull request using
+labels.
+
+| Level | Experience level guideline |
+|------------------|--------------------------------------------------------------------------------------------------------------------------|
+| exp/beginner | You have made less than 10 contributions in your life time to any open source project. |
+| exp/novice | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. |
+| exp/proficient | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. |
+| exp/expert | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. |
+| exp/master | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines. |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert or exp/master level task.
+
+Contributors can add labels by using a `+exp/expert` format in issue comment.
-| Type | Description |
-|--------------|-----------------------------------------------------------------------------|
-| white-belt | Simple, non-time consuming issue, easy first task to accomplish |
-| black-belt | Expert at the subject matter or someone who likes pain |
And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue.
diff --git a/registry/session_v2.go b/registry/session_v2.go
index ec628ad1158631c31d12c619e3cfec0fff032b36..833abeed6fb56ce05b4a460593ee7ea03cf79f24 100644
--- a/registry/session_v2.go
+++ b/registry/session_v2.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net/http"
"strconv"
log "github.com/Sirupsen/logrus"
@@ -212,29 +213,14 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str
// 'layer' is an uncompressed reader of the blob to be pushed.
// The server will generate it's own checksum calculation.
func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error {
- routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)
- if err != nil {
- return err
- }
-
- log.Debugf("[registry] Calling %q %s", "POST", routeURL)
- req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
- if err != nil {
- return err
- }
-
- if err := auth.Authorize(req); err != nil {
- return err
- }
- res, _, err := r.doRequest(req)
+ location, err := r.initiateBlobUpload(ep, imageName, auth)
if err != nil {
return err
}
- location := res.Header.Get("Location")
method := "PUT"
log.Debugf("[registry] Calling %q %s", method, location)
- req, err = r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
+ req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
if err != nil {
return err
}
@@ -244,7 +230,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string
if err := auth.Authorize(req); err != nil {
return err
}
- res, _, err = r.doRequest(req)
+ res, _, err := r.doRequest(req)
if err != nil {
return err
}
@@ -265,6 +251,51 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string
return nil
}
+// initiateBlobUpload gets the blob upload location for the given image name.
+func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) {
+ routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)
+ if err != nil {
+ return "", err
+ }
+
+ log.Debugf("[registry] Calling %q %s", "POST", routeURL)
+ req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
+ if err != nil {
+ return "", err
+ }
+
+ if err := auth.Authorize(req); err != nil {
+ return "", err
+ }
+ res, _, err := r.doRequest(req)
+ if err != nil {
+ return "", err
+ }
+
+ if res.StatusCode != http.StatusAccepted {
+ if res.StatusCode == http.StatusUnauthorized {
+ return "", errLoginRequired
+ }
+ if res.StatusCode == http.StatusNotFound {
+ return "", ErrDoesNotExist
+ }
+
+ errBody, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", err
+ }
+
+ log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
+ return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res)
+ }
+
+ if location = res.Header.Get("Location"); location == "" {
+ return "", fmt.Errorf("registry did not return a Location header for resumable blob upload for image %s", imageName)
+ }
+
+ return
+}
+
// Finally Push the (signed) manifest of the blobs we've just pushed
func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) {
routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md
index e755e7c18028a9ed6b89e237c76ec2eb9e73b70d..512f26e5eab242da050d75698083f4d1bc93db9c 100644
--- a/vendor/src/github.com/Sirupsen/logrus/README.md
+++ b/vendor/src/github.com/Sirupsen/logrus/README.md
@@ -82,7 +82,7 @@ func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(&logrus_airbrake.AirbrakeHook{})
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
@@ -164,43 +164,8 @@ You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
-```go
-// Not the real implementation of the Airbrake hook. Just a simple sample.
-import (
- log "github.com/Sirupsen/logrus"
-)
-
-func init() {
- log.AddHook(new(AirbrakeHook))
-}
-
-type AirbrakeHook struct{}
-
-// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
-// the fields for the entry. See the Fields section of the README.
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
- err := airbrake.Notify(entry.Data["error"].(error))
- if err != nil {
- log.WithFields(log.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- }).Info("Failed to send error to Airbrake")
- }
-
- return nil
-}
-
-// `Levels()` returns a slice of `Levels` the hook is fired for.
-func (hook *AirbrakeHook) Levels() []log.Level {
- return []log.Level{
- log.ErrorLevel,
- log.FatalLevel,
- log.PanicLevel,
- }
-}
-```
-
-Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
```go
import (
@@ -211,7 +176,7 @@ import (
)
func init() {
- log.AddHook(new(logrus_airbrake.AirbrakeHook))
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@@ -233,6 +198,9 @@ func init() {
Send errors to remote syslog server.
Uses standard library `log/syslog` behind the scenes.
+* [`github.com/Sirupsen/logrus/hooks/bugsnag`](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go)
+ Send errors to the Bugsnag exception tracking service.
+
* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
Send errors to a channel in hipchat.
@@ -321,6 +289,11 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+ ```go
  logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+ ```
Third party logging formatters:
diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
index 42e7a4c98252eaf8885ad7b8e17b20a7a62ba0d1..cb5759a35cf529c1e73aab21e32d0ab3ce5ec34d 100644
--- a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
+++ b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -3,21 +3,16 @@ package main
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
- "github.com/tobi/airbrake-go"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
- log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
+ log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
}
func main() {
- airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
- airbrake.ApiKey = "whatever"
- airbrake.Environment = "production"
-
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
new file mode 100644
index 0000000000000000000000000000000000000000..34b1ccbca60d1d6957775a42d06ea02eb8c864f2
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
@@ -0,0 +1,48 @@
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "time"
+)
+
+// Formatter generates json in logstash format.
+// Logstash site: http://logstash.net/
+type LogstashFormatter struct {
+ Type string // if not empty use for logstash type field.
+}
+
+func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ entry.Data["@version"] = 1
+ entry.Data["@timestamp"] = entry.Time.Format(time.RFC3339)
+
+ // set message field
+ v, ok := entry.Data["message"]
+ if ok {
+ entry.Data["fields.message"] = v
+ }
+ entry.Data["message"] = entry.Message
+
+ // set level field
+ v, ok = entry.Data["level"]
+ if ok {
+ entry.Data["fields.level"] = v
+ }
+ entry.Data["level"] = entry.Level.String()
+
+ // set type field
+ if f.Type != "" {
+ v, ok = entry.Data["type"]
+ if ok {
+ entry.Data["fields.type"] = v
+ }
+ entry.Data["type"] = f.Type
+ }
+
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8814a0eae438f362e567edbbe112962eae0ab7a
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
@@ -0,0 +1,52 @@
+package logstash
+
+import (
+ "bytes"
+ "encoding/json"
+ "github.com/Sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestLogstashFormatter(t *testing.T) {
+ assert := assert.New(t)
+
+ lf := LogstashFormatter{Type: "abc"}
+
+ fields := logrus.Fields{
+ "message": "def",
+ "level": "ijk",
+ "type": "lmn",
+ "one": 1,
+ "pi": 3.14,
+ "bool": true,
+ }
+
+ entry := logrus.WithFields(fields)
+ entry.Message = "msg"
+ entry.Level = logrus.InfoLevel
+
+ b, _ := lf.Format(entry)
+
+ var data map[string]interface{}
+ dec := json.NewDecoder(bytes.NewReader(b))
+ dec.UseNumber()
+ dec.Decode(&data)
+
+ // base fields
+ assert.Equal(json.Number("1"), data["@version"])
+ assert.NotEmpty(data["@timestamp"])
+ assert.Equal("abc", data["type"])
+ assert.Equal("msg", data["message"])
+ assert.Equal("info", data["level"])
+
+ // substituted fields
+ assert.Equal("def", data["fields.message"])
+ assert.Equal("ijk", data["fields.level"])
+ assert.Equal("lmn", data["fields.type"])
+
+ // formats
+ assert.Equal(json.Number("1"), data["one"])
+ assert.Equal(json.Number("3.14"), data["pi"])
+ assert.Equal(true, data["bool"])
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
index 75f4db1513e4f1aa342d3ceeb4378dcbe6882e06..b0502c335a96d5389512f63e2cff71ff217a78f3 100644
--- a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
@@ -1,51 +1,51 @@
-package logrus_airbrake
+package airbrake
import (
+ "errors"
+ "fmt"
+
"github.com/Sirupsen/logrus"
"github.com/tobi/airbrake-go"
)
// AirbrakeHook to send exceptions to an exception-tracking service compatible
-// with the Airbrake API. You must set:
-// * airbrake.Endpoint
-// * airbrake.ApiKey
-// * airbrake.Environment
-//
-// Before using this hook, to send an error. Entries that trigger an Error,
-// Fatal or Panic should now include an "error" field to send to Airbrake.
-type AirbrakeHook struct{}
-
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
- if entry.Data["error"] == nil {
- entry.Logger.WithFields(logrus.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
- return nil
+// with the Airbrake API.
+type airbrakeHook struct {
+ APIKey string
+ Endpoint string
+ Environment string
+}
+
+func NewHook(endpoint, apiKey, env string) *airbrakeHook {
+ return &airbrakeHook{
+ APIKey: apiKey,
+ Endpoint: endpoint,
+ Environment: env,
}
+}
+
+func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
+ airbrake.ApiKey = hook.APIKey
+ airbrake.Endpoint = hook.Endpoint
+ airbrake.Environment = hook.Environment
+ var notifyErr error
err, ok := entry.Data["error"].(error)
- if !ok {
- entry.Logger.WithFields(logrus.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
- return nil
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
}
- airErr := airbrake.Notify(err)
+ airErr := airbrake.Notify(notifyErr)
if airErr != nil {
- entry.Logger.WithFields(logrus.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- "error": airErr,
- }).Warn("Failed to send error to Airbrake")
+ return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
}
return nil
}
-func (hook *AirbrakeHook) Levels() []logrus.Level {
+func (hook *airbrakeHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..058a91e3438204512dcfd1c27ea1892462609763
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
@@ -0,0 +1,133 @@
+package airbrake
+
+import (
+ "encoding/xml"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type notice struct {
+ Error NoticeError `xml:"error"`
+}
+type NoticeError struct {
+ Class string `xml:"class"`
+ Message string `xml:"message"`
+}
+
+type customErr struct {
+ msg string
+}
+
+func (e *customErr) Error() string {
+ return e.msg
+}
+
+const (
+ testAPIKey = "abcxyz"
+ testEnv = "development"
+ expectedClass = "*airbrake.customErr"
+ expectedMsg = "foo"
+ unintendedMsg = "Airbrake will not see this string"
+)
+
+var (
+ noticeError = make(chan NoticeError, 1)
+)
+
+// TestLogEntryMessageReceived checks if invoking Logrus' log.Error
+// method causes an XML payload containing the log entry message is received
+// by a HTTP server emulating an Airbrake-compatible endpoint.
+func TestLogEntryMessageReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.Error(expectedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryMessageReceived confirms that, when passing an error type using
+// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the
+// error message returned by the Error() method on the error interface
+// rather than the logrus.Entry.Message string.
+func TestLogEntryWithErrorReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": &customErr{expectedMsg},
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ if received.Class != expectedClass {
+ t.Errorf("Unexpected error class: %s", received.Class)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
+// non-error type using logrus.Fields, a HTTP server emulating an Airbrake
+// endpoint receives the logrus.Entry.Message string.
+//
+// Only error types are supported when setting the 'error' field using
+// logrus.WithFields().
+func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
+ log := logrus.New()
+ ts := startAirbrakeServer(t)
+ defer ts.Close()
+
+ hook := NewHook(ts.URL, testAPIKey, "production")
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": expectedMsg,
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeError:
+ if received.Message != unintendedMsg {
+ t.Errorf("Unexpected message received: %s", received.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+func startAirbrakeServer(t *testing.T) *httptest.Server {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var notice notice
+ if err := xml.NewDecoder(r.Body).Decode(&notice); err != nil {
+ t.Error(err)
+ }
+ r.Body.Close()
+
+ noticeError <- notice.Error
+ }))
+
+ return ts
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
new file mode 100644
index 0000000000000000000000000000000000000000..d20a0f54ab7705c21d3762ab2b01d59f343215e4
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
@@ -0,0 +1,68 @@
+package logrus_bugsnag
+
+import (
+ "errors"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/bugsnag/bugsnag-go"
+)
+
+type bugsnagHook struct{}
+
+// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
+// bugsnag.Configure. Bugsnag must be configured before the hook.
+var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
+
+// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
+// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
+// failed.
+type ErrBugsnagSendFailed struct {
+ err error
+}
+
+func (e ErrBugsnagSendFailed) Error() string {
+ return "failed to send error to Bugsnag: " + e.err.Error()
+}
+
+// NewBugsnagHook initializes a logrus hook which sends exceptions to an
+// exception-tracking service compatible with the Bugsnag API. Before using
+// this hook, you must call bugsnag.Configure(). The returned object should be
+// registered with a log via `AddHook()`
+//
+// Entries that trigger an Error, Fatal or Panic should now include an "error"
+// field to send to Bugsnag.
+func NewBugsnagHook() (*bugsnagHook, error) {
+ if bugsnag.Config.APIKey == "" {
+ return nil, ErrBugsnagUnconfigured
+ }
+ return &bugsnagHook{}, nil
+}
+
+// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
+// "error" field (or the Message if the error isn't present) and sends it off.
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+
+ bugsnagErr := bugsnag.Notify(notifyErr)
+ if bugsnagErr != nil {
+ return ErrBugsnagSendFailed{bugsnagErr}
+ }
+
+ return nil
+}
+
+// Levels enumerates the log levels on which the error should be forwarded to
+// bugsnag: everything at or above the "Error" level.
+func (hook *bugsnagHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e9ea298d89b4316bdefc2a76ddaa6f90a67079b7
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
@@ -0,0 +1,64 @@
+package logrus_bugsnag
+
+import (
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/bugsnag/bugsnag-go"
+)
+
+type notice struct {
+ Events []struct {
+ Exceptions []struct {
+ Message string `json:"message"`
+ } `json:"exceptions"`
+ } `json:"events"`
+}
+
+func TestNoticeReceived(t *testing.T) {
+ msg := make(chan string, 1)
+ expectedMsg := "foo"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var notice notice
+ data, _ := ioutil.ReadAll(r.Body)
+ if err := json.Unmarshal(data, &notice); err != nil {
+ t.Error(err)
+ }
+ _ = r.Body.Close()
+
+ msg <- notice.Events[0].Exceptions[0].Message
+ }))
+ defer ts.Close()
+
+ hook := &bugsnagHook{}
+
+ bugsnag.Configure(bugsnag.Configuration{
+ Endpoint: ts.URL,
+ ReleaseStage: "production",
+ APIKey: "12345678901234567890123456789012",
+ Synchronous: true,
+ })
+
+ log := logrus.New()
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": errors.New(expectedMsg),
+ }).Error("Bugsnag will not see this string")
+
+ select {
+ case received := <-msg:
+ if received != expectedMsg {
+ t.Errorf("Unexpected message received: %s", received)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Bugsnag API")
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
index 0e38a61919326719ad6c8febdd56e2377ef5a57e..5c4c44bbe557cb12accd9edd40fd25c7e75e74e5 100644
--- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -11,11 +11,12 @@ type JSONFormatter struct{}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
- // Otherwise errors are ignored by `encoding/json`
- // https://github.com/Sirupsen/logrus/issues/137
- if err, ok := v.(error); ok {
- data[k] = err.Error()
- } else {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
data[k] = v
}
}
diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go
index b392e547a7ba2b07c3212a3e68fbe57ecbd814a6..da928a37509902f61d3214b2e1beb1b4753e514a 100644
--- a/vendor/src/github.com/Sirupsen/logrus/logger.go
+++ b/vendor/src/github.com/Sirupsen/logrus/logger.go
@@ -65,11 +65,15 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
- NewEntry(logger).Debugf(format, args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
}
func (logger *Logger) Infof(format string, args ...interface{}) {
- NewEntry(logger).Infof(format, args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
}
func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -77,31 +81,45 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- NewEntry(logger).Errorf(format, args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- NewEntry(logger).Fatalf(format, args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- NewEntry(logger).Panicf(format, args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
}
func (logger *Logger) Debug(args ...interface{}) {
- NewEntry(logger).Debug(args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
}
func (logger *Logger) Info(args ...interface{}) {
- NewEntry(logger).Info(args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
}
func (logger *Logger) Print(args ...interface{}) {
@@ -109,31 +127,45 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
}
func (logger *Logger) Warning(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
}
func (logger *Logger) Error(args ...interface{}) {
- NewEntry(logger).Error(args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
}
func (logger *Logger) Fatal(args ...interface{}) {
- NewEntry(logger).Fatal(args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
}
func (logger *Logger) Panic(args ...interface{}) {
- NewEntry(logger).Panic(args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
}
func (logger *Logger) Debugln(args ...interface{}) {
- NewEntry(logger).Debugln(args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
}
func (logger *Logger) Infoln(args ...interface{}) {
- NewEntry(logger).Infoln(args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
}
func (logger *Logger) Println(args ...interface{}) {
@@ -141,21 +173,31 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
}
func (logger *Logger) Warningln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
}
func (logger *Logger) Errorln(args ...interface{}) {
- NewEntry(logger).Errorln(args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
}
func (logger *Logger) Fatalln(args ...interface{}) {
- NewEntry(logger).Fatalln(args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
}
func (logger *Logger) Panicln(args ...interface{}) {
- NewEntry(logger).Panicln(args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
}
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go
index d238bfa0b48f5ec6ebd4acd909b5b9071776fd7e..af609a53d6492443e6a664a8b81a82db2d7e05d3 100644
--- a/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go
@@ -1,4 +1,3 @@
-
package logrus
import "syscall"
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
index 71dcb6617a44972cd54b8ba2a89420eb80a0b1d5..0a06a1105f867811779ef5c8a99e23ae02b5dafe 100644
--- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -3,7 +3,6 @@ package logrus
import (
"bytes"
"fmt"
- "regexp"
"sort"
"strings"
"time"
@@ -21,7 +20,6 @@ const (
var (
baseTimestamp time.Time
isTerminal bool
- noQuoteNeeded *regexp.Regexp
)
func init() {
diff --git a/vendor/src/github.com/Sirupsen/logrus/writer.go b/vendor/src/github.com/Sirupsen/logrus/writer.go
index 90d3e01b45987df92485878e1b5924cddd2fb1de..1e30b1c753a7425ea10c7c6cb9f3762a661b519b 100644
--- a/vendor/src/github.com/Sirupsen/logrus/writer.go
+++ b/vendor/src/github.com/Sirupsen/logrus/writer.go
@@ -6,7 +6,7 @@ import (
"runtime"
)
-func (logger *Logger) Writer() (*io.PipeWriter) {
+func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)