Merge branch 'master' of github.com:docker/docker into joyentinstall

Resolved conflict in: docs/mkdocs.yml

Signed-off-by: Casey Bisson <casey.bisson@joyent.com>
@@ -232,6 +232,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			return err
 		}
 	}
+
+	// windows: show error message about modified file permissions
+	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
+	if runtime.GOOS == "windows" {
+		log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+	}
+
 	var body io.Reader
 	// Setup an upload progress bar
 	// FIXME: ProgressReader shouldn't be this annoying to use
@@ -1578,7 +1578,15 @@ func ServeApi(job *engine.Job) engine.Status {
 				chErrors <- err
 				return
 			}
-			chErrors <- srv.Serve()
+			job.Eng.OnShutdown(func() {
+				if err := srv.Close(); err != nil {
+					log.Error(err)
+				}
+			})
+			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+				err = nil
+			}
+			chErrors <- err
 		}()
 	}
 
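The graceful-shutdown path above is easier to follow with the listener behavior spelled out: once a registered shutdown handler calls `srv.Close()`, the blocked `Serve` call returns an error whose text contains "use of closed network connection", and the new code deliberately treats that as a clean exit. A minimal, runnable sketch of just that mechanism (a bare `net.Listener` and made-up timing, not the daemon's actual wiring):

```go
package main

import (
	"fmt"
	"net"
	"strings"
	"time"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// Simulate a shutdown handler closing the listener after a moment.
	go func() {
		time.Sleep(100 * time.Millisecond)
		l.Close()
	}()

	// Accept blocks until the listener is closed; the resulting error
	// contains "use of closed network connection", which we convert to a
	// nil error -- the same idea as the srv.Serve() check in the hunk above.
	_, err = l.Accept()
	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
		err = nil
	}
	fmt.Println("shutdown error:", err) // prints: shutdown error: <nil>
}
```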
@@ -1223,6 +1223,7 @@ func (container *Container) initializeNetworking() error {
 		if err != nil {
 			return err
 		}
 		container.HostnamePath = nc.HostnamePath
 		container.HostsPath = nc.HostsPath
 		container.ResolvConfPath = nc.ResolvConfPath
+		container.Config.Hostname = nc.Config.Hostname
@@ -33,7 +33,7 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		hostConfig.Memory = 0
 	}
-	if hostConfig.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		hostConfig.MemorySwap = -1
 	}
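The extra `hostConfig.MemorySwap != -1` clause matters because `-1` already means "unlimited swap" in the host config, so there is nothing for kernel swap-limit support to enforce and the warning would be noise. A toy sketch of the check (the struct below is a stand-in for illustration, not Docker's real `HostConfig`):

```go
package main

import "fmt"

// hostConfig mirrors only the two fields the hunk above touches.
type hostConfig struct {
	Memory     int64 // bytes; 0 means no limit requested
	MemorySwap int64 // bytes; -1 means unlimited swap
}

// warnAboutSwap reports whether a real swap limit had to be discarded.
func warnAboutSwap(cfg *hostConfig, kernelSupportsSwapLimit bool) bool {
	if cfg.Memory > 0 && cfg.MemorySwap != -1 && !kernelSupportsSwapLimit {
		cfg.MemorySwap = -1 // discard the unenforceable limitation
		return true
	}
	return false
}

func main() {
	a := &hostConfig{Memory: 1 << 28, MemorySwap: 1 << 29}
	fmt.Println(warnAboutSwap(a, false)) // true: a real swap limit was discarded

	b := &hostConfig{Memory: 1 << 28, MemorySwap: -1}
	fmt.Println(warnAboutSwap(b, false)) // false: -1 was already "unlimited"
}
```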
@@ -186,8 +186,9 @@ func mainDaemon() {
 	errAPI := <-serveAPIWait
 	// If we have an error here it is unique to API (as daemonErr would have
 	// exited the daemon process above)
-	if errAPI != nil {
-		log.Errorf("Shutting down due to ServeAPI error: %v", errAPI)
-	}
 	eng.Shutdown()
+	if errAPI != nil {
+		log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
+	}
+
 }
docs/README.md  (339 changed lines)
@@ -1,156 +1,255 @@
 # Docker Documentation
 
-The source for Docker documentation is here under `sources/` and uses extended
-Markdown, as implemented by [MkDocs](http://mkdocs.org).
+The source for Docker documentation is in this directory under `sources/`. Our
+documentation uses extended Markdown, as implemented by
+[MkDocs](http://mkdocs.org). The current release of the Docker documentation
+resides on [http://docs.docker.com](http://docs.docker.com).
 
-The HTML files are built and hosted on
-[http://docs.docker.com](http://docs.docker.com), and update automatically
-after each change to the `docs` branch of [Docker on
-GitHub](https://github.com/docker/docker) thanks to post-commit hooks.
+## Understanding the documentation branches and processes
 
-## Contributing
-
-Be sure to follow the [contribution guidelines](../CONTRIBUTING.md).
-In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
-
-## Getting Started
-
-Docker documentation builds are done in a Docker container, which installs all
-the required tools, adds the local `docs/` directory and builds the HTML docs.
-It then starts a HTTP server on port 8000 so that you can connect and see your
-changes.
-
-In the root of the `docker` source directory:
-
-    $ make docs
-    .... (lots of output) ....
-    docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
-    Running at: http://0.0.0.0:8000/
-    Live reload enabled.
-    Hold ctrl+c to quit.
-
-If you have any issues you need to debug, you can use `make docs-shell` and then
-run `mkdocs serve`
-
-## Testing the links
-
-You can use `make docs-test` to generate a report of missing links that are referenced in
-the documentation - there should be none.
-
-## Adding a new document
-
-New document (`.md`) files are added to the documentation builds by adding them
-to the menu definition in the `docs/mkdocs.yml` file.
-
-## Style guide
-
-If you have questions about how to write for Docker's documentation (e.g.,
-questions about grammar, syntax, formatting, styling, language, or tone) please
-see the [style guide](sources/contributing/docs_style-guide.md). If something
-isn't clear in the guide, please submit a PR to help us improve it.
-
-## Working using GitHub's file editor
-
-Alternatively, for small changes and typos you might want to use GitHub's built-
-in file editor. It allows you to preview your changes right on-line (though
-there can be some differences between GitHub Markdown and [MkDocs
-Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be
-careful not to create many commits. And you must still [sign your
-work!](../CONTRIBUTING.md#sign-your-work)
-
-## Branches
+Docker has two primary branches for documentation:
 
 | Branch   | Description                    | URL (published via commit-hook)                                               |
 |----------|--------------------------------|-------------------------------------------------------------------------------|
 | `docs`   | Official release documentation | [http://docs.docker.com](http://docs.docker.com)                              |
-| `master` | Unreleased development work    | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
+| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
 
-**There are two branches related to editing docs**: `master` and `docs`. You
-should always edit the documentation on a local branch of the `master` branch,
-and send a PR against `master`. That way your fixes will automatically get
-included in later releases, and docs maintainers can easily cherry-pick your
-changes into the `docs` release branch. In the rare case where your change is
-not forward-compatible, you may need to base your changes on the `docs` branch.
+Additions and updates to upcoming releases are made in a feature branch off of
+the `master` branch. The Docker maintainers also support a `docs` branch that
+contains the last release of documentation.
 
-Also, since there is a separate `docs` branch, we can keep
-[http://docs.docker.com](http://docs.docker.com) up to date with any bugs found
-between Docker code releases.
+After a release, documentation updates are continually merged into `master` as
+they occur. This work includes new documentation for forthcoming features, bug
+fixes, and other updates. Docker's CI system automatically builds and updates
+the `master` documentation after each merge and posts it to
+[http://docs.master.dockerproject.com](http://docs.master.dockerproject.com).
 
-## Publishing Documentation
+Periodically, the Docker maintainers update `docs.docker.com` between official
+releases of Docker. They do this by cherry-picking commits from `master`,
+merging them into `docs`, and then publishing the result.
 
-To publish a copy of the documentation you need to have Docker up and running on
-your machine. You'll also need a `docs/awsconfig` file containing the settings
-you need to access the AWS bucket you'll be deploying to.
+In the rare case where a change is not forward-compatible, changes may be made
+on other branches by special arrangement with the Docker maintainers.
 
-The release script will create an s3 if needed, and will then push the files to it.
+### Quickstart for documentation contributors
 
-    [profile dowideit-docs]
-    aws_access_key_id = IHOIUAHSIDH234rwf....
-    aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
-    region = ap-southeast-2
+If you are a new or beginner contributor, we encourage you to read through the
+[our detailed contributors
+guide](https://docs.docker.com/project/who-written-for/). The guide explains in
+detail, with examples, how to contribute. If you are an experienced contributor
+this quickstart should be enough to get you started.
 
-The `profile` name must be the same as the name of the bucket you are deploying
-to - which you call from the `docker` directory:
+The following is the essential workflow for contributing to the documentation:
 
-    make AWS_S3_BUCKET=dowideit-docs docs-release
+1. Fork the `docker/docker` repository.
 
-This will publish _only_ to the `http://bucket-url/v1.2/` version of the
-documentation.
+2. Clone the repository to your local machine.
 
-If you're publishing the current release's documentation, you need to
-also update the root docs pages by running
+3. Select an issue from `docker/docker` to work on or submit a proposal of your
+own.
 
-    make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
+4. Create a feature branch from `master` in which to work.
 
-> **Note:**
-> if you are using Boot2Docker on OSX and the above command returns an error,
-> `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
-> dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker
-> host. Run `eval "$(boot2docker shellinit)"` to see the correct variable to set. The command
-> will return the full `export` command, so you can just cut and paste.
+    By basing from `master` your work is automatically included in the next
+    release. It also allows docs maintainers to easily cherry-pick your changes
+    into the `docs` release branch.
+
+4. Modify existing or add new `.md` files to the `docs/sources` directory.
+
+    If you add a new document (`.md`) file, you must also add it to the
+    appropriate section of the `docs/mkdocs.yml` file in this repository.
+
+5. As you work, build the documentation site locally to see your changes.
+
+    The `docker/docker` repository contains a `Dockerfile` and a `Makefile`.
+    Together, these create a development environment in which you can build and
+    run a container running the Docker documentation website. To build the
+    documentation site, enter `make docs` at the root of your `docker/docker`
+    fork:
+
+        $ make docs
+        .... (lots of output) ....
+        docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
+        Running at: http://0.0.0.0:8000/
+        Live reload enabled.
+        Hold ctrl+c to quit.
+
+    The build creates an image containing all the required tools, adds the local
+    `docs/` directory and generates the HTML files. Then, it runs a Docker
+    container with this image.
+
+    The container exposes port 8000 on the localhost so that you can connect and
+    see your changes. If you are running Boot2Docker, use the `boot2docker ip`
+    to get the address of your server.
+
+6. Check your writing for style and mechanical errors.
+
+    Use our [documentation style
+    guide](https://docs.docker.com/project/doc-style/) to check style. There are
+    several [good grammar and spelling online
+    checkers](http://www.hemingwayapp.com/) that can check your writing
+    mechanics.
+
+7. Squash your commits on your branch.
+
+8. Make a pull request from your fork back to Docker's `master` branch.
+
+9. Work with the reviewers until your change is approved and merged.
+
+### Debugging and testing
+
+If you have any issues you need to debug, you can use `make docs-shell` and then
+run `mkdocs serve`. You can use `make docs-test` to generate a report of missing
+links that are referenced in the documentation—there should be none.
+
+## Style guide
+
+If you have questions about how to write for Docker's documentation, please see
+the [style guide](sources/project/doc-style.md). The style guide provides
+guidance about grammar, syntax, formatting, styling, language, or tone. If
+something isn't clear in the guide, please submit an issue to let us know or
+submit a pull request to help us improve it.
+
+## Publishing documentation (for Docker maintainers)
+
+To publish Docker's documentation you need to have Docker up and running on your
+machine. You'll also need a `docs/awsconfig` file containing the settings you
+need to access the AWS bucket you'll be deploying to.
+
+The process for publishing is to build first to an AWS bucket, verify the build,
+and then publish the final release.
+
+1. Have Docker installed and running on your machine.
+
+2. Ask the core maintainers for the `awsconfig` file.
+
+3. Copy the `awsconfig` file to the `docs/` directory.
+
+    The `awsconfig` file contains the profiles of the S3 buckets for our
+    documentation sites. (If needed, the release script creates an S3 bucket and
+    pushes the files to it.) Each profile has this format:
+
+        [profile dowideit-docs]
+        aws_access_key_id = IHOIUAHSIDH234rwf....
+        aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
+        region = ap-southeast-2
+
+    The `profile` name must be the same as the name of the bucket you are
+    deploying to.
+
+4. Call the `make` from the `docker` directory.
+
+        $ make AWS_S3_BUCKET=dowideit-docs docs-release
+
+    This publishes _only_ to the `http://bucket-url/v1.2/` version of the
+    documentation.
+
+5. If you're publishing the current release's documentation, you need to also
+   update the root docs pages by running
+
+        $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
+
+### Errors publishing using Boot2Docker
+
+Sometimes, in a Boot2Docker environment, the publishing procedure returns this
+error:
+
+    Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
+    dial unix /var/run/docker.sock: no such file or directory.
+
+If this happens, set the Docker host. Run the following command to set the
+variables in your shell:
+
+    $ eval "$(boot2docker shellinit)"
 
 ## Cherry-picking documentation changes to update an existing release.
 
-Whenever the core team makes a release, they publish the documentation based
-on the `release` branch (which is copied into the `docs` branch). The
-documentation team can make updates in the meantime, by cherry-picking changes
-from `master` into any of the docs branches.
+Whenever the core team makes a release, they publish the documentation based on
+the `release` branch. At that time, the `release` branch is copied into the
+`docs` branch. The documentation team makes updates between Docker releases by
+cherry-picking changes from `master` into any of the documentation branches.
+Typically, we cherry-pick into the `docs` branch.
 
-For example, to update the current release's docs:
+For example, to update the current release's docs, do the following:
 
-    git fetch upstream
-    git checkout -b post-1.2.0-docs-update-1 upstream/docs
-    # Then go through the Merge commit linked to PR's (making sure they apply
-    to that release)
-    # see https://github.com/docker/docker/commits/master
-    git cherry-pick -x fe845c4
-    # Repeat until you have cherry picked everything you will propose to be merged
-    git push upstream post-1.2.0-docs-update-1
+1. Go to your `docker/docker` fork and get the latest from master.
 
-Then make a pull request to merge into the `docs` branch, __NOT__ into master.
+        $ git fetch upstream
 
-Once the PR has the needed `LGTM`s, merge it, then publish to our beta server
-to test:
+2. Checkout a new branch based on `upstream/docs`.
 
-    git fetch upstream
-    git checkout docs
-    git reset --hard upstream/docs
-    make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+    You should give your new branch a descriptive name.
+
+        $ git checkout -b post-1.2.0-docs-update-1 upstream/docs
 
-Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
-to view your results and make sure what you published is what you wanted.
+3. In a browser window, open [https://github.com/docker/docker/commits/master].
 
-When you're happy with it, publish the docs to our live site:
+4. Locate the merges you want to publish.
 
-    make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
+    You should only cherry-pick individual commits; do not cherry-pick merge
+    commits. To minimize merge conflicts, start with the oldest commit and work
+    your way forward in time.
 
-Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/
+5. Copy the commit SHA from GitHub.
+
+6. Cherry-pick the commit.
+
+        $ git cherry-pick -x fe845c4
+
+7. Repeat until you have cherry-picked everything you want to merge.
+
+8. Push your changes to your fork.
+
+        $ git push origin post-1.2.0-docs-update-1
+
+9. Make a pull request to merge into the `docs` branch.
+
+    Do __NOT__ merge into `master`.
+
+10. Have maintainers review your pull request.
+
+11. Once the PR has the needed "LGTMs", merge it on GitHub.
+
+12. Return to your local fork and make sure you are still on the `docs` branch.
+
+        $ git checkout docs
+
+13. Fetch your merged pull request from `docs`.
+
+        $ git fetch upstream/docs
+
+14. Ensure your branch is clean and set to the latest.
+
+        $ git reset --hard upstream/docs
 
-Note that the new docs will not appear live on the site until the cache (a complex,
-distributed CDN system) is flushed. The `make docs-release` command will do this
-_if_ the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID (ask the meta
-team) - this will take at least 15 minutes to run and you can check its progress
-with the CDN Cloudfront Chrome addin.
+15. Copy the `awsconfig` file into the `docs` directory.
+
+16. Make the beta documentation
+
+        $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+
+17. Open [the beta
+    website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/) site
+    and make sure what you published is correct.
+
+19. When you're happy with your content, publish the docs to our live site:
+
+        $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes
+        DISTRIBUTION_ID=C2K6......FL2F docs-release
+
+20. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/]
+
+### Caching and the docs
+
+New docs do not appear live on the site until the cache (a complex, distributed
+CDN system) is flushed. The `make docs-release` command flushes the cache _if_
+the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush
+can take at least 15 minutes to run and you can check its progress with the CDN
+Cloudfront Purge Tool Chrome app.
+
+## Removing files from the docs.docker.com site
docs/mkdocs.yml:

@@ -46,6 +46,7 @@ pages:
 - ['installation/gentoolinux.md', 'Installation', 'Gentoo']
 - ['installation/softlayer.md', 'Installation', 'IBM Softlayer']
+- ['installation/joyent.md', 'Installation', 'Joyent Compute Service']
 - ['installation/azure.md', 'Installation', 'Microsoft Azure']
 - ['installation/rackspace.md', 'Installation', 'Rackspace Cloud']
 - ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
 - ['installation/oracle.md', 'Installation', 'Oracle Linux']
@@ -63,13 +63,15 @@ public or private GitHub repositories with a `Dockerfile`.
 
 ### GitHub Submodules
 
-If your GitHub repository contains links to private submodules, you'll
-need to add a deploy key from your Docker Hub repository.
+If your GitHub repository contains links to private submodules, you'll get an
+error message in your build.
 
-Your Docker Hub deploy key is located under the "Build Details"
-menu on the Automated Build's main page in the Hub. Add this key
-to your GitHub submodule by visiting the Settings page for the
-repository on GitHub and selecting "Deploy keys".
+Normally, the Docker Hub sets up a deploy key in your GitHub repository.
+Unfortunately, GitHub only allows a repository deploy key to access a single repository.
+
+To work around this, you need to create a dedicated user account in GitHub and attach
+the automated build's deploy key that account. This dedicated build account
+can be limited to read-only access to just the repositories required to build.
 
 <table class="table table-bordered">
   <thead>
@@ -82,15 +84,33 @@ repository on GitHub and selecting "Deploy keys".
   <tbody>
     <tr>
       <td>1.</td>
-      <td><img src="/docker-hub/hub-images/deploy_key.png"></td>
-      <td>Your automated build's deploy key is in the "Build Details" menu
-      under "Deploy keys".</td>
+      <td><img src="/docker-hub/hub-images/gh_org_members.png"></td>
+      <td>First, create the new account in GitHub. It should be given read-only
+      access to the main repository and all submodules that are needed.</td>
     </tr>
     <tr>
       <td>2.</td>
-      <td><img src="/docker-hub/hub-images/github_deploy_key.png"></td>
-      <td>In your GitHub submodule's repository Settings page, add the
-      deploy key from your Docker Hub Automated Build.</td>
+      <td><img src="/docker-hub/hub-images/gh_team_members.png"></td>
+      <td>This can be accomplished by adding the account to a read-only team in
+      the organization(s) where the main GitHub repository and all submodule
+      repositories are kept.</td>
+    </tr>
+    <tr>
+      <td>3.</td>
+      <td><img src="/docker-hub/hub-images/gh_repo_deploy_key.png"></td>
+      <td>Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.</td>
+    </tr>
+    <tr>
+      <td>4.</td>
+      <td><img src="/docker-hub/hub-images/deploy_key.png"></td>
+      <td>Your automated build's deploy key is in the "Build Details" menu
+      under "Deploy keys".</td>
+    </tr>
+    <tr>
+      <td>5.</td>
+      <td><img src="/docker-hub/hub-images/gh_add_ssh_user_key.png"></td>
+      <td>In your dedicated GitHub User account, add the deploy key from your
+      Docker Hub Automated Build.</td>
     </tr>
   </tbody>
 </table>
BIN  docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png  (new file; 37 KiB)
BIN  docs/sources/docker-hub/hub-images/gh_org_members.png  (new file; 26 KiB)
BIN  docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png  (new file; 32 KiB)
BIN  docs/sources/docker-hub/hub-images/gh_team_members.png  (new file; 35 KiB)
BIN  (removed image; 20 KiB)

docs/sources/installation/azure.md  (new file; 27 lines)
@@ -0,0 +1,27 @@
+page_title: Installation on Microsoft Azure Platform
+page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform.
+page_keywords: Docker, Docker documentation, installation, azure, microsoft
+
+# Microsoft Azure
+
+
+## Creating a Docker host machine on Azure
+
+Please check out to the following detailed tutorials on [Microsoft Azure][0]
+website to find out different ways to create a Docker-ready Linux virtual
+machines on Azure:
+
+* [Docker Virtual Machine Extensions on Azure][1]
+* [How to use the Docker VM Extension from Azure Cross-Platform Interface][2]
+* [How to use the Docker VM Extension with the Azure Portal][3]
+* [Using Docker Machine with Azure][4]
+
+## What next?
+
+Continue with the [User Guide](/userguide/).
+
+[0]: http://azure.microsoft.com/
+[1]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/
+[2]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-xplat-cli/
+[3]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-portal/
+[4]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/
@@ -1,6 +1,6 @@
 page_title: Create a pull request (PR)
 page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
 
 # Create a pull request (PR)
 
@@ -1,6 +1,6 @@
 page_title: Make a project contribution
 page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, expert, squash, commit
 
 <style type="text/css">
 
@@ -8,7 +8,7 @@ page_keywords: contribute, pull request, review, workflow, white-belt, black-bel
 .gh-label {
     display: inline-block;
     padding: 3px 4px;
-    font-size: 11px;
+    font-size: 12px;
     font-weight: bold;
     line-height: 1;
     color: #fff;
@@ -16,11 +16,25 @@ page_keywords: contribute, pull request, review, workflow, white-belt, black-bel
     box-shadow: inset 0 -1px 0 rgba(0,0,0,0.12);
 }
 
-.gh-label.black-belt { background-color: #000000; color: #ffffff; }
-.gh-label.bug { background-color: #fc2929; color: #ffffff; }
-.gh-label.improvement { background-color: #bfe5bf; color: #2a332a; }
-.gh-label.project-doc { background-color: #207de5; color: #ffffff; }
-.gh-label.white-belt { background-color: #ffffff; color: #333333; }
+/* Experience */
+.gh-label.beginner { background-color: #B5E0B5; color: #333333; }
+.gh-label.expert { background-color: #599898; color: #ffffff; }
+.gh-label.master { background-color: #306481; color: #ffffff; }
+.gh-label.novice { background-color: #D6F2AC; color: #333333; }
+.gh-label.proficient { background-color: #8DC7A9; color: #333333; }
+
+/* Kind */
+.gh-label.bug { background-color: #FF9DA4; color: #333333; }
+.gh-label.cleanup { background-color: #FFB7B3; color: #333333; }
+.gh-label.content { background-color: #CDD3C2; color: #333333; }
+.gh-label.feature { background-color: #B7BEB7; color: #333333; }
+.gh-label.graphics { background-color: #E1EFCB; color: #333333; }
+.gh-label.improvement { background-color: #EBD2BB; color: #333333; }
+.gh-label.proposal { background-color: #FFD9C0; color: #333333; }
+.gh-label.question { background-color: #EEF1D1; color: #333333; }
+.gh-label.usecase { background-color: #F0E4C2; color: #333333; }
+.gh-label.writing { background-color: #B5E9D5; color: #333333; }
 
 
 </style>
 
@@ -37,20 +51,44 @@ An existing issue is something reported by a Docker user. As issues come in,
 our maintainers triage them. Triage is its own topic. For now, it is important
 for you to know that triage includes ranking issues according to difficulty.
 
-Triaged issues have either a <strong class="gh-label white-belt">white-belt</strong>
-or <strong class="gh-label black-belt">black-belt</strong> label.
-A <strong class="gh-label white-belt">white-belt</strong> issue is considered
-an easier issue. Issues can have more than one label, for example,
-<strong class="gh-label bug">bug</strong>,
-<strong class="gh-label improvement">improvement</strong>,
-<strong class="gh-label project-doc">project/doc</strong>, and so forth.
-These other labels are there for filtering purposes but you might also find
-them helpful.
+Triaged issues have one of these labels:
+
+<table class="tg">
+  <tr>
+    <td class="tg-031e">Level</td>
+    <td class="tg-031e">Experience level guideline</td>
+  </tr>
+  <tr>
+    <td class="tg-031e"><strong class="gh-label beginner">exp/beginner</strong></td>
+    <td class="tg-031e">You have made less than 10 contributions in your life time to any open source project.</td>
+  </tr>
+  <tr>
+    <td class="tg-031e"><strong class="gh-label novice">exp/novice</strong></td>
+    <td class="tg-031e">You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. </td>
+  </tr>
+  <tr>
+    <td class="tg-031e"><strong class="gh-label proficient">exp/proficient</strong></td>
+    <td class="tg-031e">You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. </td>
+  </tr>
+  <tr>
+    <td class="tg-031e"><strong class="gh-label expert">exp/expert</strong></td>
+    <td class="tg-031e">You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. </td>
+  </tr>
+  <tr>
+    <td class="tg-031e"><strong class="gh-label master">exp/master</strong></td>
+    <td class="tg-031e">You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines.</td>
+  </tr>
+</table>
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an <strong
+class="gh-label expert">exp/expert</strong> or <strong class="gh-label
+master">exp/master</strong> level task.
 
-## Claim a white-belt issue
-
-In this section, you find and claim an open white-belt issue.
+## Claim a beginner or novice issue
+
+In this section, you find and claim an open documentation lines issue.
 
 
 1. Go to the `docker/docker` <a
@@ -62,11 +100,11 @@ In this section, you find and claim an open white-belt issue.
 
    ![Open issues](/project/images/issue_list.png)
 
-3. Look for the <strong class="gh-label white-belt">white-belt</strong> items on the list.
+3. Look for the <strong class="gh-label beginner">exp/beginner</strong> items on the list.
 
-4. Click on the "labels" dropdown and select <strong class="gh-label white-belt">white-belt</strong>.
+4. Click on the "labels" dropdown and select <strong class="gh-label beginner">exp/beginner</strong>.
 
-   The system filters to show only open <strong class="gh-label white-belt">white-belt</strong> issues.
+   The system filters to show only open <strong class="gh-label beginner">exp/beginner</strong> issues.
 
 5. Open an issue that interests you.
 
@@ -75,21 +113,18 @@ In this section, you find and claim an open white-belt issue.
 
 6. Make sure that no other user has chosen to work on the issue.
 
-   We don't allow external contributors to assign issues to themselves, so you
-   need to read the comments to find if a user claimed an issue by saying:
-
-   - "I'd love to give this a try~"
-   - "I'll work on this!"
-   - "I'll take this."
-
-   The community is very good about claiming issues explicitly.
+   We don't allow external contributors to assign issues to themselves. So, you
+   need to read the comments to find if a user claimed the issue by leaving a
+   `#dibs` comment on the issue.
 
-7. When you find an open issue that both interests you and is unclaimed, claim it yourself by adding a comment.
+7. When you find an open issue that both interests you and is unclaimed, add a
+   `#dibs` comment.
 
    ![Easy issue](/project/images/easy_issue.png)
 
   This example uses issue 11038. Your issue # will be different depending on
-   what you claimed.
+   what you claimed. After a moment, Gordon the Docker bot, changes the issue
+   status to claimed.
 
 8. Make a note of the issue number; you'll need it later.
 
BIN  (updated image; 43 KiB before, 112 KiB after)
@@ -16,7 +16,7 @@ process simple so you'll want to contribute frequently.
 ## The basic contribution workflow
 
 In this guide, you work through Docker's basic contribution workflow by fixing a
-single *white-belt* issue in the `docker/docker` repository. The workflow
+single *beginner* issue in the `docker/docker` repository. The workflow
 for fixing simple issues looks like this:
 
 ![Simple process](/project/images/existing_issue.png)
@@ -1,6 +1,6 @@
 page_title: Participate in the PR Review
 page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
 
 
 # Participate in the PR Review
@@ -117,8 +117,7 @@ see the GitHub help on deleting branches</a>.
 ## Where to go next
 
 At this point, you have completed all the basic tasks in our contributors guide.
-If you enjoyed contributing, let us know by completing another
-<strong class="gh-label white-belt">white-belt</strong>
+If you enjoyed contributing, let us know by completing another beginner
 issue or two. We really appreciate the help.
 
 If you are very experienced and want to make a major change, go on to
@@ -138,7 +138,7 @@ As you change code in your fork, you make your changes on a repository branch.
 The branch name should reflect what you are working on. In this section, you
 create a branch, make a change, and push it up to your fork.
 
-This branch is just for testing your config for this guide. The changes arepart
+This branch is just for testing your config for this guide. The changes are part
 of a dry run so the branch name is going to be dry-run-test. To create an push
 the branch to your fork on GitHub:
 
@@ -1,6 +1,6 @@
 page_title: Work on your issue
 page_description: Basic workflow for Docker contributions
-page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
+page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
 
 
 # Work on your issue
@@ -1514,14 +1514,6 @@ just a specific mapping:
     $ sudo docker port test 7890
     0.0.0.0:4321
 
-## rename
-
-    Usage: docker rename OLD_NAME NEW_NAME
-
-    rename a existing container to a NEW_NAME
-
-The `docker rename` command allows the container to be renamed to a different name.
-
 ## ps
 
     Usage: docker ps [OPTIONS]
@@ -1617,6 +1609,14 @@ use `docker pull`:
 Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
 registry or to a self-hosted one.
 
+## rename
+
+    Usage: docker rename OLD_NAME NEW_NAME
+
+    rename a existing container to a NEW_NAME
+
+The `docker rename` command allows the container to be renamed to a different name.
+
 ## restart
 
     Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...]
 
@@ -46,18 +46,19 @@ func unregister(name string) {
 // It acts as a store for *containers*, and allows manipulation of these
 // containers by executing *jobs*.
 type Engine struct {
-	handlers   map[string]Handler
-	catchall   Handler
-	hack       Hack // data for temporary hackery (see hack.go)
-	id         string
-	Stdout     io.Writer
-	Stderr     io.Writer
-	Stdin      io.Reader
-	Logging    bool
-	tasks      sync.WaitGroup
-	l          sync.RWMutex // lock for shutdown
-	shutdown   bool
-	onShutdown []func() // shutdown handlers
+	handlers     map[string]Handler
+	catchall     Handler
+	hack         Hack // data for temporary hackery (see hack.go)
+	id           string
+	Stdout       io.Writer
+	Stderr       io.Writer
+	Stdin        io.Reader
+	Logging      bool
+	tasks        sync.WaitGroup
+	l            sync.RWMutex // lock for shutdown
+	shutdownWait sync.WaitGroup
+	shutdown     bool
+	onShutdown   []func() // shutdown handlers
 }
 
 func (eng *Engine) Register(name string, handler Handler) error {
@@ -143,6 +144,7 @@ func (eng *Engine) Job(name string, args ...string) *Job {
 func (eng *Engine) OnShutdown(h func()) {
 	eng.l.Lock()
 	eng.onShutdown = append(eng.onShutdown, h)
+	eng.shutdownWait.Add(1)
 	eng.l.Unlock()
 }
 
@@ -156,6 +158,7 @@ func (eng *Engine) Shutdown() {
 	eng.l.Lock()
 	if eng.shutdown {
 		eng.l.Unlock()
+		eng.shutdownWait.Wait()
 		return
 	}
 	eng.shutdown = true
@@ -180,17 +183,15 @@ func (eng *Engine) Shutdown() {
 
 	// Call shutdown handlers, if any.
 	// Timeout after 10 seconds.
-	var wg sync.WaitGroup
 	for _, h := range eng.onShutdown {
-		wg.Add(1)
 		go func(h func()) {
-			defer wg.Done()
 			h()
+			eng.shutdownWait.Done()
 		}(h)
 	}
 	done := make(chan struct{})
 	go func() {
-		wg.Wait()
+		eng.shutdownWait.Wait()
 		close(done)
 	}()
 	select {
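Taken together, the three engine.go hunks move the `WaitGroup` from a variable local to `Shutdown` onto the struct itself, counted up in `OnShutdown`, so that a second concurrent caller of `Shutdown` blocks until every handler has actually finished instead of returning immediately. A stripped-down, runnable sketch of that pattern (an illustration with a plain `sync.Mutex`, not the engine's full locking story):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type engine struct {
	l            sync.Mutex
	shutdown     bool
	shutdownWait sync.WaitGroup
	onShutdown   []func()
}

// OnShutdown registers a handler and bumps the wait count, exactly as the
// diff adds eng.shutdownWait.Add(1) above.
func (e *engine) OnShutdown(h func()) {
	e.l.Lock()
	e.onShutdown = append(e.onShutdown, h)
	e.shutdownWait.Add(1)
	e.l.Unlock()
}

func (e *engine) Shutdown() {
	e.l.Lock()
	if e.shutdown {
		e.l.Unlock()
		e.shutdownWait.Wait() // late callers now wait for handlers, too
		return
	}
	e.shutdown = true
	e.l.Unlock()

	for _, h := range e.onShutdown {
		go func(h func()) {
			h()
			e.shutdownWait.Done()
		}(h)
	}
	e.shutdownWait.Wait()
}

func main() {
	var e engine
	e.OnShutdown(func() {
		time.Sleep(50 * time.Millisecond)
		fmt.Println("handler done")
	})
	go e.Shutdown() // one caller runs the handlers
	time.Sleep(10 * time.Millisecond)
	e.Shutdown() // the other blocks until "handler done"
	fmt.Println("all shut down")
}
```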
@@ -74,7 +74,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 		logName = utils.ImageReference(logName, tag)
 	}
 
-	if len(repoInfo.Index.Mirrors) == 0 && ((repoInfo.Official && repoInfo.Index.Official) || endpoint.Version == registry.APIVersion2) {
+	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
 		if repoInfo.Official {
 			j := job.Eng.Job("trust_update_base")
 			if err = j.Run(); err != nil {
@@ -539,7 +539,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 		return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
 	}
 
-	if endpoint.Version == registry.APIVersion2 {
+	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
 		err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
 		if err == nil {
 			return engine.StatusOK
@@ -53,7 +53,7 @@ clone hg code.google.com/p/gosqlite 74691fb6f837
 
 clone git github.com/docker/libtrust 230dfd18c232
 
-clone git github.com/Sirupsen/logrus v0.6.6
+clone git github.com/Sirupsen/logrus v0.7.1
 
 clone git github.com/go-fsnotify/fsnotify v1.0.4
 
@@ -4623,8 +4623,19 @@ func TestBuildStderr(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if stderr != "" {
-		t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
+
+	if runtime.GOOS == "windows" {
+		// stderr might contain a security warning on windows
+		lines := strings.Split(stderr, "\n")
+		for _, v := range lines {
+			if v != "" && !strings.Contains(v, "SECURITY WARNING:") {
+				t.Fatalf("Stderr contains unexpected output line: %q", v)
+			}
+		}
+	} else {
+		if stderr != "" {
+			t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
+		}
 	}
 	logDone("build - testing stderr")
 }
@@ -5113,9 +5124,13 @@ func TestBuildSpaces(t *testing.T) {
 		t.Fatal("Build 2 was supposed to fail, but didn't")
 	}
 
+	removeLogTimestamps := func(s string) string {
+		return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
+	}
+
 	// Skip over the times
-	e1 := err1.Error()[strings.Index(err1.Error(), `level=`):]
-	e2 := err2.Error()[strings.Index(err1.Error(), `level=`):]
+	e1 := removeLogTimestamps(err1.Error())
+	e2 := removeLogTimestamps(err2.Error())
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@@ -5128,8 +5143,8 @@ func TestBuildSpaces(t *testing.T) {
 	}
 
 	// Skip over the times
-	e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
-	e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
+	e1 = removeLogTimestamps(err1.Error())
+	e2 = removeLogTimestamps(err2.Error())
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@@ -5142,8 +5157,8 @@ func TestBuildSpaces(t *testing.T) {
 	}
 
 	// Skip over the times
-	e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
-	e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
+	e1 = removeLogTimestamps(err1.Error())
+	e2 = removeLogTimestamps(err2.Error())
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
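The three TestBuildSpaces hunks replace brittle string slicing at `level=` with a regexp that rewrites every `time="..."` field to a fixed token, so two log lines emitted at different moments compare equal. A self-contained demo of that normalization (sample log lines are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same closure as the one introduced in the test above.
	removeLogTimestamps := func(s string) string {
		return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
	}

	a := `time="2015-02-24T10:00:01Z" level=fatal msg="Error: no such file"`
	b := `time="2015-02-24T10:00:07Z" level=fatal msg="Error: no such file"`

	// Only the timestamps differed, so the normalized strings match.
	fmt.Println(removeLogTimestamps(a) == removeLogTimestamps(b)) // true
}
```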
@@ -800,3 +800,31 @@ func TestDaemonDots(t *testing.T) {
 
 	logDone("daemon - test dots on INFO")
 }
+
+func TestDaemonUnixSockCleanedUp(t *testing.T) {
+	d := NewDaemon(t)
+	dir, err := ioutil.TempDir("", "socket-cleanup-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	sockPath := filepath.Join(dir, "docker.sock")
+	if err := d.Start("--host", "unix://"+sockPath); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(sockPath); err != nil {
+		t.Fatal("socket does not exist")
+	}
+
+	if err := d.Stop(); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
+		t.Fatal("unix socket is not cleaned up")
+	}
+
+	logDone("daemon - unix socket is cleaned up")
+}
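Background for the new test: a unix-socket listener leaves a filesystem entry behind unless it is closed or unlinked, and the test asserts the daemon's shutdown path takes care of that. A quick standalone illustration of the underlying behavior (temporary path chosen by the sketch itself):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "socket-cleanup-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	sockPath := filepath.Join(dir, "docker.sock")
	l, err := net.Listen("unix", sockPath)
	if err != nil {
		panic(err)
	}

	_, err = os.Stat(sockPath)
	fmt.Println("exists while listening:", err == nil) // true

	// Closing the listener removes the socket file. A daemon killed
	// without closing it would leave the path behind, which is exactly
	// what the test above guards against.
	l.Close()
	_, err = os.Stat(sockPath)
	fmt.Println("exists after close:", err == nil) // false
}
```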
@@ -55,6 +55,8 @@ func TestPullImageWithAliases(t *testing.T) {
 
 // pulling library/hello-world should show verified message
 func TestPullVerified(t *testing.T) {
+	t.Skip("problems verifying library/hello-world (to be fixed)")
+
 	// Image must be pulled from central repository to get verified message
 	// unless keychain is manually updated to contain the daemon's sign key.
 
@@ -412,6 +412,31 @@ func TestRunLinkToContainerNetMode(t *testing.T) {
 	logDone("run - link to a container which net mode is container success")
 }
 
+func TestRunModeNetContainerHostname(t *testing.T) {
+	defer deleteAllContainers()
+	cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+	cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatalf("failed to exec command: %v, output: %q", err, out)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
+	out1, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatalf("failed to run container: %v, output: %q", err, out1)
+	}
+	if out1 != out {
+		t.Fatal("containers with shared net namespace should have same hostname")
+	}
+
+	logDone("run - containers with shared net namespace have same hostname")
+}
+
 // Regression test for #4741
 func TestRunWithVolumesAsFiles(t *testing.T) {
 	defer deleteAllContainers()
@@ -109,23 +109,6 @@ func TestRunWithUlimits(t *testing.T) {
 	logDone("run - ulimits are set")
 }
 
-func getCgroupPaths(test string) map[string]string {
-	cgroupPaths := map[string]string{}
-	for _, line := range strings.Split(test, "\n") {
-		line = strings.TrimSpace(line)
-		if line == "" {
-			continue
-		}
-		parts := strings.Split(line, ":")
-		if len(parts) != 3 {
-			fmt.Printf("unexpected file format for /proc/self/cgroup - %q\n", line)
-			continue
-		}
-		cgroupPaths[parts[1]] = parts[2]
-	}
-	return cgroupPaths
-}
-
 func TestRunContainerWithCgroupParent(t *testing.T) {
 	testRequires(t, NativeExecDriver)
 	defer deleteAllContainers()
@@ -135,7 +118,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to read '/proc/self/cgroup - %v", err)
 	}
-	selfCgroupPaths := getCgroupPaths(string(data))
+	selfCgroupPaths := parseCgroupPaths(string(data))
 	selfCpuCgroup, found := selfCgroupPaths["memory"]
 	if !found {
 		t.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
@@ -145,7 +128,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
 	}
-	cgroupPaths := getCgroupPaths(string(out))
+	cgroupPaths := parseCgroupPaths(string(out))
 	if len(cgroupPaths) == 0 {
 		t.Fatalf("unexpected output - %q", string(out))
 	}
@@ -173,7 +156,7 @@ func TestRunContainerWithCgroupParentAbsPath(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
 	}
-	cgroupPaths := getCgroupPaths(string(out))
+	cgroupPaths := parseCgroupPaths(string(out))
 	if len(cgroupPaths) == 0 {
 		t.Fatalf("unexpected output - %q", string(out))
 	}
@@ -328,3 +328,17 @@ func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
 		}
 	}
 }
+
+// Parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
+// a map which cgroup name as key and path as value.
+func parseCgroupPaths(procCgroupData string) map[string]string {
+	cgroupPaths := map[string]string{}
+	for _, line := range strings.Split(procCgroupData, "\n") {
+		parts := strings.Split(line, ":")
+		if len(parts) != 3 {
+			continue
+		}
+		cgroupPaths[parts[1]] = parts[2]
+	}
+	return cgroupPaths
+}
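The refactor above moves the cgroup parsing into a shared `parseCgroupPaths` helper (dropping the per-line trimming and logging of the old copy). A standalone version fed a sample `/proc/<pid>/cgroup` payload, to show the resulting subsystem-to-path map (the sample data is invented):

```go
package main

import (
	"fmt"
	"strings"
)

// Identical logic to the helper added above: each "N:subsystem:path" line
// becomes a map entry keyed by subsystem name.
func parseCgroupPaths(procCgroupData string) map[string]string {
	cgroupPaths := map[string]string{}
	for _, line := range strings.Split(procCgroupData, "\n") {
		parts := strings.Split(line, ":")
		if len(parts) != 3 {
			continue // skips blank lines and anything malformed
		}
		cgroupPaths[parts[1]] = parts[2]
	}
	return cgroupPaths
}

func main() {
	sample := "4:memory:/docker/abc123\n3:cpu:/docker/abc123\n"
	paths := parseCgroupPaths(sample)
	fmt.Println(paths["memory"]) // /docker/abc123
	fmt.Println(paths["cpu"])    // /docker/abc123
}
```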
@@ -54,30 +54,51 @@ that the user can easily script and know the reason why the command failed.
 ### Step 3: Classify the Issue
 
 Classifications help both to inform readers about an issue's priority and how to resolve it.
-This is also helpful for identifying new, critical issues. Classifications types are
-applied to the issue or pull request using labels.
+This is also helpful for identifying new, critical issues. "Kinds of" are
+applied to the issue or pull request using labels. You can apply one or more labels.
 
-Types of classification:
+Kinds of classifications:
 
-| Type        | Description                                                                                                                      |
-|-------------|----------------------------------------------------------------------------------------------------------------------------------|
-| improvement | improvements are not bugs or new features but can drastically improve usability.                                                |
-| regression  | regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. |
-| bug         | bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate.    |
-| feature     | features are new and shinny. They are things that the project does not currently support.                                       |
+| Kind             | Description                                                                                                                      |
+|------------------|----------------------------------------------------------------------------------------------------------------------------------|
+| kind/enhancement | Enhancement are not bugs or new features but can drastically improve usability or performance of a project component.           |
+| kind/cleanup     | Refactoring code or otherwise clarifying documentation.                                                                          |
+| kind/content     | Content that is not documentation such as help or error messages.                                                               |
+| kind/graphics    | Work involving graphics skill                                                                                                   |
+| kind/regression  | Regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. |
+| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate.    |
+| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shinny.                       |
+| kind/question    | Contains a user or contributor question requiring a response.                                                                   |
+| kind/usecase     | A description of a user or contributor situation requiring a response perhaps in code or documentation.                         |
+| kind/writing     | Writing documentation, man pages, articles, blogs, or other significant word-driven task.                                       |
+| kind/test        | Tests or test infrastructure needs adding or updating.                                                                          |
 
-### Step 4: Estimate the Difficulty
-
-Difficulty is a way for a contributor to find an issue based on their skill set. Difficulty types are
-applied to the issue or pull request using labels.
+Contributors can add labels by using a `+kind/bug` in an issue or pull request comment.
 
-Difficulty
+### Step 4: Estimate the experience level required
+
+Experience level is a way for a contributor to find an issue based on their
+skill set. Experience types are applied to the issue or pull request using
+labels.
+
+| Level            | Experience level guideline                                                                                                 |
+|------------------|------------------------------------------------------------------------------------------------------------------------------|
+| exp/beginner     | You have made less than 10 contributions in your life time to any open source project.                                        |
+| exp/novice       | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker.                     |
+| exp/proficient   | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines.        |
+| exp/expert       | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines.            |
+| exp/master       | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines.                    |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an <strong
+class="gh-label expert">exp/expert</strong> or <strong class="gh-label
+master">exp/master</strong> level task.
+
+Contributors can add labels by using a `+exp/expert` format in issue comment.
 
-| Type         | Description                                                      |
-|--------------|--------------------------------------------------------------------|
-| white-belt   | Simple, non-time consuming issue, easy first task to accomplish  |
-| black-belt   | Expert at the subject matter or someone who likes pain           |
 
 And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue.
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"strconv"
 
 	log "github.com/Sirupsen/logrus"
@@ -212,29 +213,14 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str
 // 'layer' is an uncompressed reader of the blob to be pushed.
 // The server will generate it's own checksum calculation.
 func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error {
-	routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)
+	location, err := r.initiateBlobUpload(ep, imageName, auth)
 	if err != nil {
 		return err
 	}
 
-	log.Debugf("[registry] Calling %q %s", "POST", routeURL)
-	req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
-	if err != nil {
-		return err
-	}
-
-	if err := auth.Authorize(req); err != nil {
-		return err
-	}
-	res, _, err := r.doRequest(req)
-	if err != nil {
-		return err
-	}
-	location := res.Header.Get("Location")
-
 	method := "PUT"
 	log.Debugf("[registry] Calling %q %s", method, location)
-	req, err = r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
+	req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
 	if err != nil {
 		return err
 	}
@@ -244,7 +230,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string
 	if err := auth.Authorize(req); err != nil {
 		return err
 	}
-	res, _, err = r.doRequest(req)
+	res, _, err := r.doRequest(req)
 	if err != nil {
 		return err
 	}
@ -265,6 +251,51 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string
|
|||
return nil
|
||||
}
|
||||
|
||||
// initiateBlobUpload gets the blob upload location for the given image name.
|
||||
func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) {
|
||||
routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Debugf("[registry] Calling %q %s", "POST", routeURL)
|
||||
req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := auth.Authorize(req); err != nil {
|
||||
return "", err
|
||||
}
|
||||
res, _, err := r.doRequest(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if res.StatusCode != http.StatusAccepted {
|
||||
if res.StatusCode == http.StatusUnauthorized {
|
||||
return "", errLoginRequired
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", ErrDoesNotExist
|
||||
}
|
||||
|
||||
errBody, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
|
||||
return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res)
|
||||
}
|
||||
|
||||
if location = res.Header.Get("Location"); location == "" {
|
||||
return "", fmt.Errorf("registry did not return a Location header for resumable blob upload for image %s", imageName)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Finally Push the (signed) manifest of the blobs we've just pushed
|
||||
func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) {
|
||||
routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
|
||||
|
|
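The refactor above splits the V2 blob push into an initiate step (POST, expecting a `202 Accepted` plus a `Location` header) followed by a PUT of the blob body to the returned location. A minimal standalone sketch of that flow using plain `net/http`; the names `uploadBlob` and `uploadURL` are illustrative, not part of the registry package:

```go
package blobupload

import (
	"fmt"
	"io"
	"net/http"
)

// uploadBlob sketches the two-step flow: initiate, then PUT the blob.
func uploadBlob(client *http.Client, uploadURL string, blob io.Reader) error {
	// Step 1: initiate the upload; the registry answers 202 Accepted
	// with a Location header pointing at the resumable upload.
	res, err := client.Post(uploadURL, "", nil)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status %d initiating upload", res.StatusCode)
	}
	location := res.Header.Get("Location")
	if location == "" {
		return fmt.Errorf("registry did not return a Location header")
	}

	// Step 2: PUT the blob body to the returned location.
	req, err := http.NewRequest("PUT", location, blob)
	if err != nil {
		return err
	}
	res, err = client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```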
vendor/src/github.com/Sirupsen/logrus/README.md (vendored, 51 changes)

@@ -82,7 +82,7 @@ func init() {

 	// Use the Airbrake hook to report errors that have Error severity or above to
 	// an exception tracker. You can create custom hooks, see the Hooks section.
-	log.AddHook(&logrus_airbrake.AirbrakeHook{})
+	log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

 	// Output to stderr instead of stdout, could also be a file.
 	log.SetOutput(os.Stderr)
@@ -164,43 +164,8 @@ You can add hooks for logging levels. For example to send errors to an exception
 tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
 multiple places simultaneously, e.g. syslog.

-```go
-// Not the real implementation of the Airbrake hook. Just a simple sample.
-import (
-  log "github.com/Sirupsen/logrus"
-)
-
-func init() {
-  log.AddHook(new(AirbrakeHook))
-}
-
-type AirbrakeHook struct{}
-
-// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
-// the fields for the entry. See the Fields section of the README.
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
-  err := airbrake.Notify(entry.Data["error"].(error))
-  if err != nil {
-    log.WithFields(log.Fields{
-      "source":   "airbrake",
-      "endpoint": airbrake.Endpoint,
-    }).Info("Failed to send error to Airbrake")
-  }
-
-  return nil
-}
-
-// `Levels()` returns a slice of `Levels` the hook is fired for.
-func (hook *AirbrakeHook) Levels() []log.Level {
-  return []log.Level{
-    log.ErrorLevel,
-    log.FatalLevel,
-    log.PanicLevel,
-  }
-}
-```
-
-Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:

 ```go
 import (
@@ -211,7 +176,7 @@ import (
 )

 func init() {
-	log.AddHook(new(logrus_airbrake.AirbrakeHook))
+	log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

 	hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
 	if err != nil {
@@ -233,6 +198,9 @@ func init() {
   Send errors to remote syslog server.
   Uses standard library `log/syslog` behind the scenes.

+* [`github.com/Sirupsen/logrus/hooks/bugsnag`](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go)
+  Send errors to the Bugsnag exception tracking service.
+
 * [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
   Send errors to a channel in hipchat.

@@ -321,6 +289,11 @@ The built-in logging formatters are:
   field to `true`. To force no colored output even if there is a TTY set the
   `DisableColors` field to `true`
 * `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+  ```go
+    logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+  ```

 Third party logging formatters:
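The inline `AirbrakeHook` sample removed from the README above also doubled as documentation of the hook interface (`Fire` plus `Levels`). A minimal custom hook sketch against that same interface; `stderrHook` is an illustrative name, not part of logrus:

```go
package main

import (
	"fmt"
	"os"

	"github.com/Sirupsen/logrus"
)

type stderrHook struct{}

// Fire receives the entry the hook fires for; entry.Data holds its fields.
func (h *stderrHook) Fire(entry *logrus.Entry) error {
	fmt.Fprintf(os.Stderr, "hooked: %s\n", entry.Message)
	return nil
}

// Levels restricts the hook to error-and-above entries.
func (h *stderrHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&stderrHook{})
	log.Error("sent to the hook as well as the normal output")
}
```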
@@ -3,21 +3,16 @@ package main

 import (
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus/hooks/airbrake"
-	"github.com/tobi/airbrake-go"
 )

 var log = logrus.New()

 func init() {
 	log.Formatter = new(logrus.TextFormatter) // default
-	log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
+	log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
 }

 func main() {
-	airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
-	airbrake.ApiKey = "whatever"
-	airbrake.Environment = "production"
-
 	log.WithFields(logrus.Fields{
 		"animal": "walrus",
 		"size":   10,
vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go (vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
package logstash

import (
	"encoding/json"
	"fmt"
	"github.com/Sirupsen/logrus"
	"time"
)

// Formatter generates json in logstash format.
// Logstash site: http://logstash.net/
type LogstashFormatter struct {
	Type string // if not empty use for logstash type field.
}

func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	entry.Data["@version"] = 1
	entry.Data["@timestamp"] = entry.Time.Format(time.RFC3339)

	// set message field
	v, ok := entry.Data["message"]
	if ok {
		entry.Data["fields.message"] = v
	}
	entry.Data["message"] = entry.Message

	// set level field
	v, ok = entry.Data["level"]
	if ok {
		entry.Data["fields.level"] = v
	}
	entry.Data["level"] = entry.Level.String()

	// set type field
	if f.Type != "" {
		v, ok = entry.Data["type"]
		if ok {
			entry.Data["fields.type"] = v
		}
		entry.Data["type"] = f.Type
	}

	serialized, err := json.Marshal(entry.Data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}
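A usage sketch for the formatter added above, assuming the vendored import path; each log call then emits one logstash-style JSON event per line:

```go
package main

import (
	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/formatters/logstash"
)

func main() {
	log := logrus.New()
	log.Formatter = &logstash.LogstashFormatter{Type: "application_name"}

	// Any caller-supplied "message"/"level"/"type" fields are preserved
	// under "fields.*" while the formatter claims the top-level keys.
	log.WithFields(logrus.Fields{"message": "custom"}).Info("hello")
}
```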
vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go (vendored, new file, 52 lines)

@@ -0,0 +1,52 @@
package logstash

import (
	"bytes"
	"encoding/json"
	"github.com/Sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"testing"
)

func TestLogstashFormatter(t *testing.T) {
	assert := assert.New(t)

	lf := LogstashFormatter{Type: "abc"}

	fields := logrus.Fields{
		"message": "def",
		"level":   "ijk",
		"type":    "lmn",
		"one":     1,
		"pi":      3.14,
		"bool":    true,
	}

	entry := logrus.WithFields(fields)
	entry.Message = "msg"
	entry.Level = logrus.InfoLevel

	b, _ := lf.Format(entry)

	var data map[string]interface{}
	dec := json.NewDecoder(bytes.NewReader(b))
	dec.UseNumber()
	dec.Decode(&data)

	// base fields
	assert.Equal(json.Number("1"), data["@version"])
	assert.NotEmpty(data["@timestamp"])
	assert.Equal("abc", data["type"])
	assert.Equal("msg", data["message"])
	assert.Equal("info", data["level"])

	// substituted fields
	assert.Equal("def", data["fields.message"])
	assert.Equal("ijk", data["fields.level"])
	assert.Equal("lmn", data["fields.type"])

	// formats
	assert.Equal(json.Number("1"), data["one"])
	assert.Equal(json.Number("3.14"), data["pi"])
	assert.Equal(true, data["bool"])
}
vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go

@@ -1,51 +1,51 @@
-package logrus_airbrake
+package airbrake

 import (
+	"errors"
+	"fmt"
+
 	"github.com/Sirupsen/logrus"
 	"github.com/tobi/airbrake-go"
 )

 // AirbrakeHook to send exceptions to an exception-tracking service compatible
-// with the Airbrake API. You must set:
-// * airbrake.Endpoint
-// * airbrake.ApiKey
-// * airbrake.Environment
-//
-// Before using this hook, to send an error. Entries that trigger an Error,
-// Fatal or Panic should now include an "error" field to send to Airbrake.
-type AirbrakeHook struct{}
+// with the Airbrake API.
+type airbrakeHook struct {
+	APIKey      string
+	Endpoint    string
+	Environment string
+}

-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
-	if entry.Data["error"] == nil {
-		entry.Logger.WithFields(logrus.Fields{
-			"source":   "airbrake",
-			"endpoint": airbrake.Endpoint,
-		}).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
-		return nil
+func NewHook(endpoint, apiKey, env string) *airbrakeHook {
+	return &airbrakeHook{
+		APIKey:      apiKey,
+		Endpoint:    endpoint,
+		Environment: env,
 	}
+}

+func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
+	airbrake.ApiKey = hook.APIKey
+	airbrake.Endpoint = hook.Endpoint
+	airbrake.Environment = hook.Environment
+
+	var notifyErr error
 	err, ok := entry.Data["error"].(error)
-	if !ok {
-		entry.Logger.WithFields(logrus.Fields{
-			"source":   "airbrake",
-			"endpoint": airbrake.Endpoint,
-		}).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
-		return nil
+	if ok {
+		notifyErr = err
+	} else {
+		notifyErr = errors.New(entry.Message)
 	}

-	airErr := airbrake.Notify(err)
+	airErr := airbrake.Notify(notifyErr)
 	if airErr != nil {
 		entry.Logger.WithFields(logrus.Fields{
 			"source":   "airbrake",
 			"endpoint": airbrake.Endpoint,
 			"error":    airErr,
 		}).Warn("Failed to send error to Airbrake")
+		return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
 	}

 	return nil
 }

-func (hook *AirbrakeHook) Levels() []logrus.Level {
+func (hook *airbrakeHook) Levels() []logrus.Level {
 	return []logrus.Level{
 		logrus.ErrorLevel,
 		logrus.FatalLevel,
vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
vendored
Normal file
|
@ -0,0 +1,133 @@
|
|||
package airbrake
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
type notice struct {
|
||||
Error NoticeError `xml:"error"`
|
||||
}
|
||||
type NoticeError struct {
|
||||
Class string `xml:"class"`
|
||||
Message string `xml:"message"`
|
||||
}
|
||||
|
||||
type customErr struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *customErr) Error() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
const (
|
||||
testAPIKey = "abcxyz"
|
||||
testEnv = "development"
|
||||
expectedClass = "*airbrake.customErr"
|
||||
expectedMsg = "foo"
|
||||
unintendedMsg = "Airbrake will not see this string"
|
||||
)
|
||||
|
||||
var (
|
||||
noticeError = make(chan NoticeError, 1)
|
||||
)
|
||||
|
||||
// TestLogEntryMessageReceived checks if invoking Logrus' log.Error
|
||||
// method causes an XML payload containing the log entry message is received
|
||||
// by a HTTP server emulating an Airbrake-compatible endpoint.
|
||||
func TestLogEntryMessageReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.Error(expectedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != expectedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLogEntryMessageReceived confirms that, when passing an error type using
|
||||
// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the
|
||||
// error message returned by the Error() method on the error interface
|
||||
// rather than the logrus.Entry.Message string.
|
||||
func TestLogEntryWithErrorReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": &customErr{expectedMsg},
|
||||
}).Error(unintendedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != expectedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
if received.Class != expectedClass {
|
||||
t.Errorf("Unexpected error class: %s", received.Class)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
|
||||
// non-error type using logrus.Fields, a HTTP server emulating an Airbrake
|
||||
// endpoint receives the logrus.Entry.Message string.
|
||||
//
|
||||
// Only error types are supported when setting the 'error' field using
|
||||
// logrus.WithFields().
|
||||
func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": expectedMsg,
|
||||
}).Error(unintendedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != unintendedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
func startAirbrakeServer(t *testing.T) *httptest.Server {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var notice notice
|
||||
if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
r.Body.Close()
|
||||
|
||||
noticeError <- notice.Error
|
||||
}))
|
||||
|
||||
return ts
|
||||
}
|
vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go (vendored, new file, 68 lines)

@@ -0,0 +1,68 @@
package logrus_bugsnag

import (
	"errors"

	"github.com/Sirupsen/logrus"
	"github.com/bugsnag/bugsnag-go"
)

type bugsnagHook struct{}

// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
// bugsnag.Configure. Bugsnag must be configured before the hook.
var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")

// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
// failed.
type ErrBugsnagSendFailed struct {
	err error
}

func (e ErrBugsnagSendFailed) Error() string {
	return "failed to send error to Bugsnag: " + e.err.Error()
}

// NewBugsnagHook initializes a logrus hook which sends exceptions to an
// exception-tracking service compatible with the Bugsnag API. Before using
// this hook, you must call bugsnag.Configure(). The returned object should be
// registered with a log via `AddHook()`
//
// Entries that trigger an Error, Fatal or Panic should now include an "error"
// field to send to Bugsnag.
func NewBugsnagHook() (*bugsnagHook, error) {
	if bugsnag.Config.APIKey == "" {
		return nil, ErrBugsnagUnconfigured
	}
	return &bugsnagHook{}, nil
}

// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
// "error" field (or the Message if the error isn't present) and sends it off.
func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
	var notifyErr error
	err, ok := entry.Data["error"].(error)
	if ok {
		notifyErr = err
	} else {
		notifyErr = errors.New(entry.Message)
	}

	bugsnagErr := bugsnag.Notify(notifyErr)
	if bugsnagErr != nil {
		return ErrBugsnagSendFailed{bugsnagErr}
	}

	return nil
}

// Levels enumerates the log levels on which the error should be forwarded to
// bugsnag: everything at or above the "Error" level.
func (hook *bugsnagHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.ErrorLevel,
		logrus.FatalLevel,
		logrus.PanicLevel,
	}
}
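A usage sketch for the Bugsnag hook above; per its doc comment, `bugsnag.Configure` must run before `NewBugsnagHook`, otherwise the constructor returns `ErrBugsnagUnconfigured` (the API key here is a placeholder):

```go
package main

import (
	"errors"

	"github.com/Sirupsen/logrus"
	logrus_bugsnag "github.com/Sirupsen/logrus/hooks/bugsnag"
	"github.com/bugsnag/bugsnag-go"
)

func main() {
	// Configure first; the hook refuses to install without an API key.
	bugsnag.Configure(bugsnag.Configuration{APIKey: "0123456789abcdef0123456789abcdef"})

	hook, err := logrus_bugsnag.NewBugsnagHook()
	if err != nil {
		panic(err)
	}

	log := logrus.New()
	log.Hooks.Add(hook)

	// The "error" field, when present, is what gets notified; otherwise
	// the entry message is wrapped in a new error.
	log.WithFields(logrus.Fields{"error": errors.New("boom")}).Error("request failed")
}
```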
vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go (vendored, new file, 64 lines)

@@ -0,0 +1,64 @@
package logrus_bugsnag

import (
	"encoding/json"
	"errors"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/bugsnag/bugsnag-go"
)

type notice struct {
	Events []struct {
		Exceptions []struct {
			Message string `json:"message"`
		} `json:"exceptions"`
	} `json:"events"`
}

func TestNoticeReceived(t *testing.T) {
	msg := make(chan string, 1)
	expectedMsg := "foo"

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var notice notice
		data, _ := ioutil.ReadAll(r.Body)
		if err := json.Unmarshal(data, &notice); err != nil {
			t.Error(err)
		}
		_ = r.Body.Close()

		msg <- notice.Events[0].Exceptions[0].Message
	}))
	defer ts.Close()

	hook := &bugsnagHook{}

	bugsnag.Configure(bugsnag.Configuration{
		Endpoint:     ts.URL,
		ReleaseStage: "production",
		APIKey:       "12345678901234567890123456789012",
		Synchronous:  true,
	})

	log := logrus.New()
	log.Hooks.Add(hook)

	log.WithFields(logrus.Fields{
		"error": errors.New(expectedMsg),
	}).Error("Bugsnag will not see this string")

	select {
	case received := <-msg:
		if received != expectedMsg {
			t.Errorf("Unexpected message received: %s", received)
		}
	case <-time.After(time.Second):
		t.Error("Timed out; no notice received by Bugsnag API")
	}
}
@@ -11,11 +11,12 @@ type JSONFormatter struct{}
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 	data := make(Fields, len(entry.Data)+3)
 	for k, v := range entry.Data {
-		// Otherwise errors are ignored by `encoding/json`
-		// https://github.com/Sirupsen/logrus/issues/137
-		if err, ok := v.(error); ok {
-			data[k] = err.Error()
-		} else {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
 			data[k] = v
 		}
 	}
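The special case exists because `encoding/json` serializes most error implementations as `{}` (they have no exported fields), losing the message entirely; that is the issue #137 linked above. A quick demonstration:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// A bare error marshals to an empty object: the message is lost.
	raw, _ := json.Marshal(map[string]interface{}{"error": err})
	fmt.Println(string(raw)) // {"error":{}}

	// Storing err.Error() instead, as the formatter now does, keeps it.
	fixed, _ := json.Marshal(map[string]interface{}{"error": err.Error()})
	fmt.Println(string(fixed)) // {"error":"boom"}
}
```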
vendor/src/github.com/Sirupsen/logrus/logger.go (vendored, 84 changes)

@@ -65,11 +65,15 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
 }

 func (logger *Logger) Debugf(format string, args ...interface{}) {
-	NewEntry(logger).Debugf(format, args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debugf(format, args...)
+	}
 }

 func (logger *Logger) Infof(format string, args ...interface{}) {
-	NewEntry(logger).Infof(format, args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Infof(format, args...)
+	}
 }

 func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -77,31 +81,45 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
 }

 func (logger *Logger) Warnf(format string, args ...interface{}) {
-	NewEntry(logger).Warnf(format, args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnf(format, args...)
+	}
 }

 func (logger *Logger) Warningf(format string, args ...interface{}) {
-	NewEntry(logger).Warnf(format, args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnf(format, args...)
+	}
 }

 func (logger *Logger) Errorf(format string, args ...interface{}) {
-	NewEntry(logger).Errorf(format, args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Errorf(format, args...)
+	}
 }

 func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	NewEntry(logger).Fatalf(format, args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatalf(format, args...)
+	}
 }

 func (logger *Logger) Panicf(format string, args ...interface{}) {
-	NewEntry(logger).Panicf(format, args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panicf(format, args...)
+	}
 }

 func (logger *Logger) Debug(args ...interface{}) {
-	NewEntry(logger).Debug(args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debug(args...)
+	}
 }

 func (logger *Logger) Info(args ...interface{}) {
-	NewEntry(logger).Info(args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Info(args...)
+	}
 }

 func (logger *Logger) Print(args ...interface{}) {
@@ -109,31 +127,45 @@ func (logger *Logger) Print(args ...interface{}) {
 }

 func (logger *Logger) Warn(args ...interface{}) {
-	NewEntry(logger).Warn(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warn(args...)
+	}
 }

 func (logger *Logger) Warning(args ...interface{}) {
-	NewEntry(logger).Warn(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warn(args...)
+	}
 }

 func (logger *Logger) Error(args ...interface{}) {
-	NewEntry(logger).Error(args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Error(args...)
+	}
 }

 func (logger *Logger) Fatal(args ...interface{}) {
-	NewEntry(logger).Fatal(args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatal(args...)
+	}
 }

 func (logger *Logger) Panic(args ...interface{}) {
-	NewEntry(logger).Panic(args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panic(args...)
+	}
 }

 func (logger *Logger) Debugln(args ...interface{}) {
-	NewEntry(logger).Debugln(args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debugln(args...)
+	}
 }

 func (logger *Logger) Infoln(args ...interface{}) {
-	NewEntry(logger).Infoln(args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Infoln(args...)
+	}
 }

 func (logger *Logger) Println(args ...interface{}) {
@@ -141,21 +173,31 @@ func (logger *Logger) Println(args ...interface{}) {
 }

 func (logger *Logger) Warnln(args ...interface{}) {
-	NewEntry(logger).Warnln(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnln(args...)
+	}
 }

 func (logger *Logger) Warningln(args ...interface{}) {
-	NewEntry(logger).Warnln(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnln(args...)
+	}
 }

 func (logger *Logger) Errorln(args ...interface{}) {
-	NewEntry(logger).Errorln(args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Errorln(args...)
+	}
 }

 func (logger *Logger) Fatalln(args ...interface{}) {
-	NewEntry(logger).Fatalln(args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatalln(args...)
+	}
 }

 func (logger *Logger) Panicln(args ...interface{}) {
-	NewEntry(logger).Panicln(args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panicln(args...)
+	}
 }
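The effect of the guards added above, in a minimal sketch: a call below `logger.Level` now returns before an `Entry` is even allocated (previously the entry was built first and the level was only checked inside the Entry methods):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Level = logrus.WarnLevel

	// With the guards, this returns immediately: no Entry allocation,
	// no fmt.Sprintf. (The arguments are still evaluated at the call
	// site, as with any Go function call.)
	log.Debugf("debug detail: %d", 42)

	log.Warnf("still emitted") // at or above WarnLevel
}
```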
@@ -1,4 +1,3 @@
-
 package logrus

 import "syscall"
@@ -3,7 +3,6 @@ package logrus

 import (
 	"bytes"
 	"fmt"
-	"regexp"
 	"sort"
 	"strings"
 	"time"
@@ -21,7 +20,6 @@ const (
 var (
 	baseTimestamp time.Time
 	isTerminal    bool
-	noQuoteNeeded *regexp.Regexp
 )

 func init() {
@@ -6,7 +6,7 @@ import (
 	"runtime"
 )

-func (logger *Logger) Writer() (*io.PipeWriter) {
+func (logger *Logger) Writer() *io.PipeWriter {
 	reader, writer := io.Pipe()

 	go logger.writerScanner(reader)