Merge pull request #1779 from fcrisciani/revert-1777-move_provider

Revert "Move Cluster provider back to Moby"
Commit 430367de2d by Madhu Venugopal, 2017-05-28 14:01:11 -07:00, committed by GitHub
113 changed files with 1795 additions and 3401 deletions

View file

@ -11,9 +11,9 @@ import (
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-events"
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
@ -193,7 +193,7 @@ func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error {
return nil
}
func (c *controller) agentSetup(clusterProvider provider.Cluster) error {
func (c *controller) agentSetup(clusterProvider cluster.Provider) error {
agent := c.getAgent()
// If the agent is already present there is no need to try to initialize it again

View file

@ -0,0 +1,36 @@
package cluster
import (
"github.com/docker/docker/api/types/network"
"golang.org/x/net/context"
)
const (
// EventSocketChange control socket changed
EventSocketChange = iota
// EventNodeReady cluster node in ready state
EventNodeReady
// EventNodeLeave node is leaving the cluster
EventNodeLeave
// EventNetworkKeysAvailable network keys correctly configured in the networking layer
EventNetworkKeysAvailable
)
// ConfigEventType type of the event produced by the cluster
type ConfigEventType uint8
// Provider provides clustering config details
type Provider interface {
IsManager() bool
IsAgent() bool
GetLocalAddress() string
GetListenAddress() string
GetAdvertiseAddress() string
GetDataPathAddress() string
GetRemoteAddressList() []string
ListenClusterEvents() <-chan ConfigEventType
AttachNetwork(string, string, []string) (*network.NetworkingConfig, error)
DetachNetwork(string, string) error
UpdateAttachment(string, string, *network.NetworkingConfig) error
WaitForDetachment(context.Context, string, string, string, string) error
}
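
For orientation, here is a minimal sketch of a type that would satisfy the cluster.Provider interface defined above. It is illustrative only and not part of this change; the package name clusterstub, the type nullProvider, and the placeholder return values are hypothetical.

```go
// Hypothetical package: a do-nothing cluster.Provider, the kind of stub a
// test harness could hand to controller.SetClusterProvider.
package clusterstub

import (
	"github.com/docker/docker/api/types/network"
	"github.com/docker/libnetwork/cluster"
	"golang.org/x/net/context"
)

type nullProvider struct {
	events chan cluster.ConfigEventType
}

// newNullProvider uses a buffered channel, mirroring the capacity-10 channel
// that dnet allocates for its configEvent field elsewhere in this diff.
func newNullProvider() *nullProvider {
	return &nullProvider{events: make(chan cluster.ConfigEventType, 10)}
}

func (p *nullProvider) IsManager() bool                { return false }
func (p *nullProvider) IsAgent() bool                  { return false }
func (p *nullProvider) GetLocalAddress() string        { return "127.0.0.1" } // placeholder address
func (p *nullProvider) GetListenAddress() string       { return "0.0.0.0" }   // placeholder address
func (p *nullProvider) GetAdvertiseAddress() string    { return "127.0.0.1" } // placeholder address
func (p *nullProvider) GetDataPathAddress() string     { return "" }
func (p *nullProvider) GetRemoteAddressList() []string { return nil }

func (p *nullProvider) ListenClusterEvents() <-chan cluster.ConfigEventType {
	return p.events
}

func (p *nullProvider) AttachNetwork(string, string, []string) (*network.NetworkingConfig, error) {
	return nil, nil
}

func (p *nullProvider) DetachNetwork(string, string) error { return nil }

func (p *nullProvider) UpdateAttachment(string, string, *network.NetworkingConfig) error {
	return nil
}

func (p *nullProvider) WaitForDetachment(context.Context, string, string, string, string) error {
	return nil
}
```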

View file

@ -24,10 +24,10 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/pkg/term"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/api"
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/driverapi"
@ -235,7 +235,7 @@ type dnetConnection struct {
// addr holds the client address.
addr string
Orchestration *NetworkOrchestration
configEvent chan provider.ClusterConfigEventType
configEvent chan cluster.ConfigEventType
}
// NetworkOrchestration exported
@ -276,7 +276,7 @@ func (d *dnetConnection) dnetDaemon(cfgFile string) error {
controller.SetClusterProvider(d)
if d.Orchestration.Agent || d.Orchestration.Manager {
d.configEvent <- provider.ClusterEventNodeReady
d.configEvent <- cluster.EventNodeReady
}
createDefaultNetwork(controller)
@ -336,7 +336,7 @@ func (d *dnetConnection) GetNetworkKeys() []*types.EncryptionKey {
func (d *dnetConnection) SetNetworkKeys([]*types.EncryptionKey) {
}
func (d *dnetConnection) ListenClusterEvents() <-chan provider.ClusterConfigEventType {
func (d *dnetConnection) ListenClusterEvents() <-chan cluster.ConfigEventType {
return d.configEvent
}
@ -439,7 +439,7 @@ func newDnetConnection(val string) (*dnetConnection, error) {
return nil, errors.New("dnet currently only supports tcp transport")
}
return &dnetConnection{protoAddrParts[0], protoAddrParts[1], &NetworkOrchestration{}, make(chan provider.ClusterConfigEventType, 10)}, nil
return &dnetConnection{protoAddrParts[0], protoAddrParts[1], &NetworkOrchestration{}, make(chan cluster.ConfigEventType, 10)}, nil
}
func (d *dnetConnection) httpCall(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, http.Header, int, error) {

View file

@ -5,11 +5,11 @@ import (
"github.com/BurntSushi/toml"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/libkv/store"
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/osl"
@ -33,7 +33,7 @@ type DaemonCfg struct {
DefaultDriver string
Labels []string
DriverCfg map[string]interface{}
ClusterProvider provider.Cluster
ClusterProvider cluster.Provider
}
// ClusterCfg represents cluster configuration

View file

@ -53,12 +53,12 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
@ -123,7 +123,7 @@ type NetworkController interface {
ReloadConfiguration(cfgOptions ...config.Option) error
// SetClusterProvider sets cluster provider
SetClusterProvider(provider provider.Cluster)
SetClusterProvider(provider cluster.Provider)
// Wait for agent initialization complete in libnetwork controller
AgentInitWait()
@ -243,7 +243,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
return c, nil
}
func (c *controller) SetClusterProvider(provider provider.Cluster) {
func (c *controller) SetClusterProvider(provider cluster.Provider) {
var sameProvider bool
c.Lock()
// Avoid spawning multiple goroutines for the same cluster provider
@ -307,17 +307,17 @@ func (c *controller) clusterAgentInit() {
var keysAvailable bool
for {
eventType := <-clusterProvider.ListenClusterEvents()
// The events: ClusterEventSocketChange, ClusterEventNodeReady and ClusterEventNetworkKeysAvailable are not ordered
// The events: EventSocketChange, EventNodeReady and EventNetworkKeysAvailable are not ordered
// when all the conditions for the agent initialization are met, then proceed with it
switch eventType {
case provider.ClusterEventNetworkKeysAvailable:
case cluster.EventNetworkKeysAvailable:
// Validates that the keys are actually available before starting the initialization
// This will handle old spurious messages left on the channel
c.Lock()
keysAvailable = c.keys != nil
c.Unlock()
fallthrough
case provider.ClusterEventSocketChange, provider.ClusterEventNodeReady:
case cluster.EventSocketChange, cluster.EventNodeReady:
if keysAvailable && !c.isDistributedControl() {
c.agentOperationStart()
if err := c.agentSetup(clusterProvider); err != nil {
@ -326,7 +326,7 @@ func (c *controller) clusterAgentInit() {
c.agentInitComplete()
}
}
case provider.ClusterEventNodeLeave:
case cluster.EventNodeLeave:
keysAvailable = false
c.agentOperationStart()
c.Lock()
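
The switch above only runs agentSetup once keys are available and a socket-change or node-ready event arrives, and it begins agent teardown on a leave event. For context, here is a hedged sketch of the producer side, written as a second file of the hypothetical clusterstub package from the earlier example; the method names signalReady and signalLeave are invented for illustration and mirror what dnetDaemon does when it pushes cluster.EventNodeReady.

```go
package clusterstub

import "github.com/docker/libnetwork/cluster"

// signalReady pushes events in the order the controller loop tolerates:
// keys first, then node readiness, after which agentSetup can proceed.
func (p *nullProvider) signalReady() {
	p.events <- cluster.EventNetworkKeysAvailable
	p.events <- cluster.EventNodeReady
}

// signalLeave tells the controller the node left; the loop resets its
// keysAvailable flag and starts agent teardown.
func (p *nullProvider) signalLeave() {
	p.events <- cluster.EventNodeLeave
}
```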

View file

@ -11,7 +11,7 @@ github.com/coreos/etcd 925d1d74cec8c3b169c52fd4b2dc234a35934fce
github.com/coreos/go-systemd b4a58d95188dd092ae20072bac14cece0e67c388
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
github.com/docker/docker e18f50891a92786c43d467e012a2404edab416d3 https://github.com/fcrisciani/docker
github.com/docker/docker 9c96768eae4b3a65147b47a55c850c103ab8972d
github.com/docker/go-connections 34b5052da6b11e27f5f2e357b38b571ddddd3928
github.com/docker/go-events 2e7d352816128aa84f4d29b2a21d400133701a0d
github.com/docker/go-units 8e2d4523730c73120e10d4652f36ad6010998f4e
@ -19,11 +19,11 @@ github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/godbus/dbus 5f6efc7ef2759c81b7ba876593971bfce311eab3
github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
github.com/golang/protobuf f7137ae6b19afbfd61a94b746fda3b3fe0491874
github.com/golang/protobuf/proto f7137ae6b19afbfd61a94b746fda3b3fe0491874
github.com/gorilla/context 215affda49addc4c8ef7e2534915df2c8c35c6cd
github.com/gorilla/mux 8096f47503459bcc74d1f4c487b7e6e42e5746b5
github.com/hashicorp/consul 954aec66231b79c161a4122b023fbcad13047f79
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/consul/api 954aec66231b79c161a4122b023fbcad13047f79
github.com/hashicorp/go-msgpack/codec 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/go-multierror 2167c8ec40776024589f483a6b836489e47e1049
github.com/hashicorp/memberlist v0.1.0
github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
@ -31,13 +31,11 @@ github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/mattn/go-shellwords 525bedee691b5a8df547cb5cf9f86b7fb1883e24
github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
github.com/opencontainers/runc ba1568de399395774ad84c2ace65937814c542ed
github.com/opencontainers/image-spec 56b55a17598362bd1bf78c9c307738335a2510eb
github.com/opencontainers/go-digest eaa60544f31ccf3b0653b1a118b76d33418ff41b
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/opencontainers/runc/libcontainer ba1568de399395774ad84c2ace65937814c542ed
github.com/samuel/go-zookeeper/zk d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/syndtr/gocapability/capability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/vishvananda/netlink 1e86b2bee5b6a7d377e4c02bb7f98209d6a7297c
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25

View file

@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -1,5 +1,5 @@
Docker
Copyright 2012-2017 Docker, Inc.
Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).

View file

@ -1,80 +1,270 @@
### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest)
============================
### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
Docker is an open source project to pack, ship and run any application
as a lightweight container.
The Moby Project
================
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
cloud compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider.
![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
Docker began as an open-source implementation of the deployment engine which
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
a popular Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of hundreds
of thousands of applications and databases.
Moby is an open-source project created by Docker to advance the software containerization movement.
It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
![Docker logo](docs/static_files/docker-logo-compressed.png "Docker")
# Moby
## Security Disclosure
## Overview
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a GitHub issue.
At the core of Moby is a framework to assemble specialized container systems.
It provides:
## Better than VMs
- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
## Principles
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](https://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](https://linuxcontainers.org/), Solaris with
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
The guiding principles are:
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.
- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped for different implementations.
- Usable security: Moby will provide secure defaults without compromising usability.
- Container centric: Moby is built with containers, for running containers.
Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!
With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and build and deploy it easily.
## Plays well with others
Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.
## Audience
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Moby is recommended for anyone who wants to assemble a container-based system. This includes:
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
- Hackers who want to customize or patch their Docker build
- System engineers or integrators building a container system
- Infrastructure providers looking to adapt existing container systems to their environment
- Container enthusiasts who want to experiment with the latest container tech
- Open-source developers looking to test their project in a variety of different systems
- Anyone curious about Docker internals and how it's built
## Escape dependency hell
Moby is NOT recommended for:
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead.
This is usually difficult for several reasons:
# Transitioning to Moby
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
Docker is transitioning all of its open source collaborations to the Moby project going forward.
During the transition, all open source activity should continue as usual.
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
We are proposing the following list of changes:
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
- splitting up the engine into more open components
- removing the docker UI, SDK etc to keep them in the Docker org
- clarifying that the project is not limited to the engine, but to the assembly of all the individual components of the Docker platform
- open-source new tools & components which we currently use to assemble the Docker product, but could benefit the community
- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
-----
Docker solves the problem of dependency hell by giving the developer a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Legal
=====
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
Here's a typical Docker build process:
```bash
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Getting started
===============
Docker can be installed either on your computer for building applications or
on servers for running them. To get started, [check out the installation
instructions in the
documentation](https://docs.docker.com/engine/installation/).
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases, etc.), interactive shell sessions, etc.
You can find a [list of real-world
examples](https://docs.docker.com/engine/examples/) in the
documentation.
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
and
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
======================
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
These instructions are probably not perfect, please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
Getting the development builds
==============================
Want to run Docker from a master build? You can download
master builds at [master.dockerproject.org](https://master.dockerproject.org).
They are updated with each commit merged into the master branch.
Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
How the project is run
======================
Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
We are always open to suggestions on process improvements, and are always looking for more maintainers.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> group on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
</td>
</tr>
<tr>
<td>Docker Community Forums</td>
<td>
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
group is for users of the Docker Engine project.
</td>
</tr>
<tr>
<td>Google Groups</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
target="_blank">docker-dev</a> group is for contributors and other people
contributing to the Docker project. You can join this group without a
Google account by sending an email to <a
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
You'll receive a join-request message; simply reply to the message to
confirm your subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 7000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
Use and transfer of Moby may be subject to certain restrictions by the
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
@ -85,6 +275,30 @@ For more information, please see https://www.bis.doc.gov
Licensing
=========
Moby is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
Other Docker Related Projects
=============================
There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
Docker on Mac and Windows
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.

View file

@ -14,8 +14,8 @@ It consists of various components in this repository:
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
1. To automatically generate documentation.
2. To automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation

View file

@ -4,6 +4,7 @@ import (
"bufio"
"io"
"net"
"os"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
@ -97,7 +98,6 @@ type ContainerStartOptions struct {
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
CopyUIDGID bool
}
// EventsOptions holds parameters to filter events with.
@ -160,10 +160,9 @@ type ImageBuildOptions struct {
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
// BuildArgs needs to be a *string instead of just a string so that
// we can tell the difference between "" (empty string) and no value
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
// See the parsing of buildArgs in api/server/router/build/build_routes.go
// for an explanation of why BuildArgs needs to use *string instead of
// just a string
BuildArgs map[string]*string
AuthConfigs map[string]AuthConfig
Context io.Reader
@ -176,8 +175,6 @@ type ImageBuildOptions struct {
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
ExtraHosts []string // List of extra hosts
Target string
}
// ImageBuildResponse holds information
@ -195,8 +192,8 @@ type ImageCreateOptions struct {
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
}
// ImageImportOptions holds information to import images from the client host.
@ -259,6 +256,18 @@ type ResizeOptions struct {
Width uint
}
// VersionResponse holds version information for the client and the server
type VersionResponse struct {
Client *Version
Server *Version
}
// ServerOK returns true when the client could connect to the docker server
// and parse the information received. It returns false otherwise.
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
@ -276,12 +285,6 @@ type ServiceCreateOptions struct {
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceCreateResponse contains the information returned to a client
@ -315,32 +318,14 @@ type ServiceUpdateOptions struct {
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
// Rollback indicates whether a server-side rollback should be
// performed. When this is set, the provided spec will be ignored.
// The valid values are "previous" and "none". An empty value is the
// same as "none".
Rollback string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceListOptions holds parameters to list services with.
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args
}
// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
InsertDefaults bool
}
// TaskListOptions holds parameters to list tasks with.
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
@ -371,6 +356,15 @@ type PluginInstallOptions struct {
Args []string
}
// SecretRequestOption is a type for requesting secrets
type SecretRequestOption struct {
Source string
Target string
UID string
GID string
Mode os.FileMode
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {

View file

@ -7,12 +7,6 @@ import (
"github.com/docker/go-connections/nat"
)
// MinimumDuration puts a minimum on user configured duration.
// This is to prevent API error on time unit. For example, API may
// set 3 as healthcheck interval with intention of 3 seconds, but
// Docker interprets it as 3 nanoseconds.
const MinimumDuration = 1 * time.Millisecond
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
@ -25,9 +19,8 @@ type HealthConfig struct {
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.

View file

@ -1,21 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerChangeResponseItem container change response item
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {
// Kind of change
// Required: true
Kind uint8 `json:"Kind"`
// Path to file that has changed
// Required: true
Path string `json:"Path"`
}

View file

@ -1,21 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerTopOKBody container top o k body
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
// Each process running in the container, where each process is an array of values corresponding to the titles
// Required: true
Processes [][]string `json:"Processes"`
// The ps column titles
// Required: true
Titles []string `json:"Titles"`
}

View file

@ -10,6 +10,9 @@ import (
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
@ -63,47 +66,6 @@ func (n IpcMode) Container() string {
return ""
}
// NetworkMode represents the container network stack.
type NetworkMode string
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
// UsernsMode represents userns mode in the container.
type UsernsMode string
@ -261,17 +223,6 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// LogMode is a type to define the available modes for logging
// These modes affect how logs are handled when log messages start piling up.
type LogMode string
// Available logging modes
const (
LogModeUnset = ""
LogModeBlocking LogMode = "blocking"
LogModeNonBlock LogMode = "non-blocking"
)
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
@ -300,7 +251,6 @@ type Resources struct {
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rule to be added to the device cgroup
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
@ -377,4 +327,7 @@ type HostConfig struct {
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}

View file

@ -2,11 +2,23 @@
package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
@ -35,7 +47,35 @@ func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View file

@ -4,6 +4,22 @@ import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// in windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
@ -16,9 +32,20 @@ func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer is the id of the container whose network this container is connected to.
// Returns blank string on windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
@ -44,11 +71,17 @@ func (n NetworkMode) NetworkName() string {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsContainer() {
return "container"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View file

@ -1,22 +0,0 @@
package container
// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string
// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
WaitConditionNotRunning WaitCondition = "not-running"
WaitConditionNextExit WaitCondition = "next-exit"
WaitConditionRemoved WaitCondition = "removed"
)

View file

@ -79,8 +79,8 @@ func ToParamWithVersion(version string, a Args) (string, error) {
}
// for daemons older than v1.10, filter must be of the form map[string][]string
var buf []byte
var err error
buf := []byte{}
err := errors.New("")
if version != "" && versions.LessThan(version, "1.22") {
buf, err = json.Marshal(convertArgsToSlice(a.fields))
} else {

View file

@ -1,17 +0,0 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {
// data
// Required: true
Data map[string]string `json:"Data"`
// name
// Required: true
Name string `json:"Name"`
}

View file

@ -1,15 +0,0 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageDeleteResponseItem image delete response item
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {
// The image ID of an image that was deleted
Deleted string `json:"Deleted,omitempty"`
// The image ID of an image that was untagged
Untagged string `json:"Untagged,omitempty"`
}

View file

@ -23,10 +23,9 @@ type Mount struct {
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
// Source is not supported for tmpfs (must be an empty value)
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Consistency Consistency `json:",omitempty"`
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
BindOptions *BindOptions `json:",omitempty"`
VolumeOptions *VolumeOptions `json:",omitempty"`
@ -61,20 +60,6 @@ var Propagations = []Propagation{
PropagationSlave,
}
// Consistency represents the consistency requirements of a mount.
type Consistency string
const (
// ConsistencyFull guarantees bind-mount-like consistency
ConsistencyFull Consistency = "consistent"
// ConsistencyCached mounts can cache read data and FS structure
ConsistencyCached Consistency = "cached"
// ConsistencyDelegated mounts can cache read and written data and structure
ConsistencyDelegated Consistency = "delegated"
// ConsistencyDefault provides "consistent" behavior unless overridden
ConsistencyDefault Consistency = "default"
)
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation Propagation `json:",omitempty"`
@ -98,7 +83,7 @@ type TmpfsOptions struct {
// Size sets the size of the tmpfs, in bytes.
//
// This will be converted to an operating system specific value
// depending on the host. For example, on linux, it will be converted to
// depending on the host. For example, on linux, it will be convered to
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
// docker, uses a straight byte value.
//

View file

@ -28,14 +28,6 @@ type EndpointIPAMConfig struct {
LinkLocalIPs []string `json:",omitempty"`
}
// Copy makes a copy of the endpoint ipam config
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
cfgCopy := *cfg
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
return &cfgCopy
}
// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
Name string
@ -58,42 +50,6 @@ type EndpointSettings struct {
GlobalIPv6Address string
GlobalIPv6PrefixLen int
MacAddress string
DriverOpts map[string]string
}
// Task carries the information about one backend task
type Task struct {
Name string
EndpointID string
EndpointIP string
Info map[string]string
}
// ServiceInfo represents service parameters with the list of service's tasks
type ServiceInfo struct {
VIP string
Ports []string
LocalLBIndex int
Tasks []Task
}
// Copy makes a deep copy of `EndpointSettings`
func (es *EndpointSettings) Copy() *EndpointSettings {
epCopy := *es
if es.IPAMConfig != nil {
epCopy.IPAMConfig = es.IPAMConfig.Copy()
}
if es.Links != nil {
links := make([]string, 0, len(es.Links))
epCopy.Links = append(links, es.Links...)
}
if es.Aliases != nil {
aliases := make([]string, 0, len(es.Aliases))
epCopy.Aliases = append(aliases, es.Aliases...)
}
return &epCopy
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
@ -101,8 +57,3 @@ func (es *EndpointSettings) Copy() *EndpointSettings {
type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
// ConfigReference specifies the source which provides a network's configuration
type ConfigReference struct {
Network string
}

View file

@ -22,9 +22,6 @@ type Plugin struct {
// Required: true
Name string `json:"Name"`
// plugin remote reference used to push/pull the plugin
PluginReference string `json:"PluginReference,omitempty"`
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
@ -42,9 +39,6 @@ type PluginConfig struct {
// Required: true
Description string `json:"Description"`
// Docker Version used to create the plugin
DockerVersion string `json:"DockerVersion,omitempty"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
@ -61,10 +55,6 @@ type PluginConfig struct {
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// ipc host
// Required: true
IpcHost bool `json:"IpcHost"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
@ -77,10 +67,6 @@ type PluginConfig struct {
// Required: true
Network PluginConfigNetwork `json:"Network"`
// pid host
// Required: true
PidHost bool `json:"PidHost"`
// propagated mount
// Required: true
PropagatedMount string `json:"PropagatedMount"`
@ -134,14 +120,14 @@ type PluginConfigInterface struct {
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// allow all devices
// Required: true
AllowAllDevices bool `json:"AllowAllDevices"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// device creation
// Required: true
DeviceCreation bool `json:"DeviceCreation"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`

View file

@ -3,7 +3,6 @@ package types
import (
"encoding/json"
"fmt"
"sort"
)
// PluginsListResponse contains the response for the Engine API
@ -63,17 +62,3 @@ type PluginPrivilege struct {
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
func (s PluginPrivileges) Len() int {
return len(s)
}
func (s PluginPrivileges) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
func (s PluginPrivileges) Swap(i, j int) {
sort.Strings(s[i].Value)
sort.Strings(s[j].Value)
s[i], s[j] = s[j], s[i]
}

View file

@ -3,17 +3,13 @@ package registry
import (
"encoding/json"
"net"
"github.com/opencontainers/image-spec/specs-go/v1"
)
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
AllowNondistributableArtifactsCIDRs []*NetIPNet
AllowNondistributableArtifactsHostnames []string
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
}
// NetIPNet is the net.IPNet type, which can be marshalled and
@ -106,14 +102,3 @@ type SearchResults struct {
// Results is a slice containing the actual results for the search
Results []SearchResult `json:"results"`
}
// DistributionInspect describes the result obtained from contacting the
// registry to retrieve image metadata
type DistributionInspect struct {
// Descriptor contains information about the manifest, including
// the content addressable digest
Descriptor v1.Descriptor
// Platforms contains the list of platforms supported by the image,
// obtained by parsing the manifest
Platforms []v1.Platform
}

View file

@ -47,9 +47,6 @@ type CPUStats struct {
// System Usage. Linux only.
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
// Online CPUs. Linux only.
OnlineCPUs uint32 `json:"online_cpus,omitempty"`
// Throttling Data. Linux only.
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}

View file

@ -17,7 +17,7 @@ type Meta struct {
// Annotations represents how to describe an object.
type Annotations struct {
Name string `json:",omitempty"`
Labels map[string]string `json:"Labels"`
Labels map[string]string `json:",omitempty"`
}
// Driver represents a driver (network, logging).
@ -25,16 +25,3 @@ type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TLSInfo represents the TLS information about what CA certificate is trusted,
// and who the issuer for a TLS certificate is
type TLSInfo struct {
// TrustRoot is the trusted CA root certificate in PEM format
TrustRoot string `json:",omitempty"`
// CertIssuer is the raw subject bytes of the issuer
CertIssuerSubject []byte `json:",omitempty"`
// CertIssuerPublicKey is the raw public key bytes of the issuer
CertIssuerPublicKey []byte `json:",omitempty"`
}

View file

@ -1,31 +0,0 @@
package swarm
import "os"
// Config represents a config.
type Config struct {
ID string
Meta
Spec ConfigSpec
}
// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
Annotations
Data []byte `json:",omitempty"`
}
// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
File *ConfigReferenceFileTarget
ConfigID string
ConfigName string
}

View file

@ -21,28 +21,6 @@ type DNSConfig struct {
Options []string `json:",omitempty"`
}
// SELinuxContext contains the SELinux labels of the container.
type SELinuxContext struct {
Disable bool
User string
Role string
Type string
Level string
}
// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
File string
Registry string
}
// Privileges defines the security options for the container.
type Privileges struct {
CredentialSpec *CredentialSpec
SELinuxContext *SELinuxContext
}
// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
Image string `json:",omitempty"`
@ -54,11 +32,8 @@ type ContainerSpec struct {
Dir string `json:",omitempty"`
User string `json:",omitempty"`
Groups []string `json:",omitempty"`
Privileges *Privileges `json:",omitempty"`
StopSignal string `json:",omitempty"`
TTY bool `json:",omitempty"`
OpenStdin bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Mounts []mount.Mount `json:",omitempty"`
StopGracePeriod *time.Duration `json:",omitempty"`
Healthcheck *container.HealthConfig `json:",omitempty"`
@ -68,5 +43,4 @@ type ContainerSpec struct {
Hosts []string `json:",omitempty"`
DNSConfig *DNSConfig `json:",omitempty"`
Secrets []*SecretReference `json:",omitempty"`
Configs []*ConfigReference `json:",omitempty"`
}

View file

@ -1,9 +1,5 @@
package swarm
import (
"github.com/docker/docker/api/types/network"
)
// Endpoint represents an endpoint.
type Endpoint struct {
Spec EndpointSpec `json:",omitempty"`
@ -82,21 +78,17 @@ type Network struct {
// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
Annotations
DriverConfiguration *Driver `json:",omitempty"`
IPv6Enabled bool `json:",omitempty"`
Internal bool `json:",omitempty"`
Attachable bool `json:",omitempty"`
Ingress bool `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
ConfigFrom *network.ConfigReference `json:",omitempty"`
Scope string `json:",omitempty"`
DriverConfiguration *Driver `json:",omitempty"`
IPv6Enabled bool `json:",omitempty"`
Internal bool `json:",omitempty"`
Attachable bool `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
}
// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
Target string `json:",omitempty"`
Aliases []string `json:",omitempty"`
DriverOpts map[string]string `json:",omitempty"`
Target string `json:",omitempty"`
Aliases []string `json:",omitempty"`
}
// NetworkAttachment represents a network attachment.

View file

@ -52,7 +52,6 @@ type NodeDescription struct {
Platform Platform `json:",omitempty"`
Resources Resources `json:",omitempty"`
Engine EngineDescription `json:",omitempty"`
TLSInfo TLSInfo `json:",omitempty"`
}
// Platform represents the platform (Arch/OS).

View file

@ -1,19 +0,0 @@
package swarm
// RuntimeType is the type of runtime used for the TaskSpec
type RuntimeType string
// RuntimeURL is the proto type url
type RuntimeURL string
const (
// RuntimeContainer is the container based runtime
RuntimeContainer RuntimeType = "container"
// RuntimePlugin is the plugin based runtime
RuntimePlugin RuntimeType = "plugin"
// RuntimeURLContainer is the proto url for the container type
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
// RuntimeURLPlugin is the proto url for the plugin type
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
)

View file

@ -18,10 +18,9 @@ type ServiceSpec struct {
// TaskTemplate defines how the service should construct new tasks when
// orchestrating this service.
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`
RollbackConfig *UpdateConfig `json:",omitempty"`
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`
// Networks field in ServiceSpec is deprecated. The
// same field in TaskSpec should be used instead.
@ -46,12 +45,6 @@ const (
UpdateStatePaused UpdateState = "paused"
// UpdateStateCompleted is the completed state.
UpdateStateCompleted UpdateState = "completed"
// UpdateStateRollbackStarted is the state with a rollback in progress.
UpdateStateRollbackStarted UpdateState = "rollback_started"
// UpdateStateRollbackPaused is the state with a rollback in progress.
UpdateStateRollbackPaused UpdateState = "rollback_paused"
// UpdateStateRollbackCompleted is the state with a rollback in progress.
UpdateStateRollbackCompleted UpdateState = "rollback_completed"
)
// UpdateStatus reports the status of a service update.
@ -75,13 +68,6 @@ const (
UpdateFailureActionPause = "pause"
// UpdateFailureActionContinue CONTINUE
UpdateFailureActionContinue = "continue"
// UpdateFailureActionRollback ROLLBACK
UpdateFailureActionRollback = "rollback"
// UpdateOrderStopFirst STOP_FIRST
UpdateOrderStopFirst = "stop-first"
// UpdateOrderStartFirst START_FIRST
UpdateOrderStartFirst = "start-first"
)
// UpdateConfig represents the update configuration.
@ -116,9 +102,4 @@ type UpdateConfig struct {
// If the failure action is PAUSE, no more tasks will be updated until
// another update is started.
MaxFailureRatio float32
// Order indicates the order of operations when rolling out an updated
// task. Either the old task is shut down before the new task is
// started, or the new task is started before the old task is shut down.
Order string
}

View file

@ -7,9 +7,7 @@ import "time"
type ClusterInfo struct {
ID string
Meta
Spec Spec
TLSInfo TLSInfo
RootRotationInProgress bool
Spec Spec
}
// Swarm represents a swarm.
@ -109,16 +107,6 @@ type CAConfig struct {
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
ExternalCAs []*ExternalCA `json:",omitempty"`
// SigningCACert and SigningCAKey specify the desired signing root CA and
// root CA key for the swarm. When inspecting the cluster, the key will
// be redacted.
SigningCACert string `json:",omitempty"`
SigningCAKey string `json:",omitempty"`
// If this value changes, and there is no specified signing cert and key,
// then the swarm is forced to generate a new root certificate and key.
ForceRotate uint64 `json:",omitempty"`
}
// ExternalCAProtocol represents type of external CA.
@ -138,31 +126,23 @@ type ExternalCA struct {
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `json:",omitempty"`
// CACert specifies which root CA is used by this external CA. This certificate must
// be in PEM format.
CACert string
}
// InitRequest is the request used to init a swarm.
type InitRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
ForceNewCluster bool
Spec Spec
AutoLockManagers bool
Availability NodeAvailability
}
// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
RemoteAddrs []string
JoinToken string // accept by secret
Availability NodeAvailability
}
// UnlockRequest is the request used to unlock a swarm.
@ -197,10 +177,10 @@ type Info struct {
Error string
RemoteManagers []Peer
Nodes int `json:",omitempty"`
Managers int `json:",omitempty"`
Nodes int
Managers int
Cluster *ClusterInfo `json:",omitempty"`
Cluster ClusterInfo
}
// Peer represents a peer.

View file

@ -65,8 +65,6 @@ type TaskSpec struct {
// ForceUpdate is a counter that triggers an update even if no relevant
// parameters have been changed.
ForceUpdate uint64
Runtime RuntimeType `json:",omitempty"`
}
// Resources represents resources (CPU/Memory).
@ -83,26 +81,7 @@ type ResourceRequirements struct {
// Placement represents orchestration parameters.
type Placement struct {
Constraints []string `json:",omitempty"`
Preferences []PlacementPreference `json:",omitempty"`
// Platforms stores all the platforms that the image can run on.
// This field is used in the platform filter for scheduling. If empty,
// then the platform filter is off, meaning there are no scheduling restrictions.
Platforms []Platform `json:",omitempty"`
}
// PlacementPreference provides a way to make the scheduler aware of factors
// such as topology.
type PlacementPreference struct {
Spread *SpreadOver
}
// SpreadOver is a scheduling preference that instructs the scheduler to spread
// tasks evenly over groups of nodes identified by labels.
type SpreadOver struct {
// label descriptor, such as engine.labels.az
SpreadDescriptor string
Constraints []string `json:",omitempty"`
}
// RestartPolicy represents the restart policy.

View file

@ -17,6 +17,38 @@ import (
"github.com/docker/go-connections/nat"
)
// ContainerChange contains response of Engine API:
// GET "/containers/{name:.*}/changes"
type ContainerChange struct {
Kind int
Path string
}
// ImageHistory contains response of Engine API:
// GET "/images/{name:.*}/history"
type ImageHistory struct {
ID string `json:"Id"`
Created int64
CreatedBy string
Tags []string
Size int64
Comment string
}
// ImageDelete contains response of Engine API:
// DELETE "/images/{name:.*}"
type ImageDelete struct {
Untagged string `json:",omitempty"`
Deleted string `json:",omitempty"`
}
// GraphDriverData returns Image's graph driver config info
// when calling inspect command
type GraphDriverData struct {
Name string
Data map[string]string
}
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
Type string
@ -93,11 +125,17 @@ type ContainerStats struct {
OSType string `json:"ostype"`
}
// ContainerProcessList contains response of Engine API:
// GET "/containers/{name:.*}/top"
type ContainerProcessList struct {
Processes [][]string
Titles []string
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
OSType string
Experimental bool
}
@ -238,8 +276,6 @@ type PluginsInfo struct {
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// ExecStartCheck is a temp struct used by execStart
@ -277,7 +313,7 @@ type Health struct {
// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Status string
Running bool
Paused bool
Restarting bool
@ -393,23 +429,19 @@ type MountPoint struct {
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is used internal only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
Services map[string]network.ServiceInfo `json:",omitempty"`
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is used internal only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
}
// EndpointResource contains network resources allocated and used for a container in a network
@ -423,23 +455,12 @@ type EndpointResource struct {
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
// Check for networks with duplicate names.
// Network is primarily keyed based on a random ID and not on the name.
// Network name is strictly a user-friendly alias to the network
// which is uniquely identified using ID.
// And there is no guaranteed way to check for duplicates.
// Option CheckDuplicate is there to provide a best effort checking of any networks
// which has the same name but it is not guaranteed to catch all name collisions.
CheckDuplicate bool
Driver string
Scope string
EnableIPv6 bool
IPAM *network.IPAM
Internal bool
Attachable bool
Ingress bool
ConfigOnly bool
ConfigFrom *network.ConfigReference
Options map[string]string
Labels map[string]string
}
@ -505,7 +526,7 @@ type VolumesPruneReport struct {
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDeleteResponseItem
ImagesDeleted []ImageDelete
SpaceReclaimed uint64
}
@ -527,18 +548,6 @@ type SecretListOptions struct {
Filters filters.Args
}
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
@ -547,8 +556,3 @@ type PushResult struct {
Digest string
Size int
}
// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}

View file

@ -1,36 +0,0 @@
package provider
import (
"github.com/docker/docker/api/types/network"
"golang.org/x/net/context"
)
const (
// ClusterEventSocketChange control socket changed
ClusterEventSocketChange = iota
// ClusterEventNodeReady cluster node in ready state
ClusterEventNodeReady
// ClusterEventNodeLeave node is leaving the cluster
ClusterEventNodeLeave
// ClusterEventNetworkKeysAvailable network keys correctly configured in the networking layer
ClusterEventNetworkKeysAvailable
)
// ClusterConfigEventType type of the event produced by the cluster
type ClusterConfigEventType uint8
// Cluster provides clustering config details
type Cluster interface {
IsManager() bool
IsAgent() bool
GetLocalAddress() string
GetListenAddress() string
GetAdvertiseAddress() string
GetDataPathAddress() string
GetRemoteAddressList() []string
ListenClusterEvents() <-chan ClusterConfigEventType
AttachNetwork(string, string, []string) (*network.NetworkingConfig, error)
DetachNetwork(string, string) error
UpdateAttachment(string, string, *network.NetworkingConfig) error
WaitForDetachment(context.Context, string, string, string, string) error
}

View file

@ -1,37 +0,0 @@
package provider
import "github.com/docker/docker/api/types"
// NetworkCreateRequest is a request when creating a network.
type NetworkCreateRequest struct {
ID string
types.NetworkCreateRequest
}
// NetworkCreateResponse is a response when creating a network.
type NetworkCreateResponse struct {
ID string `json:"Id"`
}
// VirtualAddress represents a virtual address.
type VirtualAddress struct {
IPv4 string
IPv6 string
}
// PortConfig represents a port configuration.
type PortConfig struct {
Name string
Protocol int32
TargetPort uint32
PublishedPort uint32
}
// ServiceConfig represents a service configuration.
type ServiceConfig struct {
ID string
Name string
Aliases map[string][]string
VirtualAddresses map[string]*VirtualAddress
ExposedPorts []*PortConfig
}

View file

@ -1,68 +0,0 @@
## About
This directory contains a collection of scripts used to build and manage this
repository. If there are any issues regarding the intention of a particular
script (or even part of a certain script), please reach out to us.
It may help us either refine our current scripts, or add on new ones
that are appropriate for a given use case.
## DinD (dind.sh)
DinD is a wrapper script which allows Docker to be run inside a Docker
container. DinD requires the container to
be run with privileged mode enabled.
## Generate Authors (generate-authors.sh)
Generates AUTHORS; a file with all the names and corresponding emails of
individual contributors. AUTHORS can be found in the home directory of
this repository.
## Install (install.sh)
Executable install script for installing Docker. If updates to this are
desired, please use hack/release.sh during a normal release. The following
one-liner may be used for script hotfixes:
- `aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index`
## Make
There are two make files, each with different extensions. Neither is supposed
to be called directly; only invoke `make`. Both scripts run inside a Docker
container.
### make.ps1
- The Windows native build script that uses PowerShell semantics; it is more limited
than `hack\make.sh`, since it does not provide support for the full set of
operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
does provide support for local Windows development and Windows to Windows CI.
More information is found within `make.ps1` by the author, @jhowardmsft
### make.sh
- Referenced via `make test` when running tests on a local machine,
or directly referenced when running tests inside a Docker development container.
- When running on a local machine, run `make test` to run all tests found in
`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on
your local machine. The default timeout is set in `make.sh` to 60 minutes
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
all of the tests.
- When running inside a Docker development container, `hack/make.sh` does
not have a single target that runs all the tests. You need to provide a
single command line with multiple targets that performs the same thing.
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py`
- For more information related to testing outside the scope of this README,
refer to
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
## Release (release.sh)
Releases any bundles built by `make` on a public AWS S3 bucket.
For information regarding configuration, please view `release.sh`.
## Vendor (vendor.sh)
A shell script that is a wrapper around Vndr. For information on how to use
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)

View file

@ -1,69 +0,0 @@
# Integration Testing on Swarm
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster
## Architecture
### Master service
- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
### Worker service
- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
### Client
- Controls master and workers via `docker stack`
- No need to have a local daemon
Typically, the master and workers are supposed to be running on a cloud environment,
while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
## Requirement
- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes
## Usage
### Step 1: Prepare images
$ make build-integration-cli-on-swarm
The following environment variables are known to work in this step:
- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
### Step 2: Execute tests
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
The following environment variables are known to work in this step:
- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`
#### Flags
Basic flags:
- `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
Experimental flags for mitigating makespan nonuniformity:
- `-shuffle`: Shuffle the test filter strings
Flags for debugging IT on Swarm itself:
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers; this is useful for running privileged programs on Swarm. A combined example invocation follows this list.
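For example, a hypothetical invocation that combines the flags above (the registry name, replica count, and chunk count are placeholders):
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -chunks 80 -shuffle -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest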

View file

@ -1,2 +0,0 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773

View file

@ -0,0 +1,171 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/go-units"
)
// MountOpt is a Value type for parsing mounts
type MountOpt struct {
values []mounttypes.Mount
}
// Set a new mount value
func (m *MountOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
mount := mounttypes.Mount{}
volumeOptions := func() *mounttypes.VolumeOptions {
if mount.VolumeOptions == nil {
mount.VolumeOptions = &mounttypes.VolumeOptions{
Labels: make(map[string]string),
}
}
if mount.VolumeOptions.DriverConfig == nil {
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
}
return mount.VolumeOptions
}
bindOptions := func() *mounttypes.BindOptions {
if mount.BindOptions == nil {
mount.BindOptions = new(mounttypes.BindOptions)
}
return mount.BindOptions
}
tmpfsOptions := func() *mounttypes.TmpfsOptions {
if mount.TmpfsOptions == nil {
mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
}
return mount.TmpfsOptions
}
setValueOnMap := func(target map[string]string, value string) {
parts := strings.SplitN(value, "=", 2)
if len(parts) == 1 {
target[value] = ""
} else {
target[parts[0]] = parts[1]
}
}
mount.Type = mounttypes.TypeVolume // default to volume mounts
// Set writable as the default
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) == 1 {
switch key {
case "readonly", "ro":
mount.ReadOnly = true
continue
case "volume-nocopy":
volumeOptions().NoCopy = true
continue
}
}
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "type":
mount.Type = mounttypes.Type(strings.ToLower(value))
case "source", "src":
mount.Source = value
case "target", "dst", "destination":
mount.Target = value
case "readonly", "ro":
mount.ReadOnly, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
case "bind-propagation":
bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
case "volume-nocopy":
volumeOptions().NoCopy, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for populate: %s", value)
}
case "volume-label":
setValueOnMap(volumeOptions().Labels, value)
case "volume-driver":
volumeOptions().DriverConfig.Name = value
case "volume-opt":
if volumeOptions().DriverConfig.Options == nil {
volumeOptions().DriverConfig.Options = make(map[string]string)
}
setValueOnMap(volumeOptions().DriverConfig.Options, value)
case "tmpfs-size":
sizeBytes, err := units.RAMInBytes(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().SizeBytes = sizeBytes
case "tmpfs-mode":
ui64, err := strconv.ParseUint(value, 8, 32)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().Mode = os.FileMode(ui64)
default:
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
if mount.Type == "" {
return fmt.Errorf("type is required")
}
if mount.Target == "" {
return fmt.Errorf("target is required")
}
if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
}
if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
}
if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
}
m.values = append(m.values, mount)
return nil
}
// Type returns the type of this option
func (m *MountOpt) Type() string {
return "mount"
}
// String returns a string repr of this option
func (m *MountOpt) String() string {
mounts := []string{}
for _, mount := range m.values {
repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
mounts = append(mounts, repr)
}
return strings.Join(mounts, ", ")
}
// Value returns the mounts
func (m *MountOpt) Value() []mounttypes.Mount {
return m.values
}
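A minimal sketch of how a caller might drive MountOpt, assuming the package is importable as `opts` (the import path below is illustrative; this copy is vendored under libnetwork):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	var m opts.MountOpt
	// Bare keys such as "readonly" act as boolean flags; "volume-label" takes
	// key=value pairs and is stored in VolumeOptions.Labels.
	if err := m.Set("type=volume,source=data,target=/data,volume-label=env=prod,readonly"); err != nil {
		panic(err)
	}
	for _, mnt := range m.Value() {
		fmt.Printf("%s %s -> %s (readonly=%v)\n", mnt.Type, mnt.Source, mnt.Target, mnt.ReadOnly)
	}
}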

View file

@ -2,12 +2,13 @@ package opts
import (
"fmt"
"math/big"
"net"
"path"
"regexp"
"strings"
units "github.com/docker/go-units"
"github.com/docker/docker/api/types/filters"
)
var (
@ -36,10 +37,7 @@ func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
}
func (opts *ListOpts) String() string {
if len(*opts.values) == 0 {
return ""
}
return fmt.Sprintf("%v", *opts.values)
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates if needed the input value and adds it to the
@ -234,6 +232,15 @@ func ValidateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
}
return val, nil
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
@ -263,6 +270,111 @@ func ValidateLabel(val string) (string, error) {
return val, nil
}
// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
arr := strings.Split(val, "=")
if len(arr) < 2 {
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
if validSysctlMap[arr[0]] {
return val, nil
}
for _, vp := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], vp) {
return val, nil
}
}
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
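A small sketch of the whitelist behaviour of ValidateSysctl above (assuming the package is imported as `opts`):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	// Accepted: exact whitelisted kernel.* keys, or anything under "net." / "fs.mqueue.".
	if v, err := opts.ValidateSysctl("net.ipv4.ip_forward=1"); err == nil {
		fmt.Println("ok:", v)
	}
	// Rejected: "vm.*" is neither whitelisted nor under an allowed prefix.
	if _, err := opts.ValidateSysctl("vm.swappiness=10"); err != nil {
		fmt.Println("rejected:", err)
	}
}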
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
// NanoCPUs is a type for fixed point fractional number.
type NanoCPUs int64
// String returns the string format of the number
func (c *NanoCPUs) String() string {
return big.NewRat(c.Value(), 1e9).FloatString(3)
}
// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
cpus, err := ParseCPUs(value)
*c = NanoCPUs(cpus)
return err
}
// Type returns the type
func (c *NanoCPUs) Type() string {
return "decimal"
}
// Value returns the value in int64
func (c *NanoCPUs) Value() int64 {
return int64(*c)
}
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
func ParseCPUs(value string) (int64, error) {
cpu, ok := new(big.Rat).SetString(value)
if !ok {
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
}
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
if !nano.IsInt() {
return 0, fmt.Errorf("value is too precise")
}
return nano.Num().Int64(), nil
}
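A worked example of the conversion above, assuming the package is imported as `opts`: "0.5" CPUs becomes 500000000 nano-CPUs, while a ratio finer than 1e-9 is rejected.
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	n, err := opts.ParseCPUs("0.5")
	fmt.Println(n, err) // 500000000 <nil>

	// More than nine decimal places cannot be represented as an integer
	// number of nano-CPUs, so ParseCPUs returns "value is too precise".
	_, err = opts.ParseCPUs("0.0000000001")
	fmt.Println(err)
}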
// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
if val == "" {
@ -285,43 +397,8 @@ func ParseLink(val string) (string, string, error) {
return arr[0], arr[1], nil
}
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
type MemBytes int64
// String returns the string format of the human readable memory bytes
func (m *MemBytes) String() string {
// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
// We return "0" in case value is 0 here so that the default value is hidden.
// (Sometimes "default 0 B" is actually misleading)
if m.Value() != 0 {
return units.BytesSize(float64(m.Value()))
}
return "0"
}
// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = MemBytes(val)
return err
}
// Type returns the type
func (m *MemBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemBytes) Value() int64 {
return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalJSON(s []byte) error {
if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
return fmt.Errorf("invalid size: %q", s)
}
val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
*m = MemBytes(val)
return err
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
_, _, err := ParseLink(val)
return val, err
}

libnetwork/vendor/github.com/docker/docker/opts/port.go (146 lines, generated, vendored)
View file

@ -0,0 +1,146 @@
package opts
import (
"encoding/csv"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
const (
portOptTargetPort = "target"
portOptPublishedPort = "published"
portOptProtocol = "protocol"
portOptMode = "mode"
)
// PortOpt represents a port config in swarm mode.
type PortOpt struct {
ports []swarm.PortConfig
}
// Set a new port value
func (p *PortOpt) Set(value string) error {
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
if err != nil {
return err
}
if longSyntax {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
pConfig := swarm.PortConfig{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("invalid field %s", field)
}
key := strings.ToLower(parts[0])
value := strings.ToLower(parts[1])
switch key {
case portOptProtocol:
if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
return fmt.Errorf("invalid protocol value %s", value)
}
pConfig.Protocol = swarm.PortConfigProtocol(value)
case portOptMode:
if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
return fmt.Errorf("invalid publish mode value %s", value)
}
pConfig.PublishMode = swarm.PortConfigPublishMode(value)
case portOptTargetPort:
tPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.TargetPort = uint32(tPort)
case portOptPublishedPort:
pPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.PublishedPort = uint32(pPort)
default:
return fmt.Errorf("invalid field key %s", key)
}
}
if pConfig.TargetPort == 0 {
return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
}
if pConfig.PublishMode == "" {
pConfig.PublishMode = swarm.PortConfigPublishModeIngress
}
if pConfig.Protocol == "" {
pConfig.Protocol = swarm.PortConfigProtocolTCP
}
p.ports = append(p.ports, pConfig)
} else {
// short syntax
portConfigs := []swarm.PortConfig{}
// We can ignore errors because the format was already validated by ValidatePort
ports, portBindings, _ := nat.ParsePortSpecs([]string{value})
for port := range ports {
portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...)
}
p.ports = append(p.ports, portConfigs...)
}
return nil
}
// Type returns the type of this option
func (p *PortOpt) Type() string {
return "port"
}
// String returns a string repr of this option
func (p *PortOpt) String() string {
ports := []string{}
for _, port := range p.ports {
repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
ports = append(ports, repr)
}
return strings.Join(ports, ", ")
}
// Value returns the ports
func (p *PortOpt) Value() []swarm.PortConfig {
return p.ports
}
// ConvertPortToPortConfig converts ports to the swarm type
func ConvertPortToPortConfig(
port nat.Port,
portBindings map[nat.Port][]nat.PortBinding,
) []swarm.PortConfig {
ports := []swarm.PortConfig{}
for _, binding := range portBindings[port] {
hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16)
ports = append(ports, swarm.PortConfig{
//TODO Name: ?
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
TargetPort: uint32(port.Int()),
PublishedPort: uint32(hostPort),
PublishMode: swarm.PortConfigPublishModeIngress,
})
}
return ports
}
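A brief sketch of both syntaxes accepted by PortOpt (assuming the package is imported as `opts`):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	var p opts.PortOpt

	// Long syntax: comma-separated key=value pairs.
	if err := p.Set("mode=ingress,target=80,published=8080,protocol=tcp"); err != nil {
		panic(err)
	}

	// Short syntax: delegated to nat.ParsePortSpecs, published in ingress mode.
	if err := p.Set("9090:90/udp"); err != nil {
		panic(err)
	}

	for _, pc := range p.Value() {
		fmt.Printf("%d -> %d %s/%s\n", pc.PublishedPort, pc.TargetPort, pc.Protocol, pc.PublishMode)
	}
}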

View file

@ -0,0 +1,107 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/docker/docker/api/types"
)
// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
values []*types.SecretRequestOption
}
// Set a new secret value
func (o *SecretOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
options := &types.SecretRequestOption{
Source: "",
Target: "",
UID: "0",
GID: "0",
Mode: 0444,
}
// support a simple syntax of --secret foo
if len(fields) == 1 {
options.Source = fields[0]
options.Target = fields[0]
o.values = append(o.values, options)
return nil
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "source":
options.Source = value
case "target":
tDir, _ := filepath.Split(value)
if tDir != "" {
return fmt.Errorf("target must not be a path")
}
options.Target = value
case "uid":
options.UID = value
case "gid":
options.GID = value
case "mode":
m, err := strconv.ParseUint(value, 0, 32)
if err != nil {
return fmt.Errorf("invalid mode specified: %v", err)
}
options.Mode = os.FileMode(m)
default:
if len(fields) == 1 && value == "" {
} else {
return fmt.Errorf("invalid field in secret request: %s", key)
}
}
}
if options.Source == "" {
return fmt.Errorf("source is required")
}
o.values = append(o.values, options)
return nil
}
// Type returns the type of this option
func (o *SecretOpt) Type() string {
return "secret"
}
// String returns a string repr of this option
func (o *SecretOpt) String() string {
secrets := []string{}
for _, secret := range o.values {
repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target)
secrets = append(secrets, repr)
}
return strings.Join(secrets, ", ")
}
// Value returns the secret requests
func (o *SecretOpt) Value() []*types.SecretRequestOption {
return o.values
}
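A short sketch of both secret syntaxes (assuming the package is imported as `opts`; the secret names are placeholders):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	var s opts.SecretOpt

	// Simple syntax: source and target default to the same name, mode 0444.
	if err := s.Set("my_secret"); err != nil {
		panic(err)
	}

	// Full syntax: target must be a bare file name, mode is parsed as octal.
	if err := s.Set("source=site.key,target=site.key,uid=33,gid=33,mode=0400"); err != nil {
		panic(err)
	}

	for _, sec := range s.Value() {
		fmt.Printf("%s -> %s (mode %o)\n", sec.Source, sec.Target, sec.Mode)
	}
}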

View file

@ -0,0 +1,111 @@
package opts
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/go-units"
)
// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)
// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
rate, err := units.RAMInBytes(split[1])
if err != nil {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
}
if rate < 0 {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
}
return &blkiodev.ThrottleDevice{
Path: split[0],
Rate: uint64(rate),
}, nil
}
// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
rate, err := strconv.ParseUint(split[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
if rate < 0 {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
return &blkiodev.ThrottleDevice{
Path: split[0],
Rate: uint64(rate),
}, nil
}
// ThrottledeviceOpt defines a map of ThrottleDevices
type ThrottledeviceOpt struct {
values []*blkiodev.ThrottleDevice
validator ValidatorThrottleFctType
}
// NewThrottledeviceOpt creates a new ThrottledeviceOpt
func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
values := []*blkiodev.ThrottleDevice{}
return ThrottledeviceOpt{
values: values,
validator: validator,
}
}
// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
func (opt *ThrottledeviceOpt) Set(val string) error {
var value *blkiodev.ThrottleDevice
if opt.validator != nil {
v, err := opt.validator(val)
if err != nil {
return err
}
value = v
}
(opt.values) = append((opt.values), value)
return nil
}
// String returns ThrottledeviceOpt values as a string.
func (opt *ThrottledeviceOpt) String() string {
var out []string
for _, v := range opt.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
// GetList returns a slice of pointers to ThrottleDevices.
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
var throttledevice []*blkiodev.ThrottleDevice
throttledevice = append(throttledevice, opt.values...)
return throttledevice
}
// Type returns the option type
func (opt *ThrottledeviceOpt) Type() string {
return "list"
}
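A minimal sketch of ThrottledeviceOpt with the bps validator (assuming the package is imported as `opts`; the device path is a placeholder):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	td := opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice)
	// "10mb" is parsed with units.RAMInBytes, so the stored rate is 10*1024*1024 bytes/s.
	if err := td.Set("/dev/sda:10mb"); err != nil {
		panic(err)
	}
	for _, dev := range td.GetList() {
		fmt.Printf("%s: %d bytes/s\n", dev.Path, dev.Rate)
	}
}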

View file

@ -0,0 +1,89 @@
package opts
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/blkiodev"
)
// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)
// ValidateWeightDevice validates that the specified string has a valid device-weight format.
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
weight, err := strconv.ParseUint(split[1], 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid weight for device: %s", val)
}
if weight > 0 && (weight < 10 || weight > 1000) {
return nil, fmt.Errorf("invalid weight for device: %s", val)
}
return &blkiodev.WeightDevice{
Path: split[0],
Weight: uint16(weight),
}, nil
}
// WeightdeviceOpt defines a map of WeightDevices
type WeightdeviceOpt struct {
values []*blkiodev.WeightDevice
validator ValidatorWeightFctType
}
// NewWeightdeviceOpt creates a new WeightdeviceOpt
func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
values := []*blkiodev.WeightDevice{}
return WeightdeviceOpt{
values: values,
validator: validator,
}
}
// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
func (opt *WeightdeviceOpt) Set(val string) error {
var value *blkiodev.WeightDevice
if opt.validator != nil {
v, err := opt.validator(val)
if err != nil {
return err
}
value = v
}
(opt.values) = append((opt.values), value)
return nil
}
// String returns WeightdeviceOpt values as a string.
func (opt *WeightdeviceOpt) String() string {
var out []string
for _, v := range opt.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
// GetList returns a slice of pointers to WeightDevices.
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
var weightdevice []*blkiodev.WeightDevice
for _, v := range opt.values {
weightdevice = append(weightdevice, v)
}
return weightdevice
}
// Type returns the option type
func (opt *WeightdeviceOpt) Type() string {
return "list"
}
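WeightdeviceOpt follows the same pattern; the extra rule enforced by ValidateWeightDevice is that a non-zero weight must fall in [10, 1000]. A short sketch (device paths are placeholders, package assumed importable as `opts`):
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // illustrative import path
)

func main() {
	wd := opts.NewWeightdeviceOpt(opts.ValidateWeightDevice)
	if err := wd.Set("/dev/sda:500"); err != nil { // valid: 10 <= 500 <= 1000
		panic(err)
	}
	if err := wd.Set("/dev/sdb:5"); err != nil { // rejected: non-zero but below 10
		fmt.Println(err)
	}
	for _, dev := range wd.GetList() {
		fmt.Printf("%s: weight %d\n", dev.Path, dev.Weight)
	}
}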

View file

@ -0,0 +1,22 @@
package ioutils
import (
"fmt"
"io"
)
// FprintfIfNotEmpty prints the string value if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
if value != "" {
return fmt.Fprintf(w, format, value)
}
return 0, nil
}
// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
if ok {
return fmt.Fprintf(w, format, ok)
}
return 0, nil
}

View file

@ -152,8 +152,7 @@ func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
func (r *multiReadSeeker) Read(b []byte) (int, error) {
if r.pos == nil {
// make sure all readers are at 0
r.Seek(0, os.SEEK_SET)
r.pos = &pos{0, 0}
}
bLen := int64(len(b))

View file

@ -45,5 +45,4 @@ const (
RELATIME = 0
REMOUNT = 0
STRICTATIME = 0
mntDetach = 0
)

View file

@ -82,6 +82,4 @@ const (
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = syscall.MS_STRICTATIME
mntDetach = syscall.MNT_DETACH
)

View file

@ -27,5 +27,4 @@ const (
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
mntDetach = 0
)

View file

@ -1,8 +1,7 @@
package mount
import (
"sort"
"strings"
"time"
)
// GetMounts retrieves a list of mounts for the current running process.
@ -47,40 +46,29 @@ func Mount(device, target, mType, options string) error {
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
flag, data := parseOptions(options)
return mount(device, target, mType, uintptr(flag), data)
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
return err
}
return nil
}
// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
if mounted, err := Mounted(target); err != nil || !mounted {
return err
}
return unmount(target, mntDetach)
return ForceUnmount(target)
}
// RecursiveUnmount unmounts the target and all mounts underneath, starting with
// the deepest mount first.
func RecursiveUnmount(target string) error {
mounts, err := GetMounts()
if err != nil {
return err
}
// Make the deepest mount be first
sort.Sort(sort.Reverse(byMountpoint(mounts)))
for i, m := range mounts {
if !strings.HasPrefix(m.Mountpoint, target) {
continue
}
if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
return err
}
// Ignore errors for submounts and continue trying to unmount others
// The final unmount should fail if there are any submounts remaining
// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
// Simple retry logic for unmount
for i := 0; i < 10; i++ {
if err = unmount(target, 0); err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return nil
return
}
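A small sketch of the caller-facing API restored above (Linux only; the target path is a placeholder), assuming the package is importable as `mount`:
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount" // illustrative path; this copy is vendored under libnetwork
)

func main() {
	target := "/mnt/scratch" // placeholder; the directory must already exist

	// Mount a small tmpfs, then unmount it. With the code restored above,
	// Unmount is a no-op when the target is not mounted, and ForceUnmount
	// retries up to ten times with a 100ms pause between attempts.
	if err := mount.Mount("tmpfs", target, "tmpfs", "size=16m"); err != nil {
		log.Fatal(err)
	}
	if err := mount.Unmount(target); err != nil {
		log.Fatal(err)
	}
}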

View file

@ -4,50 +4,15 @@ import (
"syscall"
)
const (
// ptypes is the set of propagation types.
ptypes = syscall.MS_SHARED | syscall.MS_PRIVATE | syscall.MS_SLAVE | syscall.MS_UNBINDABLE
// pflags is the full set of valid flags for a change propagation call.
pflags = ptypes | syscall.MS_REC | syscall.MS_SILENT
// broflags is the combination of bind and read only
broflags = syscall.MS_BIND | syscall.MS_RDONLY
)
// isremount returns true if either device name or flags identify a remount request, false otherwise.
func isremount(device string, flags uintptr) bool {
switch {
// We treat device "" and "none" as a remount request to provide compatibility with
// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
case flags&syscall.MS_REMOUNT != 0, device == "", device == "none":
return true
default:
return false
}
}
func mount(device, target, mType string, flags uintptr, data string) error {
oflags := flags &^ ptypes
if !isremount(device, flags) {
// Initial call applying all non-propagation flags.
if err := syscall.Mount(device, target, mType, oflags, data); err != nil {
return err
}
func mount(device, target, mType string, flag uintptr, data string) error {
if err := syscall.Mount(device, target, mType, flag, data); err != nil {
return err
}
if flags&ptypes != 0 {
// Change the propagation type.
if err := syscall.Mount("", target, "", flags&pflags, ""); err != nil {
return err
}
// If we have a bind mount or remount, remount...
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
}
if oflags&broflags == broflags {
// Remount the bind to apply read only.
return syscall.Mount("", target, "", oflags|syscall.MS_REMOUNT, "")
}
return nil
}

View file

@ -38,17 +38,3 @@ type Info struct {
// VfsOpts represents per super block options.
VfsOpts string
}
type byMountpoint []*Info
func (by byMountpoint) Len() int {
return len(by)
}
func (by byMountpoint) Less(i, j int) bool {
return by[i].Mountpoint < by[j].Mountpoint
}
func (by byMountpoint) Swap(i, j int) {
by[i], by[j] = by[j], by[i]
}

View file

@ -1,4 +1,4 @@
// +build linux freebsd solaris openbsd
// +build linux freebsd solaris
// Package kernel provides helper function to get, parse and compare kernel
// versions for different platforms.

View file

@ -78,6 +78,12 @@ type Plugin struct {
handlersRun bool
}
// BasePath returns the path to which all paths returned by the plugin are relative to.
// For v1 plugins, this always returns the host's root directory.
func (p *Plugin) BasePath() string {
return "/"
}
// Name returns the name of the plugin.
func (p *Plugin) Name() string {
return p.name
@ -169,7 +175,7 @@ func (p *Plugin) activateWithLock() error {
func (p *Plugin) waitActive() error {
p.activateWait.L.Lock()
for !p.activated() && p.activateErr == nil {
for !p.activated() {
p.activateWait.Wait()
}
p.activateWait.L.Unlock()

View file

@ -1,9 +0,0 @@
// +build !windows
package plugins
// BasePath returns the path to which all paths returned by the plugin are relative to.
// For v1 plugins, this always returns the host's root directory.
func (p *Plugin) BasePath() string {
return "/"
}

View file

@ -1,8 +0,0 @@
package plugins
// BasePath returns the path to which all paths returned by the plugin are relative to.
// For Windows v1 plugins, this returns an empty string, since the plugin is already aware
// of the absolute path of the mount.
func (p *Plugin) BasePath() string {
return ""
}

View file

@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2014-2017 Docker, Inc.
Copyright 2014-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -1,4 +1,4 @@
Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved.
Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

View file

@ -3,7 +3,6 @@
package system
import (
"io/ioutil"
"os"
"path/filepath"
)
@ -25,7 +24,7 @@ func IsAbs(path string) bool {
return filepath.IsAbs(path)
}
// The functions below here are wrappers for the equivalents in the os and ioutils packages.
// The functions below here are wrappers for the equivalents in the os package.
// They are passthrough on Unix platforms, and only relevant on Windows.
// CreateSequential creates the named file with mode 0666 (before umask), truncating
@ -53,16 +52,3 @@ func OpenSequential(name string) (*os.File, error) {
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(name, flag, perm)
}
// TempFileSequential creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
return ioutil.TempFile(dir, prefix)
}

View file

@ -6,11 +6,8 @@ import (
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
winio "github.com/Microsoft/go-winio"
@ -237,55 +234,3 @@ func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle,
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
return h, e
}
// Helpers for TempFileSequential
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextSuffix())
f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}

View file

@ -2,7 +2,9 @@
package system
import "syscall"
import (
"syscall"
)
// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.

View file

@ -1,14 +1,25 @@
// +build windows
package system
import "os"
import (
"os"
)
// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
// Note the Linux version uses fromStatT to do the copy back,
// but that is not strictly necessary when already in an OS-specific module.
func Lstat(path string) (*StatT, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, err
}
return fromStatT(&fi)
return &StatT{
name: fi.Name(),
size: fi.Size(),
mode: fi.Mode(),
modTime: fi.ModTime(),
isDir: fi.IsDir()}, nil
}

View file

@ -7,7 +7,6 @@ import (
"unsafe"
)
// #cgo CFLAGS: -std=c99
// #cgo LDFLAGS: -lkstat
// #include <unistd.h>
// #include <stdlib.h>

View file

@ -0,0 +1,20 @@
package system
// IsProcessAlive returns true if process with a given pid is running.
func IsProcessAlive(pid int) bool {
// TODO Windows containerd. Not sure this is needed
// p, err := os.FindProcess(pid)
// if err == nil {
// return true
// }
return false
}
// KillProcess force-stops a process.
func KillProcess(pid int) {
// TODO Windows containerd. Not sure this is needed
// p, err := os.FindProcess(pid)
// if err == nil {
// p.Kill()
// }
}

View file

@ -1,80 +0,0 @@
package system
import (
"os"
"syscall"
"time"
"github.com/docker/docker/pkg/mount"
"github.com/pkg/errors"
)
// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
// often be remedied.
// Only use `EnsureRemoveAll` if you really want to make every effort to remove
// a directory.
//
// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
// can be a race between reading directory entries and then actually attempting
// to remove everything in the directory.
// These types of errors do not need to be returned since it's ok for the dir to
// be gone; we can just retry the remove operation.
//
// This should not return an `os.ErrNotExist` kind of error under any circumstances
func EnsureRemoveAll(dir string) error {
notExistErr := make(map[string]bool)
// track retries
exitOnErr := make(map[string]int)
maxRetry := 5
// Attempt to unmount anything beneath this dir first
mount.RecursiveUnmount(dir)
for {
err := os.RemoveAll(dir)
if err == nil {
return err
}
pe, ok := err.(*os.PathError)
if !ok {
return err
}
if os.IsNotExist(err) {
if notExistErr[pe.Path] {
return err
}
notExistErr[pe.Path] = true
// There is a race where some subdir can be removed but after the parent
// dir entries have been read.
// So the path could be from `os.Remove(subdir)`
// If the reported non-existent path is not the passed in `dir` we
// should just retry, but otherwise return with no error.
if pe.Path == dir {
return nil
}
continue
}
if pe.Err != syscall.EBUSY {
return err
}
if mounted, _ := mount.Mounted(pe.Path); mounted {
if e := mount.Unmount(pe.Path); e != nil {
if mounted, _ := mount.Mounted(pe.Path); mounted {
return errors.Wrapf(e, "error while removing %s", dir)
}
}
}
if exitOnErr[pe.Path] == maxRetry {
return err
}
exitOnErr[pe.Path]++
time.Sleep(100 * time.Millisecond)
}
}

View file

@ -2,7 +2,9 @@
package system
import "syscall"
import (
"syscall"
)
// StatT type contains status of a file. It contains metadata
// like permission, owner, group, size, etc about a file.
@ -45,14 +47,7 @@ func (s StatT) Mtim() syscall.Timespec {
return s.mtim
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
// GetLastModification returns file's last modification time.
func (s StatT) GetLastModification() syscall.Timespec {
return s.Mtim()
}

View file

@ -1,8 +1,10 @@
package system
import "syscall"
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
@ -11,3 +13,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}
// FromStatT loads a system.StatT from a syscall.Stat_t.
func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View file

@ -1,6 +1,8 @@
package system
import "syscall"
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
@ -11,3 +13,15 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}
// Stat takes a path to a file and returns
// a system.Stat_t type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View file

@ -1,19 +1,33 @@
package system
import "syscall"
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
rdev: s.Rdev,
mtim: s.Mtim}, nil
}
// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
// This is exposed on Linux as pkg/archive/changes uses it.
// FromStatT exists only on linux, and loads a system.StatT from a
// syscall.Stat_t.
func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View file

@ -1,8 +1,10 @@
package system
import "syscall"
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),

View file

@ -1,8 +1,12 @@
// +build solaris
package system
import "syscall"
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
@ -11,3 +15,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
}
// FromStatT loads a system.StatT from a syscall.Stat_t.
func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View file

@ -0,0 +1,17 @@
// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin
package system
import (
"syscall"
)
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}

View file

@ -1,3 +1,5 @@
// +build windows
package system
import (
@ -6,11 +8,18 @@ import (
)
// StatT type contains status of a file. It contains metadata
// like permission, size, etc about a file.
// like name, permission, size, etc about a file.
type StatT struct {
mode os.FileMode
size int64
mtim time.Time
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
// Name returns file's name.
func (s StatT) Name() string {
return s.name
}
// Size returns file's size.
@ -20,30 +29,15 @@ func (s StatT) Size() int64 {
// Mode returns file's permission mode.
func (s StatT) Mode() os.FileMode {
return os.FileMode(s.mode)
return s.mode
}
// Mtim returns file's last modification time.
func (s StatT) Mtim() time.Time {
return time.Time(s.mtim)
// ModTime returns file's last modification time.
func (s StatT) ModTime() time.Time {
return s.modTime
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
return fromStatT(&fi)
}
// fromStatT converts an os.FileInfo type to a system.StatT type
func fromStatT(fi *os.FileInfo) (*StatT, error) {
return &StatT{
size: (*fi).Size(),
mode: (*fi).Mode(),
mtim: (*fi).ModTime()}, nil
// IsDir returns whether file is actually a directory.
func (s StatT) IsDir() bool {
return s.isDir
}

View file

@ -1,74 +0,0 @@
package term
import (
"io"
)
// EscapeError is special error which returned by a TTY proxy reader's Read()
// method in case its detach escape sequence is read.
type EscapeError struct{}
func (EscapeError) Error() string {
return "read escape sequence"
}
// escapeProxy is used only for attaches with a TTY. It is used to proxy
// stdin keypresses from the underlying reader and look for the passed in
// escape key sequence to signal a detach.
type escapeProxy struct {
escapeKeys []byte
escapeKeyPos int
r io.Reader
}
// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
// and detects when the specified escape keys are read, in which case the Read
// method will return an error of type EscapeError.
func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
return &escapeProxy{
escapeKeys: escapeKeys,
r: r,
}
}
func (r *escapeProxy) Read(buf []byte) (int, error) {
nr, err := r.r.Read(buf)
preserve := func() {
// this preserves the original key presses in the passed in buffer
nr += r.escapeKeyPos
preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
preserve = append(preserve, buf...)
r.escapeKeyPos = 0
copy(buf[0:nr], preserve)
}
if nr != 1 || err != nil {
if r.escapeKeyPos > 0 {
preserve()
}
return nr, err
}
if buf[0] != r.escapeKeys[r.escapeKeyPos] {
if r.escapeKeyPos > 0 {
preserve()
}
return nr, nil
}
if r.escapeKeyPos == len(r.escapeKeys)-1 {
return 0, EscapeError{}
}
// Looks like we've got an escape key, but we need to match again on the next
// read.
// Store the current escape key we found so we can look for the next one on
// the next read.
// Since this is an escape key, make sure we don't let the caller read it
// If later on we find that this is not the escape sequence, we'll add the
// keys back
r.escapeKeyPos++
return nr - r.escapeKeyPos, nil
}
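The file removed above implemented the TTY detach escape-sequence proxy. For reference, a sketch of how such a proxy is typically driven, assuming the pre-revert package still exports NewEscapeProxy and EscapeError, and using Ctrl-P, Ctrl-Q as an example detach sequence.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// 0x10 = Ctrl-P, 0x11 = Ctrl-Q; real callers read the keys from config.
	proxy := term.NewEscapeProxy(os.Stdin, []byte{0x10, 0x11})

	buf := make([]byte, 1024)
	for {
		n, err := proxy.Read(buf)
		if _, ok := err.(term.EscapeError); ok {
			fmt.Println("detach sequence read")
			return
		}
		if err != nil {
			if err != io.EOF {
				fmt.Fprintln(os.Stderr, "read error:", err)
			}
			return
		}
		os.Stdout.Write(buf[:n])
	}
}
```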

View file

@ -0,0 +1,50 @@
// +build linux,cgo
package term
import (
"syscall"
"unsafe"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
newState := oldState.termios
C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
return &oldState, nil
}
func tcget(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
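A short sketch of the usual raw-mode round trip built on the MakeRaw variants in this diff. GetFdInfo and RestoreTerminal are shown elsewhere in the package; the import path assumes the vendored docker/docker copy.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerminal := term.GetFdInfo(os.Stdin)
	if !isTerminal {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}

	oldState, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "MakeRaw:", err)
		return
	}
	// Restore the saved state on exit, otherwise the shell stays in raw mode.
	defer term.RestoreTerminal(fd, oldState)

	// ... interact with the raw terminal here ...
}
```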

View file

@ -1,4 +1,5 @@
// +build !windows
// +build !linux !cgo
// +build !solaris !cgo
package term
@ -6,16 +7,14 @@ package term
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
func tcget(fd uintptr, p *Termios) syscall.Errno {
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
return err
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
return err
}

View file

@ -5,17 +5,15 @@ package term
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is passthrough for unix.Termios in order to make it portable with
// It is passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios unix.Termios
type Termios syscall.Termios
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
@ -28,11 +26,11 @@ func MakeRaw(fd uintptr) (*State, error) {
newState := oldState.termios
newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY)
newState.Oflag &^= unix.OPOST
newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
newState.Cflag &^= (unix.CSIZE | unix.PARENB)
newState.Cflag |= unix.CS8
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
/*
VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned

View file

@ -10,8 +10,7 @@ import (
"io"
"os"
"os/signal"
"golang.org/x/sys/unix"
"syscall"
)
var (
@ -32,7 +31,7 @@ type Winsize struct {
y uint16
}
// StdStreams returns the standard streams (stdin, stdout, stderr).
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
return os.Stdin, os.Stdout, os.Stderr
}
@ -80,7 +79,7 @@ func SaveState(fd uintptr) (*State, error) {
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
newState := state.termios
newState.Lflag &^= unix.ECHO
newState.Lflag &^= syscall.ECHO
if err := tcset(fd, &newState); err != 0 {
return err

View file

@ -1,11 +1,10 @@
// +build solaris,cgo
// +build solaris
package term
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
/*
@ -23,7 +22,7 @@ import "C"
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// Skip retval = 0
if ret == 0 {
return ws, nil
@ -33,7 +32,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// Skip retval = 0
if ret == 0 {
return nil

View file

@ -3,15 +3,14 @@
package term
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
// Skip errno = 0
if err == 0 {
return ws, nil
@ -21,7 +20,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
// Skip errno = 0
if err == 0 {
return nil
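Illustrative use of the ioctl-based GetWinsize above. The Height and Width field names are an assumption here, since only the trailing unexported fields of the Winsize struct are visible in this diff.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerminal := term.GetFdInfo(os.Stdout)
	if !isTerminal {
		return
	}
	ws, err := term.GetWinsize(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "GetWinsize:", err)
		return
	}
	// Field names assumed from the usual Winsize definition.
	fmt.Printf("terminal is %d columns x %d rows\n", ws.Width, ws.Height)
}
```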

View file

@ -6,10 +6,10 @@ import (
"io"
"os"
"os/signal"
"syscall"
"github.com/Azure/go-ansiterm/winterm"
"github.com/docker/docker/pkg/term/windows"
"golang.org/x/sys/windows"
)
// State holds the console mode for the terminal.
@ -33,7 +33,7 @@ const (
// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
@ -79,19 +79,19 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
}
if emulateStdin {
stdIn = windowsconsole.NewAnsiReader(windows.STD_INPUT_HANDLE)
stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
} else {
stdIn = os.Stdin
}
if emulateStdout {
stdOut = windowsconsole.NewAnsiWriter(windows.STD_OUTPUT_HANDLE)
stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
} else {
stdOut = os.Stdout
}
if emulateStderr {
stdErr = windowsconsole.NewAnsiWriter(windows.STD_ERROR_HANDLE)
stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
} else {
stdErr = os.Stderr
}
@ -101,7 +101,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
return windowsconsole.GetHandleInfo(in)
return windows.GetHandleInfo(in)
}
// GetWinsize returns the window size based on the specified file descriptor.
@ -121,7 +121,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
return windowsconsole.IsConsole(fd)
return windows.IsConsole(fd)
}
// RestoreTerminal restores the terminal connected to the given file descriptor

View file

@ -1,42 +0,0 @@
// +build darwin freebsd openbsd
package term
import (
"unsafe"
"golang.org/x/sys/unix"
)
const (
getTermios = unix.TIOCGETA
setTermios = unix.TIOCSETA
)
// Termios is the Unix API for terminal I/O.
type Termios unix.Termios
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
newState.Oflag &^= unix.OPOST
newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
newState.Cflag &^= (unix.CSIZE | unix.PARENB)
newState.Cflag |= unix.CS8
newState.Cc[unix.VMIN] = 1
newState.Cc[unix.VTIME] = 0
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View file

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint64
Oflag uint64
Cflag uint64
Lflag uint64
Cc [20]byte
Ispeed uint64
Ospeed uint64
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View file

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View file

@ -1,37 +1,46 @@
// +build !cgo
package term
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
const (
getTermios = unix.TCGETS
setTermios = unix.TCSETS
getTermios = syscall.TCGETS
setTermios = syscall.TCSETS
)
// Termios is the Unix API for terminal I/O.
type Termios unix.Termios
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
newState.Oflag |= unix.OPOST
newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
newState.Cflag &^= (unix.CSIZE | unix.PARENB)
newState.Cflag |= unix.CS8
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil

View file

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View file

@ -1,6 +1,6 @@
// +build windows
package windowsconsole
package windows
import (
"bytes"

View file

@ -1,6 +1,6 @@
// +build windows
package windowsconsole
package windows
import (
"io"

View file

@ -1,6 +1,6 @@
// +build windows
package windowsconsole
package windows
import (
"os"

View file

@ -2,7 +2,7 @@
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
package windowsconsole
package windows
import (
"io/ioutil"

View file

@ -1,136 +0,0 @@
# the following lines are in sorted order, FYI
github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62
github.com/Microsoft/hcsshim v0.5.17
github.com/Microsoft/go-winio v0.4.1
github.com/Sirupsen/logrus v0.11.0
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
github.com/gorilla/context v1.1
github.com/gorilla/mux v1.1
github.com/kr/pty 5cf931ef8f
github.com/mattn/go-shellwords v1.0.3
github.com/tchap/go-patricia v2.2.6
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
# forked golang.org/x/net package includes a patch for lazy loading trace templates
golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674
golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/pmezard/go-difflib v1.0.0
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
#get libnetwork packages
github.com/docker/libnetwork fd9cf1bc88ac6d54f3d0313da8c2660f128a360b https://github.com/fcrisciani/libnetwork
github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/memberlist v0.1.0
github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
github.com/vishvananda/netlink 1e86b2bee5b6a7d377e4c02bb7f98209d6a7297c
github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
github.com/coreos/etcd ea5389a79f40206170582c1ea076191b8622cb8e https://github.com/aaronlehmann/etcd # for https://github.com/coreos/etcd/pull/7830
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/hashicorp/consul v0.5.2
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
# get graph and distribution packages
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
github.com/vbatts/tar-split v0.10.1
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
# get go-zfs packages
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0
google.golang.org/grpc v1.0.4
github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
github.com/opencontainers/runc 992a5be178a62e026f4069f443c6164912adbf09
github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe
github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
github.com/coreos/go-systemd v4
github.com/godbus/dbus v4.0.0
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
# gelf logging driver deps
github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea
github.com/fluent/fluent-logger-golang v1.2.1
# fluent-logger-golang deps
github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
# fsnotify
github.com/fsnotify/fsnotify v1.2.11
# awslogs deps
github.com/aws/aws-sdk-go v1.4.22
github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
# logentries
github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
# gcplogs deps
golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
google.golang.org/genproto b3e7c2fb04031add52c4817f53f43757ccbf9c18
# containerd
github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
# cluster
github.com/docker/swarmkit 1a3e510517be82d18ac04380b5f71eddf06c2fc0
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
# cli
github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
# metrics
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/opencontainers/selinux v1.0.0-rc1

View file

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,425 +0,0 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.

View file

@ -1,104 +0,0 @@
# go-digest
[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest)
Common digest package used across the container ecosystem.
Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
# What is a digest?
A digest is just a hash.
The most common use case for a digest is to create a content
identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
systems:
```go
id := digest.FromBytes([]byte("my content"))
```
In the example above, the id can be used to uniquely identify
the byte slice "my content". This allows two disparate applications
to agree on a verifiable identifier without having to trust one
another.
An identifying digest can be verified, as follows:
```go
if id != digest.FromBytes([]byte("my content")) {
return errors.New("the content has changed!")
}
```
A `Verifier` type can be used to handle cases where an `io.Reader`
makes more sense:
```go
rd := getContent()
verifier := id.Verifier()
io.Copy(verifier, rd)
if !verifier.Verified() {
return errors.New("the content has changed!")
}
```
Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
can power a rich, safe, content distribution system.
# Usage
While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
considered the best resource, a few important items need to be called
out when using this package.
1. Make sure to import the hash implementations into your application
or the package will panic. You should have something like the
following in the main (or other entrypoint) of your application:
```go
import (
_ "crypto/sha256"
_ "crypto/sha512"
)
```
This may seem inconvenient, but it allows you to replace the hash
implementations with others, such as https://github.com/stevvooe/resumable.
2. Even though `digest.Digest` may be assembled as a string, _always_
verify your input with `digest.Parse` or use `Digest.Validate`
when accepting untrusted input. While there are measures to
avoid common problems, this will ensure you have valid digests
in the rest of your application; see the sketch after this list.
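As an illustration of point 2 (a sketch added here, not taken from the original README; the sample value reuses the example digest from the `Digest` type's documentation), untrusted input can be validated with `digest.Parse` before use:
```go
package main

import (
	_ "crypto/sha256" // required so the sha256 algorithm is available
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Treat this as untrusted input, e.g. from an API request.
	untrusted := "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"

	// Parse validates the format, algorithm, and encoded length.
	d, err := digest.Parse(untrusted)
	if err != nil {
		fmt.Println("rejecting input:", err)
		return
	}
	fmt.Println("algorithm:", d.Algorithm(), "encoded:", d.Encoded())
}
```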
# Stability
The Go API, at this stage, is considered stable, unless otherwise noted.
As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
# Contributing
This package is considered fairly complete. It has been in production
in thousands (millions?) of deployments and is fairly battle-hardened.
New additions will be met with skepticism. If you think there is a
missing feature, please file a bug clearly describing the problem and
the alternatives you tried before submitting a PR.
# Reporting security issues
Please DO NOT file a public issue, instead send your report privately to
security@opencontainers.org.
The maintainers take security seriously. If you discover a security issue,
please bring it to their attention right away!
If you are reporting a security issue, do not create an issue or file a pull
request on GitHub. Instead, disclose the issue responsibly by sending an email
to security@opencontainers.org (which is inhabited only by the maintainers of
the various OCI projects).
# Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.

View file

@@ -1,166 +0,0 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
"crypto"
"fmt"
"hash"
"io"
)
// Algorithm identifies an implementation of a digester by an identifier.
// Note that this defines both the hash algorithm used and the string
// encoding.
type Algorithm string
// supported digest types
const (
SHA256 Algorithm = "sha256" // sha256 with hex encoding
SHA384 Algorithm = "sha384" // sha384 with hex encoding
SHA512 Algorithm = "sha512" // sha512 with hex encoding
// Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage
// digest.
Canonical = SHA256
)
var (
// TODO(stevvooe): Follow the pattern of the standard crypto package for
// registration of digests. Effectively, we are a registerable set and
// common symbol access.
// algorithms maps values to hash.Hash implementations. Other algorithms
// may be available but they cannot be calculated by the digest package.
algorithms = map[Algorithm]crypto.Hash{
SHA256: crypto.SHA256,
SHA384: crypto.SHA384,
SHA512: crypto.SHA512,
}
)
// Available returns true if the digest type is available for use. If this
// returns false, calling Digester or Hash will panic.
func (a Algorithm) Available() bool {
h, ok := algorithms[a]
if !ok {
return false
}
// check availability of the hash, as well
return h.Available()
}
func (a Algorithm) String() string {
return string(a)
}
// Size returns the number of bytes returned by the hash.
func (a Algorithm) Size() int {
h, ok := algorithms[a]
if !ok {
return 0
}
return h.Size()
}
// Set is implemented to allow use of Algorithm as a command-line flag.
func (a *Algorithm) Set(value string) error {
if value == "" {
*a = Canonical
} else {
// just do a type conversion, support is queried with Available.
*a = Algorithm(value)
}
if !a.Available() {
return ErrDigestUnsupported
}
return nil
}
// Digester returns a new digester for the specified algorithm. If the
// algorithm is not available, the call will panic; check this by calling
// Available before calling Digester.
func (a Algorithm) Digester() Digester {
return &digester{
alg: a,
hash: a.Hash(),
}
}
// Hash returns a new hash as used by the algorithm. If not available, the
// method will panic. Check Algorithm.Available() before calling.
func (a Algorithm) Hash() hash.Hash {
if !a.Available() {
// Empty algorithm string is invalid
if a == "" {
panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
}
// NOTE(stevvooe): A missing hash is usually a programming error that
// must be resolved at compile time. We don't import in the digest
// package to allow users to choose their hash implementation (such as
// when using stevvooe/resumable or a hardware accelerated package).
//
// Applications that may want to resolve the hash at runtime should
// call Algorithm.Available before calling Algorithm.Hash().
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
}
return algorithms[a].New()
}
// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
// the encoded portion of the digest.
func (a Algorithm) Encode(d []byte) string {
// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
// add support for back registration, we can modify this accordingly.
return fmt.Sprintf("%x", d)
}
// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.Digester()
if _, err := io.Copy(digester.Hash(), rd); err != nil {
return "", err
}
return digester.Digest(), nil
}
// FromBytes digests the input and returns a Digest.
func (a Algorithm) FromBytes(p []byte) Digest {
digester := a.Digester()
if _, err := digester.Hash().Write(p); err != nil {
// Writes to a Hash should never fail. None of the existing
// hash implementations in the stdlib or hashes vendored
// here can return errors from Write. Having a panic in this
// condition instead of having FromBytes return an error value
// avoids unnecessary error handling paths in all callers.
panic("write to hash function returned error: " + err.Error())
}
return digester.Digest()
}
// FromString digests the string input and returns a Digest.
func (a Algorithm) FromString(s string) Digest {
return a.FromBytes([]byte(s))
}
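The Set and String methods above let Algorithm satisfy flag.Value. A minimal usage sketch (not part of the vendored file; the `-digest-alg` flag name and the read-from-stdin behavior are invented for illustration):
```go
package main

import (
	_ "crypto/sha256" // register hash implementations so Available() succeeds
	_ "crypto/sha512"
	"flag"
	"fmt"
	"os"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Algorithm implements flag.Value through Set and String, so it can be
	// bound directly to a flag; Set rejects unsupported algorithms.
	alg := digest.Canonical
	flag.Var(&alg, "digest-alg", "digest algorithm (sha256, sha384, sha512)")
	flag.Parse()

	// Digest whatever arrives on stdin with the chosen algorithm.
	d, err := alg.FromReader(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "digest:", err)
		os.Exit(1)
	}
	fmt.Println(d)
}
```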

View file

@@ -1,164 +0,0 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
"fmt"
"hash"
"io"
"regexp"
"strings"
)
// Digest allows simple protection of hex-formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format, and the type provides quick access to the components of
// a digest string.
//
// The following is an example of the contents of Digest types:
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// This allows the digest to be abstracted behind this type so that callers
// can work only in those terms.
type Digest string
// NewDigest returns a Digest from alg and a hash.Hash object.
func NewDigest(alg Algorithm, h hash.Hash) Digest {
return NewDigestFromBytes(alg, h.Sum(nil))
}
// NewDigestFromBytes returns a new digest from the byte contents of p.
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
return NewDigestFromEncoded(alg, alg.Encode(p))
}
// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
func NewDigestFromHex(alg, hex string) Digest {
return NewDigestFromEncoded(Algorithm(alg), hex)
}
// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, encoded))
}
// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
var (
// ErrDigestInvalidFormat is returned when the digest format is invalid.
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
// ErrDigestInvalidLength is returned when the digest has an invalid length.
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)
// Parse parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func Parse(s string) (Digest, error) {
d := Digest(s)
return d, d.Validate()
}
// FromReader consumes the content of rd until io.EOF, returning the canonical digest.
func FromReader(rd io.Reader) (Digest, error) {
return Canonical.FromReader(rd)
}
// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) Digest {
return Canonical.FromBytes(p)
}
// FromString digests the input and returns a Digest.
func FromString(s string) Digest {
return Canonical.FromString(s)
}
// Validate checks that the contents of d form a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
s := string(d)
i := strings.Index(s, ":")
// validate i then run through regexp
if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat
}
algorithm := Algorithm(s[:i])
if !algorithm.Available() {
return ErrDigestUnsupported
}
// Digests must always be hex-encoded, ensuring that their hex portion will
// always be size*2
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
return nil
}
// Algorithm returns the algorithm portion of the digest. This will panic if
// the underlying digest is not in a valid format.
func (d Digest) Algorithm() Algorithm {
return Algorithm(d[:d.sepIndex()])
}
// Verifier returns a writer object that can be used to verify a stream of
// content against the digest. If the digest is invalid, the method will panic.
func (d Digest) Verifier() Verifier {
return hashVerifier{
hash: d.Algorithm().Hash(),
digest: d,
}
}
// Encoded returns the encoded portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Encoded() string {
return string(d[d.sepIndex()+1:])
}
// Hex is deprecated. Please use Digest.Encoded.
func (d Digest) Hex() string {
return d.Encoded()
}
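// String returns the digest as a plain string.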
func (d Digest) String() string {
return string(d)
}
func (d Digest) sepIndex() int {
i := strings.Index(string(d), ":")
if i < 0 {
panic(fmt.Sprintf("no ':' separator in digest %q", d))
}
return i
}

Some files were not shown because too many files have changed in this diff.