Merge pull request #26866 from docker/cherry-picks-1.12.2
Cherry picks 1.12.2
Commit 1ec9ca1ca6: 34 changed files with 613 additions and 156 deletions
@@ -91,11 +91,9 @@ RUN set -x \
# NOTE: ppc64le has compatibility issues with older versions of go, so make sure the version >= 1.6
ENV GO_VERSION 1.6.3
ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz
ENV GO_DOWNLOAD_SHA256 787b0b750d037016a30c6ed05a8a70a91b2e9db4bd9b1a2453aa502a63f1bccc
ENV GOROOT_BOOTSTRAP /usr/local

RUN curl -fsSL "$GO_DOWNLOAD_URL" -o golang.tar.gz \
&& echo "$GO_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \
&& tar -C /usr/src -xzf golang.tar.gz \
&& rm golang.tar.gz \
&& cd /usr/src/go/src && ./make.bash 2>&1

@@ -41,7 +41,7 @@ func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
}

flags := cmd.Flags()
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs")
flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output")
flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Provide filter values (i.e. 'dangling=true')")

@@ -71,6 +71,8 @@ func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc)
// print extra space between objects, but not after the last one
if idx+1 != len(refs) {
fmt.Fprintf(out, "\n\n")
} else {
fmt.Fprintf(out, "\n")
}
}
return nil

@@ -19,7 +19,7 @@ func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {

cmd := &cobra.Command{
Use: "leave [OPTIONS]",
Short: "Leave a swarm",
Short: "Leave the swarm (workers only)",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runLeave(dockerCli, opts)

@@ -27,7 +27,7 @@ func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {
}

flags := cmd.Flags()
flags.BoolVar(&opts.force, "force", false, "Force leave ignoring warnings.")
flags.BoolVar(&opts.force, "force", false, "Force this node to leave the swarm, ignoring warnings")
return cmd
}

@@ -357,5 +357,5 @@ func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error
return nil, nil, err
}

return &Node{Value: typ, Next: cmd, Attributes: attrs}, nil, err
return &Node{Value: typ, Next: cmd}, attrs, err
}

@@ -139,7 +139,7 @@ case "$1" in

status)
check_init
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC"
;;

*)

@@ -455,11 +455,13 @@ func (c *Cluster) Join(req types.JoinRequest) error {
}

var advertiseAddr string
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
if req.AdvertiseAddr != "" {
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
}
}

// todo: check current state existing

@@ -33,6 +33,7 @@ import (
"strings"
"sync"
"syscall"
"time"

"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"

@@ -264,6 +265,39 @@ func (a *Driver) createDirsFor(id string) error {
return nil
}

// Helper function to debug EBUSY errors on remove.
func debugEBusy(mountPath string) (out []string, err error) {
// lsof is not part of GNU coreutils. This is a best effort
// attempt to detect offending processes.
c := exec.Command("lsof")

r, err := c.StdoutPipe()
if err != nil {
return nil, fmt.Errorf("Assigning pipes failed with %v", err)
}

if err := c.Start(); err != nil {
return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err)
}

defer func() {
waiterr := c.Wait()
if waiterr != nil && err == nil {
err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr)
}
}()

sc := bufio.NewScanner(r)
for sc.Scan() {
entry := sc.Text()
if strings.Contains(entry, mountPath) {
out = append(out, entry, "\n")
}
}

return out, nil
}

// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.pathCacheLock.Lock()

@@ -272,9 +306,35 @@ func (a *Driver) Remove(id string) error {
if !exists {
mountpoint = a.getMountpoint(id)
}
if err := a.unmount(mountpoint); err != nil {
// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)

var retries int
for {
mounted, err := a.mounted(mountpoint)
if err != nil {
return err
}
if !mounted {
break
}

if err := a.unmount(mountpoint); err != nil {
if err != syscall.EBUSY {
return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err)
}
if retries >= 5 {
out, debugErr := debugEBusy(mountpoint)
if debugErr == nil {
logrus.Warnf("debugEBusy returned %v", out)
}
return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err)
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
time.Sleep(100 * time.Millisecond)
continue
}
break
}

// Atomically remove each directory in turn by first moving it out of the

@@ -282,6 +342,13 @@ func (a *Driver) Remove(id string) error {
// the whole tree.
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
if err == syscall.EBUSY {
logrus.Warnf("os.Rename err due to EBUSY")
out, debugErr := debugEBusy(mountpoint)
if debugErr == nil {
logrus.Warnf("debugEBusy returned %v", out)
}
}
return err
}
defer os.RemoveAll(tmpMntPath)

@@ -157,7 +157,9 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta

cntrs := make([]*container.Container, 0, len(matches))
for id := range matches {
cntrs = append(cntrs, daemon.containers.Get(id))
if c := daemon.containers.Get(id); c != nil {
cntrs = append(cntrs, c)
}
}

// Restore sort-order after filtering

@@ -38,7 +38,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
// This is kept for backward compatibility - hostconfig should be passed when
// creating a container, not during start.
if hostConfig != nil {
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12")
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
oldNetworkMode := container.HostConfig.NetworkMode
if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
return err

@@ -52,7 +52,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
newNetworkMode := container.HostConfig.NetworkMode
if string(oldNetworkMode) != string(newNetworkMode) {
// if user has change the network mode on starting, clean up the
// old networks. It is a deprecated feature and will be removed in Docker 1.12
// old networks. It is a deprecated feature and has been removed in Docker 1.12
container.NetworkSettings.Networks = nil
if err := container.ToDisk(); err != nil {
return err

@@ -22,7 +22,7 @@ Options:
-f, --filter value Provide filter values (i.e. 'dangling=true') (default [])
--help Print usage
--no-trunc Do not truncate the output
-q, --quiet Only display volume names
-q, --quiet Only display network IDs
```

Lists all the networks the Engine `daemon` knows about. This includes the

@@ -25,36 +25,45 @@ Options:
--help Print usage
```

Removes specified nodes from a swarm.
When run from a manager node, removes the specified nodes from a swarm.

Example output:

$ docker node rm swarm-node-02
Node swarm-node-02 removed from swarm
```nohighlight
$ docker node rm swarm-node-02

Removes nodes from the swarm that are in the down state. Attempting to remove
an active node will result in an error:

```bash
$ docker node rm swarm-node-03
Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not down and can't be removed
Node swarm-node-02 removed from swarm
```

If a worker node becomes compromised, exhibits unexpected or unwanted behavior, or if you lose access to it so
that a clean shutdown is impossible, you can use the force option.
Removes the specified nodes from the swarm, but only if the nodes are in the
down state. If you attempt to remove an active node you will receive an error:

```bash
```nohighlight
$ docker node rm swarm-node-03

Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not
down and can't be removed
```

If you lose access to a worker node or need to shut it down because it has been
compromised or is not behaving as expected, you can use the `--force` option.
This may cause transient errors or interruptions, depending on the type of task
being run on the node.

```nohighlight
$ docker node rm --force swarm-node-03

Node swarm-node-03 removed from swarm
```

Note that manager nodes have to be demoted to worker nodes before they can be removed
from the cluster.
A manager node must be demoted to a worker node (using `docker node demote`)
before you can remove it from the swarm.

## Related information

* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node demote](node_demote.md)
* [node ps](node_ps.md)
* [node ls](node_ls.md)

@@ -15,41 +15,41 @@ parent = "smn_cli"
```markdown
Usage: docker swarm leave [OPTIONS]

Leave a swarm
Leave the swarm (workers only).

Options:
--force Force leave ignoring warnings.
--force Force this node to leave the swarm, ignoring warnings
--help Print usage
```

This command causes the node to leave the swarm.
When you run this command on a worker, that worker leaves the swarm.

On a manager node:
You can use the `--force` option to on a manager to remove it from the swarm.
However, this does not reconfigure the swarm to ensure that there are enough
managers to maintain a quorum in the swarm. The safe way to remove a manager
from a swarm is to demote it to a worker and then direct it to leave the quorum
without using `--force`. Only use `--force` in situations where the swarm will
no longer be used after the manager leaves, such as in a single-node swarm.

Consider the following swarm, as seen from the manager:
```bash
$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active
dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable
dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active
dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader
```

On a worker node, worker2 in the following example:
To remove `worker2`, issue the following command from `worker2` itself:
```bash
$ docker swarm leave
Node left the default swarm.
```

On a manager node:
```bash
$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
7ln70fl22uw2dvjn2ft53m3q5 worker2 Down Active
dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable
dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader
```
To remove an inactive node, use the [`node rm`](swarm_rm.md) command instead.

## Related information

* [node rm](node_rm.md)
* [swarm init](swarm_init.md)
* [swarm join](swarm_join.md)
* [swarm update](swarm_update.md)

@@ -61,7 +61,7 @@ clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://gith
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
clone git github.com/docker/engine-api 4eca04ae18f4f93f40196a17b9aa6e11262a7269
clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/RackSec/srslog 365bf33cd9acc21ae1c355209865f17228ca534e
clone git github.com/imdario/mergo 0.2.1

#get libnetwork packages

@@ -83,7 +83,7 @@ clone git github.com/coreos/etcd v2.3.2
fix_rewritten_imports github.com/coreos/etcd
clone git github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
clone git github.com/hashicorp/consul v0.5.2
clone git github.com/boltdb/bolt v1.2.1
clone git github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
clone git github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7

# get graph and distribution packages

@@ -114,7 +114,7 @@ clone git github.com/golang/protobuf 3c84672111d91bb5ac31719e112f9f7126a0e26e
# gelf logging driver deps
clone git github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883

clone git github.com/fluent/fluent-logger-golang v1.1.0
clone git github.com/fluent/fluent-logger-golang v1.2.0
# fluent-logger-golang deps
clone git github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa
clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c

@@ -443,6 +443,25 @@ func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) {
c.Assert(err, check.IsNil, check.Commentf(out))
}

func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) {
c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil)
s.ctrl.reqRes.Allow = true
s.ctrl.resRes.Allow = true
c.Assert(s.d.LoadBusybox(), check.IsNil)

daemonURL, err := url.Parse(s.d.sock())

conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
c.Assert(err, check.IsNil)
client := httputil.NewClientConn(conn, nil)
req, err := http.NewRequest("GET", "/version", nil)
c.Assert(err, check.IsNil)
resp, err := client.Do(req)

c.Assert(err, check.IsNil)
c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json")
}

// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin
func assertURIRecorded(c *check.C, uris []string, uri string) {
var found bool

@@ -149,4 +149,19 @@ func (s *DockerSuite) TestHealth(c *check.C) {
c.Check(last.ExitCode, checker.Equals, -1)
c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)")
dockerCmd(c, "rm", "-f", "test")

// Check JSON-format
_, err = buildImage(imageName,
`FROM busybox
RUN echo OK > /status
CMD ["/bin/sleep", "120"]
STOPSIGNAL SIGKILL
HEALTHCHECK --interval=1s --timeout=30s \
CMD ["cat", "/my status"]`,
true)
c.Check(err, check.IsNil)
out, _ = dockerCmd(c, "inspect",
"--format={{.Config.Healthcheck.Test}}", imageName)
c.Check(out, checker.Equals, "[CMD cat /my status]\n")

}

@@ -166,7 +166,7 @@ attached.
Do not truncate the output

**-q**, **--quiet**=*true*|*false*
Only display numeric IDs
Only display network IDs

**--help**
Print usage statement

@@ -175,11 +175,6 @@ func (rm *responseModifier) Flush() {

// FlushAll flushes all data to the HTTP response
func (rm *responseModifier) FlushAll() error {
// Copy the status code
if rm.statusCode > 0 {
rm.rw.WriteHeader(rm.statusCode)
}

// Copy the header
for k, vv := range rm.header {
for _, v := range vv {

@@ -187,6 +182,13 @@ func (rm *responseModifier) FlushAll() error {
}
}

// Copy the status code
// Also WriteHeader needs to be done after all the headers
// have been copied (above).
if rm.statusCode > 0 {
rm.rw.WriteHeader(rm.statusCode)
}

var err error
if len(rm.body) > 0 {
// Write body

@@ -358,6 +358,27 @@ func RemoveDeviceDeferred(name string) error {
return ErrTaskDeferredRemove
}

// set a task cookie and disable library fallback, or else libdevmapper will
// disable udev dm rules and delete the symlink under /dev/mapper by itself,
// even if the removal is deferred by the kernel.
var cookie uint
var flags uint16
flags = DmUdevDisableLibraryFallback
if err := task.setCookie(&cookie, flags); err != nil {
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
}

// libdevmapper and udev relies on System V semaphore for synchronization,
// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
// So these two function call must come in pairs, otherwise semaphores will
// be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem`
// will be reached, which will eventually make all follwing calls to 'task.SetCookie'
// fail.
// this call will not wait for the deferred removal's final executing, since no
// udev event will be generated, and the semaphore's value will not be incremented
// by udev, what UdevWait is just cleaning up the semaphore.
defer UdevWait(&cookie)

if err = task.run(); err != nil {
return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
}

@@ -73,6 +73,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {

if os.Getenv("ConEmuANSI") == "ON" {
// The ConEmu terminal emulates ANSI on output streams well.
emulateStdin = true
emulateStdout = false
emulateStderr = false
}

@@ -32,7 +32,7 @@ func UnixFormatter(p Priority, hostname, tag, content string) string {
// RFC3164Formatter provides an RFC 3164 compliant message.
func RFC3164Formatter(p Priority, hostname, tag, content string) string {
timestamp := time.Now().Format(time.Stamp)
msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
msg := fmt.Sprintf("<%d>%s %s %s[%d]: %s",
p, timestamp, hostname, tag, os.Getpid(), content)
return msg
}

vendor/src/github.com/boltdb/bolt/README.md (vendored, 29 changed lines)
|
@ -1,4 +1,4 @@
|
|||
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
|
||||
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
|
||||
====
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
|
@ -15,11 +15,11 @@ and setting values. That's it.
|
|||
|
||||
## Project Status
|
||||
|
||||
Bolt is stable and the API is fixed. Full unit test coverage and randomized
|
||||
black box testing are used to ensure database consistency and thread safety.
|
||||
Bolt is currently in high-load production environments serving databases as
|
||||
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
|
||||
services every day.
|
||||
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
|
||||
test coverage and randomized black box testing are used to ensure database
|
||||
consistency and thread safety. Bolt is currently in high-load production
|
||||
environments serving databases as large as 1TB. Many companies such as
|
||||
Shopify and Heroku use Bolt-backed services every day.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
|
@ -209,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the
|
|||
recommended way to use Bolt transactions.
|
||||
|
||||
However, sometimes you may want to manually start and end your transactions.
|
||||
You can use the `Tx.Begin()` function directly but **please** be sure to close
|
||||
You can use the `DB.Begin()` function directly but **please** be sure to close
|
||||
the transaction.
|
||||
|
||||
```go
|
||||
|
@ -313,7 +313,7 @@ func (s *Store) CreateUser(u *User) error {
|
|||
// Generate ID for the user.
|
||||
// This returns an error only if the Tx is closed or not writeable.
|
||||
// That can't happen in an Update() call so I ignore the error check.
|
||||
id, _ = b.NextSequence()
|
||||
id, _ := b.NextSequence()
|
||||
u.ID = int(id)
|
||||
|
||||
// Marshal user data into bytes.
|
||||
|
@ -448,6 +448,10 @@ db.View(func(tx *bolt.Tx) error {
|
|||
})
|
||||
```
|
||||
|
||||
Please note that keys and values in `ForEach()` are only valid while
|
||||
the transaction is open. If you need to use a key or value outside of
|
||||
the transaction, you must use `copy()` to copy it to another byte
|
||||
slice.
|
||||
|
||||
### Nested buckets
|
||||
|
||||
|
@ -557,7 +561,7 @@ if err != nil {
|
|||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
||||
contain your database logic and a reference to a `*bolt.DB` with a initializing
|
||||
contstructor that takes in a filepath where the database file will be stored.
|
||||
constructor that takes in a filepath where the database file will be stored.
|
||||
Neither Android nor iOS require extra permissions or cleanup from using this method.
|
||||
|
||||
```go
|
||||
|
@ -807,6 +811,7 @@ them via pull request.
|
|||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
|
@ -825,7 +830,6 @@ Below is a list of public, open source projects that use Bolt:
|
|||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
|
||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
|
@ -842,9 +846,12 @@ Below is a list of public, open source projects that use Bolt:
|
|||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
||||
* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
|
||||
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
|
||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
||||
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
|
||||
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
|
||||
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
|
||||
|
||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
||||
|
|
|
@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
|
|||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
|
|
@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
|||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
|
vendor/src/github.com/boltdb/bolt/bolt_arm.go (vendored, 21 changed lines)
|
@ -1,7 +1,28 @@
|
|||
package bolt
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned bool
|
||||
|
||||
func init() {
|
||||
// Simple check to see whether this arch handles unaligned load/stores
|
||||
// correctly.
|
||||
|
||||
// ARM9 and older devices require load/stores to be from/to aligned
|
||||
// addresses. If not, the lower 2 bits are cleared and that address is
|
||||
// read in a jumbled up order.
|
||||
|
||||
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
|
||||
|
||||
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
|
||||
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
|
||||
|
||||
brokenUnaligned = val != 0x11222211
|
||||
}
|
||||
|
|
|
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
|||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
|
|
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
|||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
|
|
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
|||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
|
vendor/src/github.com/boltdb/bolt/bucket.go (vendored, 32 changed lines)
|
@ -130,9 +130,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
|
|||
func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
var child = newBucket(b.tx)
|
||||
|
||||
// If unaligned load/stores are broken on this arch and value is
|
||||
// unaligned simply clone to an aligned byte array.
|
||||
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
|
||||
|
||||
if unaligned {
|
||||
value = cloneBytes(value)
|
||||
}
|
||||
|
||||
// If this is a writable transaction then we need to copy the bucket entry.
|
||||
// Read-only transactions can point directly at the mmap entry.
|
||||
if b.tx.writable {
|
||||
if b.tx.writable && !unaligned {
|
||||
child.bucket = &bucket{}
|
||||
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
|
||||
} else {
|
||||
|
@ -329,6 +337,28 @@ func (b *Bucket) Delete(key []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Sequence returns the current integer for the bucket without incrementing it.
|
||||
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
|
||||
|
||||
// SetSequence updates the sequence number for the bucket.
|
||||
func (b *Bucket) SetSequence(v uint64) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Materialize the root node if it hasn't been already so that the
|
||||
// bucket will be saved during commit.
|
||||
if b.rootNode == nil {
|
||||
_ = b.node(b.root, nil)
|
||||
}
|
||||
|
||||
// Increment and return the sequence.
|
||||
b.bucket.sequence = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextSequence returns an autoincrementing integer for the bucket.
|
||||
func (b *Bucket) NextSequence() (uint64, error) {
|
||||
if b.tx.db == nil {
|
||||
|
|
vendor/src/github.com/boltdb/bolt/freelist.go (vendored, 20 changed lines)
|
@ -166,12 +166,16 @@ func (f *freelist) read(p *page) {
|
|||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(f.ids))
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(f.ids))
|
||||
}
|
||||
|
||||
// Rebuild the page cache.
|
||||
f.reindex()
|
||||
|
@ -189,7 +193,9 @@ func (f *freelist) write(p *page) error {
|
|||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
if len(ids) < 0xFFFF {
|
||||
if len(ids) == 0 {
|
||||
p.count = uint16(len(ids))
|
||||
} else if len(ids) < 0xFFFF {
|
||||
p.count = uint16(len(ids))
|
||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
|
||||
} else {
|
||||
|
@ -230,7 +236,7 @@ func (f *freelist) reload(p *page) {
|
|||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
f.cache = make(map[pgid]bool)
|
||||
f.cache = make(map[pgid]bool, len(f.ids))
|
||||
for _, id := range f.ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
|
|
vendor/src/github.com/boltdb/bolt/node.go (vendored, 5 changed lines)
|
@ -201,6 +201,11 @@ func (n *node) write(p *page) {
|
|||
}
|
||||
p.count = uint16(len(n.inodes))
|
||||
|
||||
// Stop here if there are no items to write.
|
||||
if p.count == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Loop over each item and write it to the page.
|
||||
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||
for i, item := range n.inodes {
|
||||
|
|
vendor/src/github.com/boltdb/bolt/page.go (vendored, 6 changed lines)
|
@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {
|
|||
|
||||
// leafPageElements retrieves a list of leaf nodes.
|
||||
func (p *page) leafPageElements() []leafPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
}
|
||||
|
||||
|
@ -72,6 +75,9 @@ func (p *page) branchPageElement(index uint16) *branchPageElement {
|
|||
|
||||
// branchPageElements retrieves a list of branch nodes.
|
||||
func (p *page) branchPageElements() []branchPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
}
|
||||
|
||||
|
|
|
@ -1,13 +1,202 @@
|
|||
Copyright (c) 2013 Tatsuo Kaniwa
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package fluent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -25,24 +26,28 @@ const (
|
|||
)
|
||||
|
||||
type Config struct {
|
||||
FluentPort int
|
||||
FluentHost string
|
||||
FluentNetwork string
|
||||
FluentSocketPath string
|
||||
Timeout time.Duration
|
||||
BufferLimit int
|
||||
RetryWait int
|
||||
MaxRetry int
|
||||
TagPrefix string
|
||||
AsyncConnect bool
|
||||
FluentPort int `json:"fluent_port"`
|
||||
FluentHost string `json:"fluent_host"`
|
||||
FluentNetwork string `json:"fluent_network"`
|
||||
FluentSocketPath string `json:"fluent_socket_path"`
|
||||
Timeout time.Duration `json:"timeout"`
|
||||
BufferLimit int `json:"buffer_limit"`
|
||||
RetryWait int `json:"retry_wait"`
|
||||
MaxRetry int `json:"max_retry"`
|
||||
TagPrefix string `json:"tag_prefix"`
|
||||
AsyncConnect bool `json:"async_connect"`
|
||||
MarshalAsJSON bool `json:"marshal_as_json"`
|
||||
}
|
||||
|
||||
type Fluent struct {
|
||||
Config
|
||||
|
||||
mubuff sync.Mutex
|
||||
pending []byte
|
||||
|
||||
muconn sync.Mutex
|
||||
conn io.WriteCloser
|
||||
pending []byte
|
||||
reconnecting bool
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// New creates a new Logger.
|
||||
|
@ -140,9 +145,9 @@ func (f *Fluent) PostWithTime(tag string, tm time.Time, message interface{}) err
|
|||
}
|
||||
|
||||
if msgtype.Kind() != reflect.Map {
|
||||
return errors.New("messge must be a map")
|
||||
return errors.New("fluent#PostWithTime: message must be a map")
|
||||
} else if msgtype.Key().Kind() != reflect.String {
|
||||
return errors.New("map keys must be strings")
|
||||
return errors.New("fluent#PostWithTime: map keys must be strings")
|
||||
}
|
||||
|
||||
kv := make(map[string]interface{})
|
||||
|
@ -154,33 +159,54 @@ func (f *Fluent) PostWithTime(tag string, tm time.Time, message interface{}) err
|
|||
}
|
||||
|
||||
func (f *Fluent) EncodeAndPostData(tag string, tm time.Time, message interface{}) error {
|
||||
if data, dumperr := f.EncodeData(tag, tm, message); dumperr != nil {
|
||||
return fmt.Errorf("fluent#EncodeAndPostData: can't convert '%s' to msgpack:%s", message, dumperr)
|
||||
// fmt.Println("fluent#Post: can't convert to msgpack:", message, dumperr)
|
||||
} else {
|
||||
f.PostRawData(data)
|
||||
return nil
|
||||
var data []byte
|
||||
var err error
|
||||
if data, err = f.EncodeData(tag, tm, message); err != nil {
|
||||
return fmt.Errorf("fluent#EncodeAndPostData: can't convert '%#v' to msgpack:%v", message, err)
|
||||
}
|
||||
return f.postRawData(data)
|
||||
}
|
||||
|
||||
// Deprecated: Use EncodeAndPostData instead
|
||||
func (f *Fluent) PostRawData(data []byte) {
|
||||
f.mu.Lock()
|
||||
f.pending = append(f.pending, data...)
|
||||
f.mu.Unlock()
|
||||
f.postRawData(data)
|
||||
}
|
||||
|
||||
func (f *Fluent) postRawData(data []byte) error {
|
||||
if err := f.appendBuffer(data); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.send(); err != nil {
|
||||
f.close()
|
||||
if len(f.pending) > f.Config.BufferLimit {
|
||||
f.flushBuffer()
|
||||
}
|
||||
} else {
|
||||
f.flushBuffer()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// For sending forward protocol adopted JSON
|
||||
type MessageChunk struct {
|
||||
message Message
|
||||
}
|
||||
|
||||
// Golang default marshaler does not support
|
||||
// ["value", "value2", {"key":"value"}] style marshaling.
|
||||
// So, it should write JSON marshaler by hand.
|
||||
func (chunk *MessageChunk) MarshalJSON() ([]byte, error) {
|
||||
data, err := json.Marshal(chunk.message.Record)
|
||||
return []byte(fmt.Sprintf("[\"%s\",%d,%s,null]", chunk.message.Tag,
|
||||
chunk.message.Time, data)), err
|
||||
}
|
||||
|
||||
func (f *Fluent) EncodeData(tag string, tm time.Time, message interface{}) (data []byte, err error) {
|
||||
timeUnix := tm.Unix()
|
||||
msg := &Message{Tag: tag, Time: timeUnix, Record: message}
|
||||
data, err = msg.MarshalMsg(nil)
|
||||
if f.Config.MarshalAsJSON {
|
||||
msg := Message{Tag: tag, Time: timeUnix, Record: message}
|
||||
chunk := &MessageChunk{message: msg}
|
||||
data, err = json.Marshal(chunk)
|
||||
} else {
|
||||
msg := &Message{Tag: tag, Time: timeUnix, Record: message}
|
||||
data, err = msg.MarshalMsg(nil)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -193,23 +219,32 @@ func (f *Fluent) Close() (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// close closes the connection.
|
||||
func (f *Fluent) close() (err error) {
|
||||
if f.conn != nil {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
} else {
|
||||
return
|
||||
// appendBuffer appends data to buffer with lock.
|
||||
func (f *Fluent) appendBuffer(data []byte) error {
|
||||
f.mubuff.Lock()
|
||||
defer f.mubuff.Unlock()
|
||||
if len(f.pending)+len(data) > f.Config.BufferLimit {
|
||||
return errors.New(fmt.Sprintf("fluent#appendBuffer: Buffer full, limit %v", f.Config.BufferLimit))
|
||||
}
|
||||
f.pending = append(f.pending, data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// close closes the connection.
|
||||
func (f *Fluent) close() {
|
||||
f.muconn.Lock()
|
||||
if f.conn != nil {
|
||||
f.conn.Close()
|
||||
f.conn = nil
|
||||
}
|
||||
return
|
||||
f.muconn.Unlock()
|
||||
}
|
||||
|
||||
// connect establishes a new connection using the specified transport.
|
||||
func (f *Fluent) connect() (err error) {
|
||||
f.muconn.Lock()
|
||||
defer f.muconn.Unlock()
|
||||
|
||||
switch f.Config.FluentNetwork {
|
||||
case "tcp":
|
||||
f.conn, err = net.DialTimeout(f.Config.FluentNetwork, f.Config.FluentHost+":"+strconv.Itoa(f.Config.FluentPort), f.Config.Timeout)
|
||||
|
@ -218,6 +253,10 @@ func (f *Fluent) connect() (err error) {
|
|||
default:
|
||||
err = net.UnknownNetworkError(f.Config.FluentNetwork)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
f.reconnecting = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -226,44 +265,45 @@ func e(x, y float64) int {
|
|||
}
|
||||
|
||||
func (f *Fluent) reconnect() {
|
||||
go func() {
|
||||
for i := 0; ; i++ {
|
||||
err := f.connect()
|
||||
if err == nil {
|
||||
f.mu.Lock()
|
||||
f.reconnecting = false
|
||||
f.mu.Unlock()
|
||||
break
|
||||
} else {
|
||||
if i == f.Config.MaxRetry {
|
||||
panic("fluent#reconnect: failed to reconnect!")
|
||||
}
|
||||
waitTime := f.Config.RetryWait * e(defaultReconnectWaitIncreRate, float64(i-1))
|
||||
time.Sleep(time.Duration(waitTime) * time.Millisecond)
|
||||
}
|
||||
for i := 0; ; i++ {
|
||||
err := f.connect()
|
||||
if err == nil {
|
||||
f.send()
|
||||
return
|
||||
}
|
||||
}()
|
||||
if i == f.Config.MaxRetry {
|
||||
// TODO: What we can do when connection failed MaxRetry times?
|
||||
panic("fluent#reconnect: failed to reconnect!")
|
||||
}
|
||||
waitTime := f.Config.RetryWait * e(defaultReconnectWaitIncreRate, float64(i-1))
|
||||
time.Sleep(time.Duration(waitTime) * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fluent) flushBuffer() {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.pending = f.pending[0:0]
|
||||
}
|
||||
func (f *Fluent) send() error {
|
||||
f.muconn.Lock()
|
||||
defer f.muconn.Unlock()
|
||||
|
||||
func (f *Fluent) send() (err error) {
|
||||
if f.conn == nil {
|
||||
if f.reconnecting == false {
|
||||
f.mu.Lock()
|
||||
f.reconnecting = true
|
||||
f.mu.Unlock()
|
||||
f.reconnect()
|
||||
go f.reconnect()
|
||||
}
|
||||
err = errors.New("fluent#send: can't send logs, client is reconnecting")
|
||||
} else {
|
||||
f.mu.Lock()
|
||||
_, err = f.conn.Write(f.pending)
|
||||
f.mu.Unlock()
|
||||
return errors.New("fluent#send: can't send logs, client is reconnecting")
|
||||
}
|
||||
return
|
||||
|
||||
f.mubuff.Lock()
|
||||
defer f.mubuff.Unlock()
|
||||
|
||||
var err error
|
||||
if len(f.pending) > 0 {
|
||||
_, err = f.conn.Write(f.pending)
|
||||
if err != nil {
|
||||
f.conn.Close()
|
||||
f.conn = nil
|
||||
} else {
|
||||
f.pending = f.pending[:0]
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|