vendor: go.uber.org/zap v1.21.0

full diff: https://github.com/uber-go/zap/compare/v1.17.0...v1.21.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

This commit is contained in: parent 066fb6c69e, commit b0e20e1b3c
23 changed files with 720 additions and 184 deletions
go.mod

@@ -156,7 +156,7 @@ require (
	go.opentelemetry.io/proto/otlp v0.12.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.17.0 // indirect
	go.uber.org/zap v1.21.0 // indirect
	golang.org/x/crypto v0.1.0 // indirect
	golang.org/x/oauth2 v0.1.0 // indirect
	golang.org/x/text v0.4.0 // indirect
go.sum

@@ -138,6 +138,8 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k=
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -1147,6 +1149,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=

@@ -1154,8 +1157,9 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
12 vendor/go.uber.org/zap/.readme.tmpl (generated, vendored)

@@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt).

<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)

[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
[doc]: https://godoc.org/go.uber.org/zap
[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
[ci]: https://travis-ci.com/uber-go/zap
[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
[doc]: https://pkg.go.dev/go.uber.org/zap
[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
104 vendor/go.uber.org/zap/CHANGELOG.md (generated, vendored)

@@ -1,4 +1,108 @@
# Changelog
All notable changes to this project will be documented in this file.

This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## 1.21.0 (7 Feb 2022)

Enhancements:
* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
  string.

Bugfixes:
* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.

Other changes:
* [#1052][]: Improve encoding performance when the `AddCaller` and
  `AddStacktrace` options are used together.

[#1047]: https://github.com/uber-go/zap/pull/1047
[#1048]: https://github.com/uber-go/zap/pull/1048
[#1052]: https://github.com/uber-go/zap/pull/1052
[#1058]: https://github.com/uber-go/zap/pull/1058

Thanks to @aerosol and @Techassi for their contributions to this release.
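A minimal sketch of the level-parsing helpers described in the 1.21.0 entry above. The `"warn"` configuration string and the production-config wiring are illustrative assumptions, not part of the upstream changelog:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Parse a plain zapcore.Level from text ("debug", "info", "warn", "error", ...).
	lvl, err := zapcore.ParseLevel("warn")
	if err != nil {
		panic(err)
	}

	// Parse an AtomicLevel, which can also be adjusted later at runtime.
	atom, err := zap.ParseAtomicLevel("warn")
	if err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = atom // drive the logger's minimum level from the parsed value
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Warn("minimum enabled level", zap.Stringer("level", lvl))
}
```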
## 1.20.0 (4 Jan 2022)

Enhancements:
* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
  characters between log statements.
* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
  encoding of reflected log fields.

Bugfixes:
* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
  methods when the methods return.
* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.

Other changes:
* [#1028][]: Drop support for Go < 1.15.

[#554]: https://github.com/uber-go/zap/pull/554
[#989]: https://github.com/uber-go/zap/pull/989
[#1011]: https://github.com/uber-go/zap/pull/1011
[#1017]: https://github.com/uber-go/zap/pull/1017
[#1028]: https://github.com/uber-go/zap/pull/1028
[#1033]: https://github.com/uber-go/zap/pull/1033
[#1039]: https://github.com/uber-go/zap/pull/1039

Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
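A small sketch of the two 1.20.0 encoder knobs mentioned above, `SkipLineEnding` and `NewReflectedEncoder`. The os.Stdout sink and the tweaked `json.Encoder` are assumptions chosen only to make the example self-contained:

```go
package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig()
	// Do not append "\n" after each entry; useful when another layer frames records.
	encCfg.SkipLineEnding = true
	// Customize how zap.Reflect / fallback values are serialized.
	encCfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(true) // differs from zap's default, which disables HTML escaping
		return enc
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(encCfg), zapcore.AddSync(os.Stdout), zap.InfoLevel)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("reflected field", zap.Reflect("payload", map[string]string{"a": "<b>"}))
}
```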
## 1.19.1 (8 Sep 2021)

Bugfixes:
* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
* [#1003][]: JSON: Fix inaccurate precision when encoding float32.

[#1001]: https://github.com/uber-go/zap/pull/1001
[#1003]: https://github.com/uber-go/zap/pull/1003

## 1.19.0 (9 Aug 2021)

Enhancements:
* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
  better.

[#975]: https://github.com/uber-go/zap/pull/975
[#984]: https://github.com/uber-go/zap/pull/984

Thanks to @lancoLiu and @thockin for their contributions to this release.

## 1.18.1 (28 Jun 2021)

Bugfixes:
* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.

[#974]: https://github.com/uber-go/zap/pull/974

## 1.18.0 (28 Jun 2021)

Enhancements:
* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
  messages in-memory and flushes them periodically.
* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
* [#897][]: Add `zap.WithClock` option to control the source of time via the
  new `zapcore.Clock` interface.
* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
  methods don't match expectations.
* [#943][]: Add support for filtering by level or arbitrary matcher function to
  `zaptest/observer`.
* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
  `buffer.Buffer`.

Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
for their contributions to this release.

[#691]: https://github.com/uber-go/zap/pull/691
[#897]: https://github.com/uber-go/zap/pull/897
[#943]: https://github.com/uber-go/zap/pull/943
[#949]: https://github.com/uber-go/zap/pull/949
[#961]: https://github.com/uber-go/zap/pull/961
[#971]: https://github.com/uber-go/zap/pull/971
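The 1.18.0 entry above introduces `zapio.Writer`, which adapts a `*zap.Logger` into an `io.Writer`. A minimal sketch; the production logger and the Debug target level are arbitrary choices for illustration:

```go
package main

import (
	"io"

	"go.uber.org/zap"
	"go.uber.org/zap/zapio"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// Route anything written to w into the logger at Debug level.
	w := &zapio.Writer{Log: logger, Level: zap.DebugLevel}
	defer w.Close() // flush any partially written line

	io.WriteString(w, "hello from an io.Writer\n")
}
```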
## 1.17.0 (25 May 2021)
44 vendor/go.uber.org/zap/README.md (generated, vendored)

@@ -66,38 +66,38 @@ Log a message and 10 fields:

| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 862 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
| zerolog | 4021 ns/op | +366% | 76 allocs/op
| go-kit | 4542 ns/op | +427% | 105 allocs/op
| apex/log | 26785 ns/op | +3007% | 115 allocs/op
| logrus | 29501 ns/op | +3322% | 125 allocs/op
| log15 | 29906 ns/op | +3369% | 122 allocs/op
| :zap: zap | 2900 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op
| zerolog | 10639 ns/op | +267% | 32 allocs/op
| go-kit | 14434 ns/op | +398% | 59 allocs/op
| logrus | 17104 ns/op | +490% | 81 allocs/op
| apex/log | 32424 ns/op | +1018% | 66 allocs/op
| log15 | 33579 ns/op | +1058% | 76 allocs/op

Log a message with a logger that already has 10 fields of context:

| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 126 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
| zerolog | 88 ns/op | -30% | 0 allocs/op
| go-kit | 5087 ns/op | +3937% | 103 allocs/op
| log15 | 18548 ns/op | +14621% | 73 allocs/op
| apex/log | 26012 ns/op | +20544% | 104 allocs/op
| logrus | 27236 ns/op | +21516% | 113 allocs/op
| :zap: zap | 373 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op
| zerolog | 288 ns/op | -23% | 0 allocs/op
| go-kit | 11785 ns/op | +3060% | 58 allocs/op
| logrus | 19629 ns/op | +5162% | 70 allocs/op
| log15 | 21866 ns/op | +5762% | 72 allocs/op
| apex/log | 30890 ns/op | +8182% | 55 allocs/op

Log a static string, without any context or `printf`-style templating:

| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 118 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
| zerolog | 93 ns/op | -21% | 0 allocs/op
| go-kit | 280 ns/op | +137% | 11 allocs/op
| standard library | 499 ns/op | +323% | 2 allocs/op
| apex/log | 1990 ns/op | +1586% | 10 allocs/op
| logrus | 3129 ns/op | +2552% | 24 allocs/op
| log15 | 3887 ns/op | +3194% | 23 allocs/op
| :zap: zap | 381 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op
| zerolog | 369 ns/op | -3% | 0 allocs/op
| standard library | 385 ns/op | +1% | 2 allocs/op
| go-kit | 606 ns/op | +59% | 11 allocs/op
| logrus | 1730 ns/op | +354% | 25 allocs/op
| apex/log | 1998 ns/op | +424% | 7 allocs/op
| log15 | 4546 ns/op | +1093% | 22 allocs/op

## Development Status: Stable
18 vendor/go.uber.org/zap/buffer/buffer.go (generated, vendored)

@@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) {
	return len(bs), nil
}

// WriteByte writes a single byte to the Buffer.
//
// Error returned is always nil, function signature is compatible
// with bytes.Buffer and bufio.Writer
func (b *Buffer) WriteByte(v byte) error {
	b.AppendByte(v)
	return nil
}

// WriteString writes a string to the Buffer.
//
// Error returned is always nil, function signature is compatible
// with bytes.Buffer and bufio.Writer
func (b *Buffer) WriteString(s string) (int, error) {
	b.AppendString(s)
	return len(s), nil
}

// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
	if i := len(b.bs) - 1; i >= 0 {
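The new `WriteByte` and `WriteString` methods above make `buffer.Buffer` usable through the standard `io.ByteWriter` and `io.StringWriter` interfaces (PR #691). A small sketch using a pooled buffer; the pool and the strings written are illustrative:

```go
package main

import (
	"fmt"
	"io"

	"go.uber.org/zap/buffer"
)

func main() {
	pool := buffer.NewPool()
	buf := pool.Get()
	defer buf.Free() // return the buffer to the pool when done

	// Compile-time checks: the new methods satisfy the standard interfaces.
	var _ io.StringWriter = buf
	var _ io.ByteWriter = buf

	io.WriteString(buf, "hello")
	buf.WriteByte('\n')

	fmt.Print(buf.String())
}
```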
1 vendor/go.uber.org/zap/global.go (generated, vendored)

@@ -31,6 +31,7 @@ import (
)

const (
	_stdLogDefaultDepth      = 1
	_loggerWriterDepth       = 2
	_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
		"https://github.com/uber-go/zap/issues/new and reference this error: %v"
17 vendor/go.uber.org/zap/level.go (generated, vendored)

@@ -86,6 +86,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
	return a
}

// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseAtomicLevel(text string) (AtomicLevel, error) {
	a := NewAtomicLevel()
	l, err := zapcore.ParseLevel(text)
	if err != nil {
		return a, err
	}

	a.SetLevel(l)
	return a, nil
}

// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
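The `ParseAtomicLevel` helper added above returns an `AtomicLevel`, which also satisfies `zapcore.LevelEnabler` and can be adjusted at runtime. A brief sketch; the console encoder and stdout sink are assumptions for illustration:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom, err := zap.ParseAtomicLevel("info")
	if err != nil {
		panic(err)
	}

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stdout),
		atom, // AtomicLevel is a zapcore.LevelEnabler
	)
	logger := zap.New(core)

	logger.Debug("dropped: level is info")
	atom.SetLevel(zap.DebugLevel) // raise verbosity at runtime
	logger.Debug("now visible")
}
```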
80 vendor/go.uber.org/zap/logger.go (generated, vendored)

@@ -24,10 +24,9 @@ import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
	"time"

	"go.uber.org/zap/internal/bufferpool"
	"go.uber.org/zap/zapcore"
)

@@ -51,6 +50,8 @@ type Logger struct {
	addStack zapcore.LevelEnabler

	callerSkip int

	clock zapcore.Clock
}

// New constructs a new Logger from the provided zapcore.Core and Options. If

@@ -71,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
		core:        core,
		errorOutput: zapcore.Lock(os.Stderr),
		addStack:    zapcore.FatalLevel + 1,
		clock:       zapcore.DefaultClock,
	}
	return log.WithOptions(options...)
}

@@ -85,6 +87,7 @@ func NewNop() *Logger {
		core:        zapcore.NewNopCore(),
		errorOutput: zapcore.AddSync(ioutil.Discard),
		addStack:    zapcore.FatalLevel + 1,
		clock:       zapcore.DefaultClock,
	}
}

@@ -256,8 +259,10 @@ func (log *Logger) clone() *Logger {
}

func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
	// check must always be called directly by a method in the Logger interface
	// (e.g., Check, Info, Fatal).
	// Logger.check must always be called directly by a method in the
	// Logger interface (e.g., Check, Info, Fatal).
	// This skips Logger.check and the Info/Fatal/Check/etc. method that
	// called it.
	const callerSkipOffset = 2

	// Check the level first to reduce the cost of disabled log calls.

@@ -270,7 +275,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
	// log message will actually be written somewhere.
	ent := zapcore.Entry{
		LoggerName: log.name,
		Time:       time.Now(),
		Time:       log.clock.Now(),
		Level:      lvl,
		Message:    msg,
	}

@@ -304,42 +309,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {

	// Thread the error output through to the CheckedEntry.
	ce.ErrorOutput = log.errorOutput
	if log.addCaller {
		frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
		if !defined {
			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())

	addStack := log.addStack.Enabled(ce.Level)
	if !log.addCaller && !addStack {
		return ce
	}

	// Adding the caller or stack trace requires capturing the callers of
	// this function. We'll share information between these two.
	stackDepth := stacktraceFirst
	if addStack {
		stackDepth = stacktraceFull
	}
	stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
	defer stack.Free()

	if stack.Count() == 0 {
		if log.addCaller {
			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
			log.errorOutput.Sync()
		}
		return ce
	}

	ce.Entry.Caller = zapcore.EntryCaller{
		Defined: defined,
	frame, more := stack.Next()

	if log.addCaller {
		ce.Caller = zapcore.EntryCaller{
			Defined:  frame.PC != 0,
			PC:       frame.PC,
			File:     frame.File,
			Line:     frame.Line,
			Function: frame.Function,
		}
	}
	if log.addStack.Enabled(ce.Entry.Level) {
		ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String

	if addStack {
		buffer := bufferpool.Get()
		defer buffer.Free()

		stackfmt := newStackFormatter(buffer)

		// We've already extracted the first frame, so format that
		// separately and defer to stackfmt for the rest.
		stackfmt.FormatFrame(frame)
		if more {
			stackfmt.FormatStack(stack)
		}
		ce.Stack = buffer.String()
	}

	return ce
}

// getCallerFrame gets caller frame. The argument skip is the number of stack
// frames to ascend, with 0 identifying the caller of getCallerFrame. The
// boolean ok is false if it was not possible to recover the information.
//
// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
	const skipOffset = 2 // skip getCallerFrame and Callers

	pc := make([]uintptr, 1)
	numFrames := runtime.Callers(skip+skipOffset, pc)
	if numFrames < 1 {
		return
	}

	frame, _ = runtime.CallersFrames(pc).Next()
	return frame, frame.PC != 0
}
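The logger.go changes above make caller annotation and stack capture share a single runtime.Callers pass (PR #1052). Nothing changes on the API side; a short sketch of the two options whose combination benefits, with the Warn threshold being an arbitrary choice:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction(
		zap.AddCaller(),                  // annotate entries with file:line of the call site
		zap.AddStacktrace(zap.WarnLevel), // attach a stack trace at Warn and above
	)
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Warn("caller and stack trace are captured in one pass")
}
```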
8 vendor/go.uber.org/zap/options.go (generated, vendored)

@@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option {
		log.onFatal = action
	})
}

// WithClock specifies the clock used by the logger to determine the current
// time for logged entries. Defaults to the system clock with time.Now.
func WithClock(clock zapcore.Clock) Option {
	return optionFunc(func(log *Logger) {
		log.clock = clock
	})
}
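`WithClock` above lets tests or special environments control timestamps. A minimal sketch of a fixed clock that satisfies `zapcore.Clock`; the `constantClock` type and the frozen time are hypothetical, for illustration only:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

// constantClock always reports the same instant; handy for deterministic test output.
type constantClock struct{ t time.Time }

func (c constantClock) Now() time.Time { return c.t }
func (c constantClock) NewTicker(d time.Duration) *time.Ticker {
	return time.NewTicker(d)
}

func main() {
	frozen := time.Date(2022, 2, 7, 0, 0, 0, 0, time.UTC)

	logger, err := zap.NewProduction(zap.WithClock(constantClock{t: frozen}))
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("every entry carries the frozen timestamp")
}
```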
179 vendor/go.uber.org/zap/stacktrace.go (generated, vendored)

@@ -24,62 +24,153 @@ import (
	"runtime"
	"sync"

	"go.uber.org/zap/buffer"
	"go.uber.org/zap/internal/bufferpool"
)

var (
	_stacktracePool = sync.Pool{
		New: func() interface{} {
			return newProgramCounters(64)
		},
	}
var _stacktracePool = sync.Pool{
	New: func() interface{} {
		return &stacktrace{
			storage: make([]uintptr, 64),
		}
	},
}

type stacktrace struct {
	pcs    []uintptr // program counters; always a subslice of storage
	frames *runtime.Frames

	// The size of pcs varies depending on requirements:
	// it will be one if only the first frame was requested,
	// and otherwise it will reflect the depth of the call stack.
	//
	// storage decouples the slice we need (pcs) from the slice we pool.
	// We will always allocate a reasonably large storage, but we'll use
	// only as much of it as we need.
	storage []uintptr
}

// stacktraceDepth specifies how deep of a stack trace should be captured.
type stacktraceDepth int

const (
	// stacktraceFirst captures only the first frame.
	stacktraceFirst stacktraceDepth = iota

	// stacktraceFull captures the entire call stack, allocating more
	// storage for it if needed.
	stacktraceFull
)

// captureStacktrace captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// captureStacktrace.
//
// The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
	stack := _stacktracePool.Get().(*stacktrace)

	switch depth {
	case stacktraceFirst:
		stack.pcs = stack.storage[:1]
	case stacktraceFull:
		stack.pcs = stack.storage
	}

	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
	// itself. +2 to skip captureStacktrace and runtime.Callers.
	numFrames := runtime.Callers(
		skip+2,
		stack.pcs,
	)

	// runtime.Callers truncates the recorded stacktrace if there is no
	// room in the provided slice. For the full stack trace, keep expanding
	// storage until there are fewer frames than there is room.
	if depth == stacktraceFull {
		pcs := stack.pcs
		for numFrames == len(pcs) {
			pcs = make([]uintptr, len(pcs)*2)
			numFrames = runtime.Callers(skip+2, pcs)
		}

		// Discard old storage instead of returning it to the pool.
		// This will adjust the pool size over time if stack traces are
		// consistently very deep.
		stack.storage = pcs
		stack.pcs = pcs[:numFrames]
	} else {
		stack.pcs = stack.pcs[:numFrames]
	}

	stack.frames = runtime.CallersFrames(stack.pcs)
	return stack
}

// Free releases resources associated with this stacktrace
// and returns it back to the pool.
func (st *stacktrace) Free() {
	st.frames = nil
	st.pcs = nil
	_stacktracePool.Put(st)
}

// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int {
	return len(st.pcs)
}

// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
	return st.frames.Next()
}

func takeStacktrace(skip int) string {
	stack := captureStacktrace(skip+1, stacktraceFull)
	defer stack.Free()

	buffer := bufferpool.Get()
	defer buffer.Free()
	programCounters := _stacktracePool.Get().(*programCounters)
	defer _stacktracePool.Put(programCounters)

	var numFrames int
	for {
		// Skip the call to runtime.Callers and takeStacktrace so that the
		// program counters start at the caller of takeStacktrace.
		numFrames = runtime.Callers(skip+2, programCounters.pcs)
		if numFrames < len(programCounters.pcs) {
			break
		}
		// Don't put the too-short counter slice back into the pool; this lets
		// the pool adjust if we consistently take deep stacktraces.
		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
	}

	i := 0
	frames := runtime.CallersFrames(programCounters.pcs[:numFrames])

	// Note: On the last iteration, frames.Next() returns false, with a valid
	// frame, but we ignore this frame. The last frame is a runtime frame which
	// adds noise, since it's only either runtime.main or runtime.goexit.
	for frame, more := frames.Next(); more; frame, more = frames.Next() {
		if i != 0 {
			buffer.AppendByte('\n')
		}
		i++
		buffer.AppendString(frame.Function)
		buffer.AppendByte('\n')
		buffer.AppendByte('\t')
		buffer.AppendString(frame.File)
		buffer.AppendByte(':')
		buffer.AppendInt(int64(frame.Line))
	}

	stackfmt := newStackFormatter(buffer)
	stackfmt.FormatStack(stack)
	return buffer.String()
}

type programCounters struct {
	pcs []uintptr
// stackFormatter formats a stack trace into a readable string representation.
type stackFormatter struct {
	b        *buffer.Buffer
	nonEmpty bool // whether we've written at least one frame already
}

func newProgramCounters(size int) *programCounters {
	return &programCounters{make([]uintptr, size)}
// newStackFormatter builds a new stackFormatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter {
	return stackFormatter{b: b}
}

// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) {
	// Note: On the last iteration, frames.Next() returns false, with a valid
	// frame, but we ignore this frame. The last frame is a runtime frame which
	// adds noise, since it's only either runtime.main or runtime.goexit.
	for frame, more := stack.Next(); more; frame, more = stack.Next() {
		sf.FormatFrame(frame)
	}
}

// FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
	if sf.nonEmpty {
		sf.b.AppendByte('\n')
	}
	sf.nonEmpty = true
	sf.b.AppendString(frame.Function)
	sf.b.AppendByte('\n')
	sf.b.AppendByte('\t')
	sf.b.AppendString(frame.File)
	sf.b.AppendByte(':')
	sf.b.AppendInt(int64(frame.Line))
}
4 vendor/go.uber.org/zap/sugar.go (generated, vendored)

@@ -266,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {

		// Make sure this element isn't a dangling key.
		if i == len(args)-1 {
			s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i]))
			s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
			break
		}

@@ -287,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {

	// If we encountered any invalid key-value pairs, log an error.
	if len(invalid) > 0 {
		s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid))
		s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
	}
	return fields
}
188 vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go (generated, vendored, new file)

@@ -0,0 +1,188 @@
// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package zapcore

import (
	"bufio"
	"sync"
	"time"

	"go.uber.org/multierr"
)

const (
	// _defaultBufferSize specifies the default size used by Buffer.
	_defaultBufferSize = 256 * 1024 // 256 kB

	// _defaultFlushInterval specifies the default flush interval for
	// Buffer.
	_defaultFlushInterval = 30 * time.Second
)

// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
// fixed interval--whichever comes first.
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
type BufferedWriteSyncer struct {
	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
	// writes.
	//
	// This field is required.
	WS WriteSyncer

	// Size specifies the maximum amount of data the writer will buffer
	// before flushing.
	//
	// Defaults to 256 kB if unspecified.
	Size int

	// FlushInterval specifies how often the writer should flush data if
	// there have been no writes.
	//
	// Defaults to 30 seconds if unspecified.
	FlushInterval time.Duration

	// Clock, if specified, provides control of the source of time for the
	// writer.
	//
	// Defaults to the system clock.
	Clock Clock

	// unexported fields for state
	mu          sync.Mutex
	initialized bool // whether initialize() has run
	stopped     bool // whether Stop() has run
	writer      *bufio.Writer
	ticker      *time.Ticker
	stop        chan struct{} // closed when flushLoop should stop
	done        chan struct{} // closed when flushLoop has stopped
}

func (s *BufferedWriteSyncer) initialize() {
	size := s.Size
	if size == 0 {
		size = _defaultBufferSize
	}

	flushInterval := s.FlushInterval
	if flushInterval == 0 {
		flushInterval = _defaultFlushInterval
	}

	if s.Clock == nil {
		s.Clock = DefaultClock
	}

	s.ticker = s.Clock.NewTicker(flushInterval)
	s.writer = bufio.NewWriterSize(s.WS, size)
	s.stop = make(chan struct{})
	s.done = make(chan struct{})
	s.initialized = true
	go s.flushLoop()
}

// Write writes log data into buffer syncer directly, multiple Write calls will be batched,
// and log data will be flushed to disk when the buffer is full or periodically.
func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if !s.initialized {
		s.initialize()
	}

	// To avoid partial writes from being flushed, we manually flush the existing buffer if:
	// * The current write doesn't fit into the buffer fully, and
	// * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
	if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
		if err := s.writer.Flush(); err != nil {
			return 0, err
		}
	}

	return s.writer.Write(bs)
}

// Sync flushes buffered log data into disk directly.
func (s *BufferedWriteSyncer) Sync() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	var err error
	if s.initialized {
		err = s.writer.Flush()
	}

	return multierr.Append(err, s.WS.Sync())
}

// flushLoop flushes the buffer at the configured interval until Stop is
// called.
func (s *BufferedWriteSyncer) flushLoop() {
	defer close(s.done)

	for {
		select {
		case <-s.ticker.C:
			// we just simply ignore error here
			// because the underlying bufio writer stores any errors
			// and we return any error from Sync() as part of the close
			_ = s.Sync()
		case <-s.stop:
			return
		}
	}
}

// Stop closes the buffer, cleans up background goroutines, and flushes
// remaining unwritten data.
func (s *BufferedWriteSyncer) Stop() (err error) {
	var stopped bool

	// Critical section.
	func() {
		s.mu.Lock()
		defer s.mu.Unlock()

		if !s.initialized {
			return
		}

		stopped = s.stopped
		if stopped {
			return
		}
		s.stopped = true

		s.ticker.Stop()
		close(s.stop) // tell flushLoop to stop
		<-s.done      // and wait until it has
	}()

	// Don't call Sync on consecutive Stops.
	if !stopped {
		err = s.Sync()
	}

	return err
}
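A short sketch of how the new BufferedWriteSyncer might be wired into a core. The buffer size, flush interval, and os.Stdout sink are assumptions for illustration; Stop must be called so anything still buffered is flushed:

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stdout),
		Size:          512 * 1024,       // flush once 512 kB are buffered...
		FlushInterval: 10 * time.Second, // ...or every 10s, whichever comes first
	}
	defer ws.Stop() // flush remaining data and stop the background goroutine

	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zap.InfoLevel)
	logger := zap.New(core)

	logger.Info("buffered", zap.Int("attempt", 1))
}
```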
32 vendor/go.uber.org/zap/global_prego112.go → vendor/go.uber.org/zap/zapcore/clock.go (generated, vendored)

@@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc.
// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal

@@ -18,9 +18,31 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// See #682 for more information.
// +build !go1.12
package zapcore

package zap
import "time"

const _stdLogDefaultDepth = 2
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.
var DefaultClock = systemClock{}

// Clock is a source of time for logged entries.
type Clock interface {
	// Now returns the current local time.
	Now() time.Time

	// NewTicker returns *time.Ticker that holds a channel
	// that delivers "ticks" of a clock.
	NewTicker(time.Duration) *time.Ticker
}

// systemClock implements default Clock that uses system time.
type systemClock struct{}

func (systemClock) Now() time.Time {
	return time.Now()
}

func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
	return time.NewTicker(duration)
}
6 vendor/go.uber.org/zap/zapcore/console_encoder.go (generated, vendored)

@@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
		line.AppendString(ent.Stack)
	}

	if c.LineEnding != "" {
		line.AppendString(c.LineEnding)
	} else {
		line.AppendString(DefaultLineEnding)
	}
	line.AppendString(c.LineEnding)
	return line, nil
}
21 vendor/go.uber.org/zap/zapcore/encoder.go (generated, vendored)

@@ -22,6 +22,7 @@ package zapcore

import (
	"encoding/json"
	"io"
	"time"

	"go.uber.org/zap/buffer"

@@ -312,14 +313,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
	// Set the keys used for each log entry. If any key is empty, that portion
	// of the entry is omitted.
	MessageKey    string `json:"messageKey" yaml:"messageKey"`
	LevelKey      string `json:"levelKey" yaml:"levelKey"`
	TimeKey       string `json:"timeKey" yaml:"timeKey"`
	NameKey       string `json:"nameKey" yaml:"nameKey"`
	CallerKey     string `json:"callerKey" yaml:"callerKey"`
	FunctionKey   string `json:"functionKey" yaml:"functionKey"`
	StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
	LineEnding    string `json:"lineEnding" yaml:"lineEnding"`
	MessageKey     string `json:"messageKey" yaml:"messageKey"`
	LevelKey       string `json:"levelKey" yaml:"levelKey"`
	TimeKey        string `json:"timeKey" yaml:"timeKey"`
	NameKey        string `json:"nameKey" yaml:"nameKey"`
	CallerKey      string `json:"callerKey" yaml:"callerKey"`
	FunctionKey    string `json:"functionKey" yaml:"functionKey"`
	StacktraceKey  string `json:"stacktraceKey" yaml:"stacktraceKey"`
	SkipLineEnding bool   `json:"skipLineEnding" yaml:"skipLineEnding"`
	LineEnding     string `json:"lineEnding" yaml:"lineEnding"`
	// Configure the primitive representations of common complex types. For
	// example, some users may want all time.Times serialized as floating-point
	// seconds since epoch, while others may prefer ISO8601 strings.

@@ -330,6 +332,9 @@ type EncoderConfig struct {
	// Unlike the other primitive type encoders, EncodeName is optional. The
	// zero value falls back to FullNameEncoder.
	EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
	// Configure the encoder for interface{} type objects.
	// If not provided, objects are encoded using json.Encoder
	NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
	// Configures the field separator used by the console encoder. Defaults
	// to tab.
	ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
10 vendor/go.uber.org/zap/zapcore/entry.go (generated, vendored)

@@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
		// If the entry is dirty, log an internal error; because the
		// CheckedEntry is being used after it was returned to the pool,
		// the message may be an amalgamation from multiple call sites.
		fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry)
		fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
		ce.ErrorOutput.Sync()
	}
	return

@@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) {
	for i := range ce.cores {
		err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
	}
	if ce.ErrorOutput != nil {
		if err != nil {
			fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err)
			ce.ErrorOutput.Sync()
		}
	if err != nil && ce.ErrorOutput != nil {
		fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
		ce.ErrorOutput.Sync()
	}

	should, msg := ce.should, ce.Message
2 vendor/go.uber.org/zap/zapcore/error.go (generated, vendored)

@@ -83,7 +83,7 @@ type errorGroup interface {
	Errors() []error
}

// Note that errArry and errArrayElem are very similar to the version
// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
104 vendor/go.uber.org/zap/zapcore/json_encoder.go (generated, vendored)

@@ -22,7 +22,6 @@ package zapcore

import (
	"encoding/base64"
	"encoding/json"
	"math"
	"sync"
	"time"

@@ -64,7 +63,7 @@ type jsonEncoder struct {

	// for encoding generic values by reflection
	reflectBuf *buffer.Buffer
	reflectEnc *json.Encoder
	reflectEnc ReflectedEncoder
}

// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder

@@ -82,6 +81,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}

func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
	if cfg.SkipLineEnding {
		cfg.LineEnding = ""
	} else if cfg.LineEnding == "" {
		cfg.LineEnding = DefaultLineEnding
	}

	// If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
	if cfg.NewReflectedEncoder == nil {
		cfg.NewReflectedEncoder = defaultReflectedEncoder
	}

	return &jsonEncoder{
		EncoderConfig: &cfg,
		buf:           bufferpool.Get(),

@@ -118,6 +128,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
	enc.AppendComplex128(val)
}

func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
	enc.addKey(key)
	enc.AppendComplex64(val)
}

func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
	enc.addKey(key)
	enc.AppendDuration(val)

@@ -128,6 +143,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) {
	enc.AppendFloat64(val)
}

func (enc *jsonEncoder) AddFloat32(key string, val float32) {
	enc.addKey(key)
	enc.AppendFloat32(val)
}

func (enc *jsonEncoder) AddInt64(key string, val int64) {
	enc.addKey(key)
	enc.AppendInt64(val)

@@ -136,10 +156,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {

func (enc *jsonEncoder) resetReflectBuf() {
	if enc.reflectBuf == nil {
		enc.reflectBuf = bufferpool.Get()
		enc.reflectEnc = json.NewEncoder(enc.reflectBuf)

		// For consistency with our custom JSON encoder.
		enc.reflectEnc.SetEscapeHTML(false)
		enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
	} else {
		enc.reflectBuf.Reset()
	}

@@ -201,10 +218,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}

func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
	// Close ONLY new openNamespaces that are created during
	// AppendObject().
	old := enc.openNamespaces
	enc.openNamespaces = 0
	enc.addElementSeparator()
	enc.buf.AppendByte('{')
	err := obj.MarshalLogObject(enc)
	enc.buf.AppendByte('}')
	enc.closeOpenNamespaces()
	enc.openNamespaces = old
	return err
}

@@ -220,16 +243,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
	enc.buf.AppendByte('"')
}

func (enc *jsonEncoder) AppendComplex128(val complex128) {
// appendComplex appends the encoded form of the provided complex128 value.
// precision specifies the encoding precision for the real and imaginary
// components of the complex number.
func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
	enc.addElementSeparator()
	// Cast to a platform-independent, fixed-size type.
	r, i := float64(real(val)), float64(imag(val))
	enc.buf.AppendByte('"')
	// Because we're always in a quoted string, we can use strconv without
	// special-casing NaN and +/-Inf.
	enc.buf.AppendFloat(r, 64)
	enc.buf.AppendByte('+')
	enc.buf.AppendFloat(i, 64)
	enc.buf.AppendFloat(r, precision)
	// If imaginary part is less than 0, minus (-) sign is added by default
	// by AppendFloat.
	if i >= 0 {
		enc.buf.AppendByte('+')
	}
	enc.buf.AppendFloat(i, precision)
	enc.buf.AppendByte('i')
	enc.buf.AppendByte('"')
}

@@ -292,29 +322,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
	enc.buf.AppendUint(val)
}

func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }

func (enc *jsonEncoder) Clone() Encoder {
	clone := enc.clone()

@@ -335,7 +364,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
	final := enc.clone()
	final.buf.AppendByte('{')

	if final.LevelKey != "" {
	if final.LevelKey != "" && final.EncodeLevel != nil {
		final.addKey(final.LevelKey)
		cur := final.buf.Len()
		final.EncodeLevel(ent.Level, final)

@@ -396,11 +425,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
		final.AddString(final.StacktraceKey, ent.Stack)
	}
	final.buf.AppendByte('}')
	if final.LineEnding != "" {
		final.buf.AppendString(final.LineEnding)
	} else {
		final.buf.AppendString(DefaultLineEnding)
	}
	final.buf.AppendString(final.LineEnding)

	ret := final.buf
	putJSONEncoder(final)

@@ -415,6 +440,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
	for i := 0; i < enc.openNamespaces; i++ {
		enc.buf.AppendByte('}')
	}
	enc.openNamespaces = 0
}

func (enc *jsonEncoder) addKey(key string) {
12 vendor/go.uber.org/zap/zapcore/level.go (generated, vendored)

@@ -55,6 +55,18 @@ const (
	_maxLevel = FatalLevel
)

// ParseLevel parses a level based on the lower-case or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseLevel(text string) (Level, error) {
	var level Level
	err := level.UnmarshalText([]byte(text))
	return level, err
}

// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
	switch l {
@@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc.
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal

@@ -18,9 +18,24 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// See #682 for more information.
// +build go1.12
package zapcore

package zap
import (
	"encoding/json"
	"io"
)

const _stdLogDefaultDepth = 1
// ReflectedEncoder serializes log fields that can't be serialized with Zap's
// JSON encoder. These have the ReflectType field type.
// Use EncoderConfig.NewReflectedEncoder to set this.
type ReflectedEncoder interface {
	// Encode encodes and writes to the underlying data stream.
	Encode(interface{}) error
}

func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
	enc := json.NewEncoder(w)
	// For consistency with our custom JSON encoder.
	enc.SetEscapeHTML(false)
	return enc
}
27 vendor/go.uber.org/zap/zapcore/sampler.go (generated, vendored)

@@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
// For example,
//
//   core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
// every 5th log entry with the same level and message in that interval.
//
// If thereafter is zero, the Core will drop all log entries after the first N
// in that interval.
//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
// Keep in mind that zap's sampling implementation is optimized for speed over
// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {

@@ -197,12 +208,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
		return ce
	}

	counter := s.counts.get(ent.Level, ent.Message)
	n := counter.IncCheckReset(ent.Time, s.tick)
	if n > s.first && (n-s.first)%s.thereafter != 0 {
		s.hook(ent, LogDropped)
		return ce
	if ent.Level >= _minLevel && ent.Level <= _maxLevel {
		counter := s.counts.get(ent.Level, ent.Message)
		n := counter.IncCheckReset(ent.Time, s.tick)
		if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
			s.hook(ent, LogDropped)
			return ce
		}
		s.hook(ent, LogSampled)
	}
	s.hook(ent, LogSampled)
	return s.Core.Check(ent, ce)
}
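The doc comment above explains the sampling knobs; the zero-`thereafter` case is what the guarded modulo now covers. A brief sketch wrapping a core with NewSamplerWithOptions and a hook that counts drops; the counter and the sink are illustrative:

```go
package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped int64

	base := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(os.Stdout), zap.InfoLevel)

	// Per tick: keep the first 10 identical entries, then every 5th one.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				atomic.AddInt64(&dropped, 1)
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 100; i++ {
		logger.Info("hot loop")
	}
	logger.Info("done", zap.Int64("dropped", atomic.LoadInt64(&dropped)))
}
```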
2 vendor/modules.txt (vendored)

@@ -889,7 +889,7 @@ go.uber.org/atomic
# go.uber.org/multierr v1.8.0
## explicit; go 1.14
go.uber.org/multierr
# go.uber.org/zap v1.17.0
# go.uber.org/zap v1.21.0
## explicit; go 1.13
go.uber.org/zap
go.uber.org/zap/buffer