vendor buildkit v0.10.0

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
Author: CrazyMax, 2022-03-18 16:01:18 +01:00
Parent: 6b9b445af6
Commit: a2aaf4cc83
GPG key ID: 3248E46B6BB8C7F7 (no known key found for this signature in the database)
586 changed files with 60370 additions and 8750 deletions


@@ -32,6 +32,7 @@ import (
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/source"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/leaseutil"
@@ -72,7 +73,7 @@ func NewSource(opt SourceOpt) (*Source, error) {
// ID returns image scheme identifier
func (is *Source) ID() string {
return source.DockerImageScheme
return srctypes.DockerImageScheme
}
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
@@ -300,51 +301,51 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
return err
}
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
p.resolveLocal()
if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.platform)
if err != nil {
return "", nil, false, err
return "", "", nil, false, err
}
return dgst.String(), nil, false, nil
return dgst.String(), dgst.String(), nil, false, nil
}
if p.config != nil {
k := cacheKeyFromConfig(p.config).String()
if k == "" {
return digest.FromBytes(p.config).String(), nil, true, nil
return digest.FromBytes(p.config).String(), digest.FromBytes(p.config).String(), nil, true, nil
}
return k, nil, true, nil
return k, k, nil, true, nil
}
if err := p.resolve(ctx, g); err != nil {
return "", nil, false, err
return "", "", nil, false, err
}
if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.platform)
if err != nil {
return "", nil, false, err
return "", "", nil, false, err
}
return dgst.String(), nil, false, nil
return dgst.String(), dgst.String(), nil, false, nil
}
if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
return "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
}
k := cacheKeyFromConfig(p.config).String()
if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
dgst, err := p.mainManifestKey(p.platform)
if err != nil {
return "", nil, false, err
return "", "", nil, false, err
}
return dgst.String(), nil, true, nil
return dgst.String(), dgst.String(), nil, true, nil
}
return k, nil, true, nil
return k, "", nil, true, nil
}
func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
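The extra string return threaded through `CacheKey` above matches the widened source-instance contract in buildkit v0.10. A rough sketch of the shape, hedged since the upstream declaration is not part of this diff (the name `pin` is assumed):

```go
// Approximate shape of the v0.10 contract this puller now satisfies.
// The second return carries an immutable content digest for the resolved
// source when one is known, and is empty otherwise — as in the final
// `return k, "", nil, true, nil` above.
type SourceInstance interface {
	CacheKey(ctx context.Context, g session.Group, index int) (key, pin string, opts solver.CacheOpts, done bool, err error)
	Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error)
}
```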
@@ -405,7 +406,7 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
pctx, stopProgress := context.WithCancel(ctx)
pw, _, ctx := progress.FromContext(ctx)
pw, _, ctx := progress.NewFromContext(ctx)
defer pw.Close()
progressDone := make(chan struct{})
@@ -586,8 +587,8 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
// TODO: handle windows layers for cross platform builds
if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
if p.src.RecordType != "" && ref.GetRecordType() == "" {
if err := ref.SetRecordType(p.src.RecordType); err != nil {
ref.Release(context.TODO())
return nil, err
}
@@ -806,7 +807,7 @@ type statusInfo struct {
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
pw, _, _ := progress.NewFromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
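// The excerpt ends here; for orientation, an assumed completion of this
// helper (illustrative, not verbatim from the commit): close the Status
// literal, write the "started" event, and return a closure that records
// completion through the same writer.
//
//	}
//	_ = pw.Write(id, st)
//	return func(err error) error {
//		now := time.Now()
//		st.Completed = &now
//		_ = pw.Write(id, st)
//		_ = pw.Close()
//		return err
//	}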


@@ -22,11 +22,6 @@ import (
"github.com/pkg/errors"
)
func init() {
// See https://github.com/moby/buildkit/pull/1993.
v1.EmptyLayerRemovalSupported = false
}
// ResolveCacheImporterFunc returns a resolver function for local inline cache
func ResolveCacheImporterFunc(sm *session.Manager, resolverFunc docker.RegistryHosts, cs content.Store, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
@@ -75,7 +70,7 @@ func (li *localImporter) Resolve(ctx context.Context, _ specs.Descriptor, id str
if err != nil {
return nil, err
}
return solver.NewCacheManager(id, keysStorage, resultStorage), nil
return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil
}
func (li *localImporter) importInlineCache(ctx context.Context, dt []byte, cc solver.CacheExporterTarget) error {


@@ -8,7 +8,6 @@ import (
"github.com/containerd/containerd/content/local"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/snapshots"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
@@ -38,7 +37,6 @@ import (
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/worker"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)
@@ -169,11 +167,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
return nil, errors.Errorf("snapshotter doesn't support differ")
}
p, err := parsePlatforms(archutil.SupportedPlatforms(true))
if err != nil {
return nil, err
}
leases, err := lm.List(context.TODO(), "labels.\"buildkit/lease.temporary\"")
if err != nil {
return nil, err
@@ -184,7 +177,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
wopt := mobyworker.Opt{
ID: "moby",
MetadataStore: md,
ContentStore: store,
CacheManager: cm,
GCPolicy: gcPolicy,
@@ -196,7 +188,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
Exporter: exp,
Transport: rt,
Layers: layers,
Platforms: p,
Platforms: archutil.SupportedPlatforms(true),
}
wc := &worker.Controller{}
@@ -268,18 +260,6 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
return gcPolicy, nil
}
func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
out := make([]specs.Platform, 0, len(platformsStr))
for _, s := range platformsStr {
p, err := platforms.Parse(s)
if err != nil {
return nil, err
}
out = append(out, platforms.Normalize(p))
}
return out, nil
}
func getEntitlements(conf config.BuilderConfig) []string {
var ents []string
// In case of no config settings, NetworkHost should be enabled & SecurityInsecure must be disabled.


@@ -14,7 +14,6 @@ import (
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
@@ -48,20 +47,12 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
for k, v := range opt {
switch k {
case keyImageName:
for _, v := range strings.Split(v, ",") {
ref, err := distref.ParseNormalizedNamed(v)
if err != nil {
return nil, err
}
i.targetNames = append(i.targetNames, ref)
}
case exptypes.ExporterImageConfigKey:
i.targetName = v
default:
if i.meta == nil {
i.meta = make(map[string][]byte)
}
i.meta[k] = []byte(v)
default:
logrus.Warnf("image exporter: unknown option %s", k)
}
}
return i, nil
@@ -69,14 +60,18 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type imageExporterInstance struct {
*imageExporter
targetNames []distref.Named
meta map[string][]byte
targetName string
meta map[string][]byte
}
func (e *imageExporterInstance) Name() string {
return "exporting to image"
}
func (e *imageExporterInstance) Config() exporter.Config {
return exporter.Config{}
}
func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
if len(inp.Refs) > 1 {
return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
@@ -115,7 +110,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
if ref != nil {
layersDone := oneOffProgress(ctx, "exporting layers")
if err := ref.Finalize(ctx, true); err != nil {
if err := ref.Finalize(ctx); err != nil {
return nil, layersDone(err)
}
@@ -162,10 +157,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
_ = configDone(nil)
if e.opt.ReferenceStore != nil {
for _, targetName := range e.targetNames {
tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
targetNames := strings.Split(e.targetName, ",")
for _, targetName := range targetNames {
tagDone := oneOffProgress(ctx, "naming to "+targetName)
tref, err := distref.ParseNormalizedNamed(targetName)
if err != nil {
return nil, err
}
if err := e.opt.ReferenceStore.AddTag(tref, digest.Digest(id), true); err != nil {
return nil, tagDone(err)
}
_ = tagDone(nil)
@@ -173,6 +172,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
}
return map[string]string{
"containerimage.digest": id.String(),
exptypes.ExporterImageConfigDigestKey: configDigest.String(),
exptypes.ExporterImageDigestKey: id.String(),
}, nil
}
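With this hunk the export response gains a config-digest entry alongside the manifest digest. A hedged sketch of reading it on the caller's side (variable names invented; the constants are the `exptypes` keys used above):

```go
// Illustrative consumer of the Export response map:
resp, err := inst.Export(ctx, src, sessionID)
if err != nil {
	return err
}
imageDigest := resp[exptypes.ExporterImageDigestKey]        // replaces the old "containerimage.digest" literal
configDigest := resp[exptypes.ExporterImageConfigDigestKey] // digest of the image config blob
_, _ = imageDigest, configDigest
```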


@@ -3,9 +3,9 @@ package containerimage
import (
"context"
"encoding/json"
"runtime"
"time"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/system"
@@ -20,13 +20,15 @@ import (
// )
func emptyImageConfig() ([]byte, error) {
pl := platforms.Normalize(platforms.DefaultSpec())
img := ocispec.Image{
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
Architecture: pl.Architecture,
OS: pl.OS,
Variant: pl.Variant,
}
img.RootFS.Type = "layers"
img.Config.WorkingDir = "/"
img.Config.Env = []string{"PATH=" + system.DefaultPathEnvUnix}
img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
dt, err := json.Marshal(img)
return dt, errors.Wrap(err, "failed to create empty image config")
}
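Swapping `runtime.GOARCH`/`runtime.GOOS` for a normalized default platform preserves the variant (relevant on, say, linux/arm/v7) and derives PATH from the target OS. A minimal sketch of the difference, with illustrative values:

```go
// Minimal sketch, values illustrative:
pl := platforms.Normalize(platforms.DefaultSpec())
// On a 32-bit ARM daemon this yields {OS: "linux", Architecture: "arm",
// Variant: "v7"}, whereas runtime.GOARCH alone ("arm") drops the variant.
img := ocispec.Image{
	Architecture: pl.Architecture,
	OS:           pl.OS,
	Variant:      pl.Variant,
}
// PATH now follows the target OS instead of always using the unix default:
img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
```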
@@ -119,7 +121,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
// some history items are missing. add them based on the ref metadata
for _, md := range refMeta[historyLayers:] {
history = append(history, ocispec.History{
Created: &md.createdAt,
Created: md.createdAt,
CreatedBy: md.description,
Comment: "buildkit.exporter.image.v0",
})
@@ -130,7 +132,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
for i, h := range history {
if !h.EmptyLayer {
if h.Created == nil {
h.Created = &refMeta[layerIndex].createdAt
h.Created = refMeta[layerIndex].createdAt
}
layerIndex++
}
@@ -173,33 +175,39 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
type refMetadata struct {
description string
createdAt time.Time
createdAt *time.Time
}
func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
if limit <= 0 {
return nil
}
meta := refMetadata{
description: "created by buildkit", // shouldn't be shown but don't fail build
createdAt: time.Now(),
}
if ref == nil {
return append(getRefMetadata(nil, limit-1), meta)
return make([]refMetadata, limit)
}
if descr := cache.GetDescription(ref.Metadata()); descr != "" {
meta.description = descr
layerChain := ref.LayerChain()
defer layerChain.Release(context.TODO())
if limit < len(layerChain) {
layerChain = layerChain[len(layerChain)-limit:]
}
meta.createdAt = cache.GetCreatedAt(ref.Metadata())
p := ref.Parent()
if p != nil {
defer p.Release(context.TODO())
metas := make([]refMetadata, len(layerChain))
for i, layer := range layerChain {
meta := &metas[i]
if description := layer.GetDescription(); description != "" {
meta.description = description
} else {
meta.description = "created by buildkit" // shouldn't be shown but don't fail build
}
createdAt := layer.GetCreatedAt()
meta.createdAt = &createdAt
}
return append(getRefMetadata(p, limit-1), meta)
return metas
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
pw, _, _ := progress.NewFromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,


@@ -5,7 +5,6 @@ import (
"fmt"
"io"
nethttp "net/http"
"strings"
"time"
"github.com/containerd/containerd/content"
@@ -19,7 +18,6 @@ import (
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
@@ -45,7 +43,6 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/semaphore"
)
@@ -62,7 +59,6 @@ type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
@@ -95,7 +91,6 @@ func NewWorker(opt Opt) (*Worker, error) {
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
@@ -105,7 +100,6 @@ func NewWorker(opt Opt) (*Worker, error) {
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
@@ -116,7 +110,6 @@ func NewWorker(opt Opt) (*Worker, error) {
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
@@ -148,9 +141,8 @@ func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
if _, ok := pm[platforms.Format(p)]; !ok {
w.Opt.Platforms = append(w.Opt.Platforms, p)
}
}
}
@@ -170,18 +162,13 @@ func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
return w.CacheManager().Get(ctx, id, nil, opts...)
}
// ResolveOp converts a LLB vertex into a LLB operation
@@ -193,9 +180,9 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
@@ -246,7 +233,7 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
if err := ref.Finalize(ctx); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
@@ -277,32 +264,20 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
mds, err := mounts.SearchCacheDir(ctx, w.CacheManager(), id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
for _, md := range mds {
if err := md.SetCachePolicyDefault(); err != nil {
return err
}
if err := md.ClearCacheDirIndex(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, md.ID()); err == nil {
go mref.Release(context.TODO())
}
}
}
@@ -432,7 +407,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogr
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, "", discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
@@ -479,7 +454,7 @@ func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer,
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
pw, _, _ := progress.NewFromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,


@@ -185,7 +185,7 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
if err != nil {
var uiErr *instructions.UnknownInstruction
var uiErr *instructions.UnknownInstructionError
if errors.As(err, &uiErr) {
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
}
@@ -336,7 +336,7 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co
// ensure that the commands are valid
for _, n := range dockerfile.AST.Children {
if !validCommitCommands[n.Value] {
if !validCommitCommands[strings.ToLower(n.Value)] {
return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value))
}
}
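The lookup now folds the instruction name to lower case before consulting `validCommitCommands`. A hypothetical illustration (the map contents are assumed, not taken from this diff):

```go
// Hypothetical: the validator map is keyed by lowercase instruction names,
// so a mixed-case name from a `docker commit --change` string must be folded.
validCommitCommands := map[string]bool{"cmd": true, "entrypoint": true, "env": true, "label": true}

name := "CMD"
ok := validCommitCommands[strings.ToLower(name)] // true; a raw lookup of "CMD" would miss this map
_ = ok
```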


@@ -208,7 +208,7 @@ func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error {
}
cmd, err := instructions.ParseCommand(ast.AST.Children[0])
if err != nil {
var uiErr *instructions.UnknownInstruction
var uiErr *instructions.UnknownInstructionError
if errors.As(err, &uiErr) {
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
}


@@ -57,6 +57,7 @@ import (
"github.com/docker/docker/runconfig"
volumesservice "github.com/docker/docker/volume/service"
"github.com/moby/buildkit/util/resolver"
resolverconfig "github.com/moby/buildkit/util/resolver/config"
"github.com/moby/locker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -155,7 +156,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
var (
registryKey = "docker.io"
mirrors = make([]string, len(daemon.configStore.Mirrors))
m = map[string]resolver.RegistryConfig{}
m = map[string]resolverconfig.RegistryConfig{}
)
// must trim "https://" or "http://" prefix
for i, v := range daemon.configStore.Mirrors {
@@ -165,11 +166,11 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
mirrors[i] = v
}
// set mirrors for default registry
m[registryKey] = resolver.RegistryConfig{Mirrors: mirrors}
m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
for _, v := range daemon.configStore.InsecureRegistries {
u, err := url.Parse(v)
c := resolver.RegistryConfig{}
c := resolverconfig.RegistryConfig{}
if err == nil {
v = u.Host
t := true
@@ -191,7 +192,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
if fis, err := os.ReadDir(certsDir); err == nil {
for _, fi := range fis {
if _, ok := m[fi.Name()]; !ok {
m[fi.Name()] = resolver.RegistryConfig{
m[fi.Name()] = resolverconfig.RegistryConfig{
TLSConfigDir: []string{filepath.Join(certsDir, fi.Name())},
}
}


@@ -6073,7 +6073,7 @@ func (s *DockerSuite) TestBuildLineErrorOnBuild(c *testing.T) {
ONBUILD
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "parse error line 2: ONBUILD requires at least one argument",
Err: "parse error on line 2: ONBUILD requires at least one argument",
})
}
@@ -6087,7 +6087,7 @@ func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *testing.T) {
ERROR
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "parse error line 3: unknown instruction: NOINSTRUCTION",
Err: "parse error on line 3: unknown instruction: NOINSTRUCTION",
})
}
@@ -6104,7 +6104,7 @@ func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *testing.T) {
CMD ["/bin/init"]
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "parse error line 6: unknown instruction: NOINSTRUCTION",
Err: "parse error on line 6: unknown instruction: NOINSTRUCTION",
})
}
@@ -6118,7 +6118,7 @@ func (s *DockerSuite) TestBuildLineErrorWithComments(c *testing.T) {
NOINSTRUCTION echo ba
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "parse error line 5: unknown instruction: NOINSTRUCTION",
Err: "parse error on line 5: unknown instruction: NOINSTRUCTION",
})
}


@@ -42,16 +42,16 @@ require (
github.com/google/go-cmp v0.5.7
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/go-immutable-radix v1.3.1
github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc
github.com/hashicorp/memberlist v0.2.4
github.com/hashicorp/serf v0.8.2
github.com/imdario/mergo v0.3.12
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee
github.com/klauspost/compress v1.14.3
github.com/klauspost/compress v1.15.0
github.com/miekg/dns v1.1.27
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a
github.com/moby/buildkit v0.10.0
github.com/moby/ipvs v1.0.1
github.com/moby/locker v1.0.1
github.com/moby/sys/mount v0.3.1
@@ -61,13 +61,13 @@ require (
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.0
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.12.1
github.com/rootless-containers/rootlesskit v0.14.6
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
@@ -83,14 +83,16 @@ require (
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
google.golang.org/grpc v1.43.0
google.golang.org/grpc v1.44.0
gotest.tools/v3 v3.1.0
)
require (
code.cloudfoundry.org/clock v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -98,12 +100,17 @@ require (
github.com/container-storage-interface/spec v1.5.0 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/go-runc v1.0.0 // indirect
github.com/containerd/stargz-snapshotter v0.11.2 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.11.2 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/googleapis v1.4.0 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
@@ -112,7 +119,7 @@ require (
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-msgpack v0.5.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -121,17 +128,16 @@ require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/hashstructure v1.0.0 // indirect
github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
github.com/philhofer/fwd v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rexray/gocsi v1.2.2 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/tinylib/msgp v1.1.0 // indirect
github.com/tonistiigi/go-archvariant v1.0.0 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
@@ -139,6 +145,16 @@ require (
go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
go.etcd.io/etcd/server/v3 v3.5.2 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
go.opentelemetry.io/otel v1.4.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
go.opentelemetry.io/otel/metric v0.27.0 // indirect
go.opentelemetry.io/otel/sdk v1.4.1 // indirect
go.opentelemetry.io/otel/trace v1.4.1 // indirect
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
@@ -157,8 +173,6 @@ replace (
github.com/armon/go-radix => github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8
github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/hashicorp/go-msgpack => github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67
github.com/hashicorp/go-multierror => github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/serf => github.com/hashicorp/serf v0.7.1-0.20160317193612-598c54895cc5

vendor/github.com/agext/levenshtein/.gitignore (generated, vendored; 53 lines added)

@@ -0,0 +1,53 @@
# Ignore docs files
_gh_pages
_site
# Ignore temporary files
README.html
coverage.out
.tmp
# Numerous always-ignore extensions
*.diff
*.err
*.log
*.orig
*.rej
*.swo
*.swp
*.vi
*.zip
*~
# OS or Editor folders
._*
.cache
.DS_Store
.idea
.project
.settings
.tmproj
*.esproj
*.sublime-project
*.sublime-workspace
nbproject
Thumbs.db
# Komodo
.komodotools
*.komodoproject
# SCSS-Lint
scss-lint-report.xml
# grunt-contrib-sass cache
.sass-cache
# Jekyll metadata
docs/.jekyll-metadata
# Folders to ignore
.build
.test
bower_components
node_modules

vendor/github.com/agext/levenshtein/.travis.yml (generated, vendored; 30 lines added)

@@ -0,0 +1,30 @@
language: go
sudo: false
matrix:
fast_finish: true
include:
- go: 1.14.x
env: TEST_METHOD=goveralls
- go: 1.13.x
- go: 1.12.x
- go: 1.11.x
- go: 1.10.x
- go: tip
- go: 1.9.x
- go: 1.8.x
- go: 1.7.x
- go: 1.6.x
- go: 1.5.x
allow_failures:
- go: tip
- go: 1.11.x
- go: 1.10.x
- go: 1.9.x
- go: 1.8.x
- go: 1.7.x
- go: 1.6.x
- go: 1.5.x
script: ./test.sh $TEST_METHOD
notifications:
email:
on_success: never

vendor/github.com/agext/levenshtein/DCO (generated, vendored; 36 lines added)

@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

vendor/github.com/agext/levenshtein/MAINTAINERS (generated, vendored; 1 line added)

@@ -0,0 +1 @@
Alex Bucataru <alex@alrux.com> (@AlexBucataru)

vendor/github.com/agext/levenshtein/NOTICE (generated, vendored; 5 lines added)

@@ -0,0 +1,5 @@
Alrux Go EXTensions (AGExt) - package levenshtein
Copyright 2016 ALRUX Inc.
This product includes software developed at ALRUX Inc.
(http://www.alrux.com/).

vendor/github.com/agext/levenshtein/README.md (generated, vendored; 38 lines added)

@@ -0,0 +1,38 @@
# A Go package for calculating the Levenshtein distance between two strings
[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein) 
[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
## Project Status
v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
## Overview
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
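A minimal usage sketch of the three metrics (illustrative; the printed values follow from the definitions above with default parameters):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// "flaw" -> "lawn": delete 'f', then insert 'n' — two unit-cost edits.
	fmt.Println(levenshtein.Distance("flaw", "lawn", nil)) // 2

	// Normalized to 0..1: 1 - dist/maxDist = 1 - 2/4 = 0.5.
	fmt.Println(levenshtein.Similarity("flaw", "lawn", nil)) // 0.5

	// Match would add a prefix bonus only above the default 0.7 threshold;
	// these strings share no common prefix, so the score is unchanged.
	fmt.Println(levenshtein.Match("flaw", "lawn", nil)) // 0.5
}
```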
## Installation
```
go get github.com/agext/levenshtein
```
## License
Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.

vendor/github.com/agext/levenshtein/levenshtein.go (generated, vendored; 290 lines added)

@@ -0,0 +1,290 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
*/
package levenshtein
// Calculate determines the Levenshtein distance between two strings, using
// the given costs for each edit operation. It returns the distance along with
// the lengths of the longest common prefix and suffix.
//
// If maxCost is non-zero, the calculation stops as soon as the distance is determined
// to be greater than maxCost. Therefore, any return value higher than maxCost is a
// lower bound for the actual distance.
func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
l1, l2 := len(str1), len(str2)
// trim common prefix, if any, as it doesn't affect the distance
for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
if str1[prefixLen] != str2[prefixLen] {
break
}
}
str1, str2 = str1[prefixLen:], str2[prefixLen:]
l1 -= prefixLen
l2 -= prefixLen
// trim common suffix, if any, as it doesn't affect the distance
for 0 < l1 && 0 < l2 {
if str1[l1-1] != str2[l2-1] {
str1, str2 = str1[:l1], str2[:l2]
break
}
l1--
l2--
suffixLen++
}
// if the first string is empty, the distance is the length of the second string times the cost of insertion
if l1 == 0 {
dist = l2 * insCost
return
}
// if the second string is empty, the distance is the length of the first string times the cost of deletion
if l2 == 0 {
dist = l1 * delCost
return
}
// variables used in inner "for" loops
var y, dy, c, l int
// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
if maxCost > 0 {
if subCost < delCost+insCost {
if maxCost >= l1*subCost+(l2-l1)*insCost {
maxCost = 0
}
} else {
if maxCost >= l1*delCost+l2*insCost {
maxCost = 0
}
}
}
if maxCost > 0 {
// prefer the longer string first, to minimize time;
// a swap also transposes the meanings of insertion and deletion.
if l1 < l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
// the length differential times cost of deletion is a lower bound for the cost;
// if it is higher than the maxCost, there is no point going into the main calculation.
if dist = (l1 - l2) * delCost; dist > maxCost {
return
}
d := make([]int, l1+1)
// offset and length of d in the current row
doff, dlen := 0, 1
for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
d[y] = dy
y++
dy = y * delCost
}
// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
for x := 0; x < l2; x++ {
dy, d[doff] = d[doff], d[doff]+insCost
for doff < l1 && d[doff] > maxCost && dlen > 0 {
if str1[doff] != str2[x] {
dy += subCost
}
doff++
dlen--
if c = d[doff] + insCost; c < dy {
dy = c
}
dy, d[doff] = d[doff], dy
}
for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
if y < l1 {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
y++
dlen++
}
}
// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
if dlen == 0 {
dist = maxCost + 1
return
}
}
if doff+dlen-1 < l1 {
dist = maxCost + 1
return
}
dist = d[l1]
} else {
// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
// worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
// a swap also transposes the meanings of insertion and deletion.
if l1 > l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
d := make([]int, l1+1)
for y = 1; y <= l1; y++ {
d[y] = y * delCost
}
for x := 0; x < l2; x++ {
dy, d[0] = d[0], d[0]+insCost
for y = 0; y < l1; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
}
dist = d[l1]
}
return
}
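// Example (illustrative): with unit costs and no maxCost,
//
//	Calculate([]rune("levenshtein"), []rune("levenstein"), 0, 1, 1, 1)
//
// returns dist=1 (a single deletion of 'h'), prefixLen=6 ("levens"),
// and suffixLen=4 ("tein").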
// Distance returns the Levenshtein distance between str1 and str2, using the
// default or provided cost values. Pass nil for the third argument to use the
// default cost of 1 for all three operations, with no maximum.
func Distance(str1, str2 string, p *Params) int {
if p == nil {
p = defaultParams
}
dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
return dist
}
// Similarity returns a score in the range of 0..1 for how similar the two strings are.
// A score of 1 means the strings are identical, and 0 means they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Similarity(str1, str2 string, p *Params) float64 {
return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
}
// Match returns a similarity score adjusted by the same method as proposed by Winkler for
// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
// similarity score is already over a threshold.
//
// The score is in the range of 0..1, with 1 meaning the strings are identical,
// and 0 meaning they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations, maximum length of
// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Match(str1, str2 string, p *Params) float64 {
s1, s2 := []rune(str1), []rune(str2)
l1, l2 := len(s1), len(s2)
// two empty strings are identical; shortcut also avoids divByZero issues later on.
if l1 == 0 && l2 == 0 {
return 1
}
if p == nil {
p = defaultParams
}
// a min over 1 can never be satisfied, so the score is 0.
if p.minScore > 1 {
return 0
}
insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
if l1 > l2 {
l1, l2, insCost, delCost = l2, l1, delCost, insCost
}
if p.subCost < delCost+insCost {
maxDist = l1*p.subCost + (l2-l1)*insCost
} else {
maxDist = l1*delCost + l2*insCost
}
// a zero min is always satisfied, so no need to set a max cost.
if p.minScore > 0 {
// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
// for the max cost, because a sim score below min cannot receive a bonus.
if p.minScore < p.bonusThreshold {
// round down the max - a cost equal to a rounded up max would already be under min.
max = int((1 - p.minScore) * float64(maxDist))
} else {
// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
}
}
dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
if max > 0 && dist > max {
return 0
}
sim := 1 - float64(dist)/float64(maxDist)
if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
if pl > p.bonusPrefix {
pl = p.bonusPrefix
}
sim += float64(pl) * p.bonusScale * (1 - sim)
}
if sim < p.minScore {
return 0
}
return sim
}

vendor/github.com/agext/levenshtein/params.go (generated, vendored; 152 lines added)

@@ -0,0 +1,152 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package levenshtein
// Params represents a set of parameter values for the various formulas involved
// in the calculation of the Levenshtein string metrics.
type Params struct {
insCost int
subCost int
delCost int
maxCost int
minScore float64
bonusPrefix int
bonusScale float64
bonusThreshold float64
}
var (
defaultParams = NewParams()
)
// NewParams creates a new set of parameters and initializes it with the default values.
func NewParams() *Params {
return &Params{
insCost: 1,
subCost: 1,
delCost: 1,
maxCost: 0,
minScore: 0,
bonusPrefix: 4,
bonusScale: .1,
bonusThreshold: .7,
}
}
// Clone returns a pointer to a copy of the receiver parameter set, or of a new
// default parameter set if the receiver is nil.
func (p *Params) Clone() *Params {
if p == nil {
return NewParams()
}
return &Params{
insCost: p.insCost,
subCost: p.subCost,
delCost: p.delCost,
maxCost: p.maxCost,
minScore: p.minScore,
bonusPrefix: p.bonusPrefix,
bonusScale: p.bonusScale,
bonusThreshold: p.bonusThreshold,
}
}
// InsCost overrides the default value of 1 for the cost of insertion.
// The new value must be zero or positive.
func (p *Params) InsCost(v int) *Params {
if v >= 0 {
p.insCost = v
}
return p
}
// SubCost overrides the default value of 1 for the cost of substitution.
// The new value must be zero or positive.
func (p *Params) SubCost(v int) *Params {
if v >= 0 {
p.subCost = v
}
return p
}
// DelCost overrides the default value of 1 for the cost of deletion.
// The new value must be zero or positive.
func (p *Params) DelCost(v int) *Params {
if v >= 0 {
p.delCost = v
}
return p
}
// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
// The calculation of Distance() stops when the result is guaranteed to exceed
// this maximum, returning a lower-bound rather than exact value.
// The new value must be zero or positive.
func (p *Params) MaxCost(v int) *Params {
if v >= 0 {
p.maxCost = v
}
return p
}
// MinScore overrides the default value of 0 for the minimum similarity score.
// Scores below this threshold are returned as 0 by Similarity() and Match().
// The new value must be zero or positive. Note that a minimum greater than 1
// can never be satisfied, resulting in a score of 0 for any pair of strings.
func (p *Params) MinScore(v float64) *Params {
if v >= 0 {
p.minScore = v
}
return p
}
// BonusPrefix overrides the default value for the maximum length of
// common prefix to be considered for bonus by Match().
// The new value must be zero or positive.
func (p *Params) BonusPrefix(v int) *Params {
if v >= 0 {
p.bonusPrefix = v
}
return p
}
// BonusScale overrides the default value for the scaling factor used by Match()
// in calculating the bonus.
// The new value must be zero or positive. To guarantee that the similarity score
// remains in the interval 0..1, this scaling factor is not allowed to exceed
// 1 / BonusPrefix.
func (p *Params) BonusScale(v float64) *Params {
if v >= 0 {
p.bonusScale = v
}
// the bonus cannot exceed (1-sim), or the score may become greater than 1.
if float64(p.bonusPrefix)*p.bonusScale > 1 {
p.bonusScale = 1 / float64(p.bonusPrefix)
}
return p
}
// BonusThreshold overrides the default value for the minimum similarity score
// for which Match() can assign a bonus.
// The new value must be zero or positive. Note that a threshold greater than 1
// effectively makes Match() become the equivalent of Similarity().
func (p *Params) BonusThreshold(v float64) *Params {
if v >= 0 {
p.bonusThreshold = v
}
return p
}
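A short usage sketch of the fluent parameter API above (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	p := levenshtein.NewParams().
		SubCost(2). // a substitution now costs as much as a delete plus an insert
		MaxCost(5)  // abandon the calculation once the distance provably exceeds 5

	// Two double-cost substitutions (k->s, e->i) plus one insertion ('g').
	fmt.Println(levenshtein.Distance("kitten", "sitting", p)) // 5
	// With MaxCost set, any result above 5 would only be a lower bound.
}
```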

vendor/github.com/agext/levenshtein/test.sh (generated, vendored; 10 lines added)

@@ -0,0 +1,10 @@
set -ev
if [[ "$1" == "goveralls" ]]; then
echo "Testing with goveralls..."
go get github.com/mattn/goveralls
$HOME/gopath/bin/goveralls -service=travis-ci
else
echo "Testing with go test..."
go test -v ./...
fi

vendor/github.com/armon/circbuf/.gitignore (generated, vendored; 22 lines added)

@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/armon/circbuf/LICENSE (generated, vendored; 20 lines added)

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Armon Dadgar
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/armon/circbuf/README.md (generated, vendored; 28 lines added)

@@ -0,0 +1,28 @@
circbuf
=======
This repository provides the `circbuf` package. This provides a `Buffer` object
which is a circular (or ring) buffer. It has a fixed size, but can be written
to infinitely. Only the last `size` bytes are ever retained. The buffer implements
the `io.Writer` interface.
Documentation
=============
Full documentation can be found on [Godoc](http://godoc.org/github.com/armon/circbuf)
Usage
=====
The `circbuf` package is very easy to use:
```go
buf, _ := NewBuffer(6)
buf.Write([]byte("hello world"))
if string(buf.Bytes()) != " world" {
panic("should only have last 6 bytes!")
}
```

vendor/github.com/armon/circbuf/circbuf.go (generated, vendored; 92 lines added)

@@ -0,0 +1,92 @@
package circbuf
import (
"fmt"
)
// Buffer implements a circular buffer. It is a fixed size,
// and new writes overwrite older data, such that for a buffer
// of size N, for any amount of writes, only the last N bytes
// are retained.
type Buffer struct {
data []byte
size int64
writeCursor int64
written int64
}
// NewBuffer creates a new buffer of a given size. The size
// must be greater than 0.
func NewBuffer(size int64) (*Buffer, error) {
if size <= 0 {
return nil, fmt.Errorf("Size must be positive")
}
b := &Buffer{
size: size,
data: make([]byte, size),
}
return b, nil
}
// Write writes up to len(buf) bytes to the internal ring,
// overriding older data if necessary.
func (b *Buffer) Write(buf []byte) (int, error) {
// Account for total bytes written
n := len(buf)
b.written += int64(n)
// If the buffer is larger than ours, then we only care
// about the last size bytes anyways
if int64(n) > b.size {
buf = buf[int64(n)-b.size:]
}
// Copy in place
remain := b.size - b.writeCursor
copy(b.data[b.writeCursor:], buf)
if int64(len(buf)) > remain {
copy(b.data, buf[remain:])
}
// Update location of the cursor
b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size)
return n, nil
}
// Size returns the size of the buffer
func (b *Buffer) Size() int64 {
return b.size
}
// TotalWritten provides the total number of bytes written
func (b *Buffer) TotalWritten() int64 {
return b.written
}
// Bytes provides a slice of the bytes written. This
// slice should not be written to.
func (b *Buffer) Bytes() []byte {
switch {
case b.written >= b.size && b.writeCursor == 0:
return b.data
case b.written > b.size:
out := make([]byte, b.size)
copy(out, b.data[b.writeCursor:])
copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor])
return out
default:
return b.data[:b.writeCursor]
}
}
// Reset resets the buffer so it has no content.
func (b *Buffer) Reset() {
b.writeCursor = 0
b.written = 0
}
// String returns the contents of the buffer as a string
func (b *Buffer) String() string {
return string(b.Bytes())
}


@@ -0,0 +1,203 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package walking
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"math/rand"
"time"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type walkingDiff struct {
store content.Store
}
var emptyDesc = ocispec.Descriptor{}
var uncompressed = "containerd.io/uncompressed"
// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is
// calculated by mounting both the upper and lower mount sets and walking the
// mounted directories concurrently. Changes are calculated by comparing files
// against each other or by comparing file existence between directories.
// NewWalkingDiff uses no special characteristics of the mount sets and is
// expected to work with any filesystem.
func NewWalkingDiff(store content.Store) diff.Comparer {
return &walkingDiff{
store: store,
}
}
// Compare creates a diff between the given mounts and uploads the result
// to the content store.
func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
var config diff.Config
for _, opt := range opts {
if err := opt(&config); err != nil {
return emptyDesc, err
}
}
var isCompressed bool
if config.Compressor != nil {
if config.MediaType == "" {
return emptyDesc, errors.New("media type must be explicitly specified when using custom compressor")
}
isCompressed = true
} else {
if config.MediaType == "" {
config.MediaType = ocispec.MediaTypeImageLayerGzip
}
switch config.MediaType {
case ocispec.MediaTypeImageLayer:
case ocispec.MediaTypeImageLayerGzip:
isCompressed = true
default:
return emptyDesc, fmt.Errorf("unsupported diff media type: %v: %w", config.MediaType, errdefs.ErrNotImplemented)
}
}
var ocidesc ocispec.Descriptor
if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error {
return mount.WithTempMount(ctx, upper, func(upperRoot string) error {
var newReference bool
if config.Reference == "" {
newReference = true
config.Reference = uniqueRef()
}
cw, err := s.store.Writer(ctx,
content.WithRef(config.Reference),
content.WithDescriptor(ocispec.Descriptor{
MediaType: config.MediaType, // most contentstore implementations just ignore this
}))
if err != nil {
return fmt.Errorf("failed to open writer: %w", err)
}
// errOpen is set when an error occurs while the content writer has not been
// committed or closed yet to force a cleanup
var errOpen error
defer func() {
if errOpen != nil {
cw.Close()
if newReference {
if abortErr := s.store.Abort(ctx, config.Reference); abortErr != nil {
log.G(ctx).WithError(abortErr).WithField("ref", config.Reference).Warnf("failed to delete diff upload")
}
}
}
}()
if !newReference {
if errOpen = cw.Truncate(0); errOpen != nil {
return errOpen
}
}
if isCompressed {
dgstr := digest.SHA256.Digester()
var compressed io.WriteCloser
if config.Compressor != nil {
compressed, errOpen = config.Compressor(cw, config.MediaType)
if errOpen != nil {
return fmt.Errorf("failed to get compressed stream: %w", errOpen)
}
} else {
compressed, errOpen = compression.CompressStream(cw, compression.Gzip)
if errOpen != nil {
return fmt.Errorf("failed to get compressed stream: %w", errOpen)
}
}
errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot)
compressed.Close()
if errOpen != nil {
return fmt.Errorf("failed to write compressed diff: %w", errOpen)
}
if config.Labels == nil {
config.Labels = map[string]string{}
}
config.Labels[uncompressed] = dgstr.Digest().String()
} else {
if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); errOpen != nil {
return fmt.Errorf("failed to write diff: %w", errOpen)
}
}
var commitopts []content.Opt
if config.Labels != nil {
commitopts = append(commitopts, content.WithLabels(config.Labels))
}
dgst := cw.Digest()
if errOpen = cw.Commit(ctx, 0, dgst, commitopts...); errOpen != nil {
if !errdefs.IsAlreadyExists(errOpen) {
return fmt.Errorf("failed to commit: %w", errOpen)
}
errOpen = nil
}
info, err := s.store.Info(ctx, dgst)
if err != nil {
return fmt.Errorf("failed to get info from content store: %w", err)
}
if info.Labels == nil {
info.Labels = make(map[string]string)
}
// Set uncompressed label if digest already existed without label
if _, ok := info.Labels[uncompressed]; !ok {
info.Labels[uncompressed] = config.Labels[uncompressed]
if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil {
return fmt.Errorf("error setting uncompressed label: %w", err)
}
}
ocidesc = ocispec.Descriptor{
MediaType: config.MediaType,
Size: info.Size,
Digest: info.Digest,
}
return nil
})
}); err != nil {
return emptyDesc, err
}
return ocidesc, nil
}
func uniqueRef() string {
t := time.Now()
var b [3]byte
// Ignore read failures; a failure merely decreases uniqueness
rand.Read(b[:])
return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:]))
}


@ -0,0 +1,126 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package converter provides image converter
package converter
import (
"context"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/platforms"
)
type convertOpts struct {
layerConvertFunc ConvertFunc
docker2oci bool
indexConvertFunc ConvertFunc
platformMC platforms.MatchComparer
}
// Opt is an option for Convert()
type Opt func(*convertOpts) error
// WithLayerConvertFunc specifies the function that converts layers.
func WithLayerConvertFunc(fn ConvertFunc) Opt {
return func(copts *convertOpts) error {
copts.layerConvertFunc = fn
return nil
}
}
// WithDockerToOCI converts Docker media types into OCI ones when v is true.
func WithDockerToOCI(v bool) Opt {
return func(copts *convertOpts) error {
copts.docker2oci = v // honor the argument rather than hard-coding true
return nil
}
}
// WithPlatform specifies the platform.
// Defaults to all platforms.
func WithPlatform(p platforms.MatchComparer) Opt {
return func(copts *convertOpts) error {
copts.platformMC = p
return nil
}
}
// WithIndexConvertFunc specifies the function that converts manifests and index (manifest lists).
// Defaults to DefaultIndexConvertFunc.
func WithIndexConvertFunc(fn ConvertFunc) Opt {
return func(copts *convertOpts) error {
copts.indexConvertFunc = fn
return nil
}
}
// Client is implemented by *containerd.Client.
type Client interface {
WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error)
ContentStore() content.Store
ImageService() images.Store
}
// Convert converts an image.
func Convert(ctx context.Context, client Client, dstRef, srcRef string, opts ...Opt) (*images.Image, error) {
var copts convertOpts
for _, o := range opts {
if err := o(&copts); err != nil {
return nil, err
}
}
if copts.platformMC == nil {
copts.platformMC = platforms.All
}
if copts.indexConvertFunc == nil {
copts.indexConvertFunc = DefaultIndexConvertFunc(copts.layerConvertFunc, copts.docker2oci, copts.platformMC)
}
ctx, done, err := client.WithLease(ctx)
if err != nil {
return nil, err
}
defer done(ctx)
cs := client.ContentStore()
is := client.ImageService()
srcImg, err := is.Get(ctx, srcRef)
if err != nil {
return nil, err
}
dstDesc, err := copts.indexConvertFunc(ctx, cs, srcImg.Target)
if err != nil {
return nil, err
}
dstImg := srcImg
dstImg.Name = dstRef
if dstDesc != nil {
dstImg.Target = *dstDesc
}
var res images.Image
if dstRef != srcRef {
_ = is.Delete(ctx, dstRef)
res, err = is.Create(ctx, dstImg)
} else {
res, err = is.Update(ctx, dstImg)
}
return &res, err
}


@ -0,0 +1,453 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package converter
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// ConvertFunc returns a converted content descriptor.
// When the content was not converted, ConvertFunc returns nil.
type ConvertFunc func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error)
// DefaultIndexConvertFunc is the default convert func used by Convert.
func DefaultIndexConvertFunc(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer) ConvertFunc {
c := &defaultConverter{
layerConvertFunc: layerConvertFunc,
docker2oci: docker2oci,
platformMC: platformMC,
diffIDMap: make(map[digest.Digest]digest.Digest),
}
return c.convert
}
// ConvertHookFunc is a callback function called during conversion of a blob.
// orgDesc is the original descriptor targeted for conversion; newDesc is non-nil only if a conversion happened.
type ConvertHookFunc func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error)
// ConvertHooks is a configuration for hook callbacks called during blob conversion.
type ConvertHooks struct {
// PostConvertHook is a callback function called for each blob after conversion is done.
PostConvertHook ConvertHookFunc
}
// IndexConvertFuncWithHook is the convert func used by Convert with hook functions support.
func IndexConvertFuncWithHook(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer, hooks ConvertHooks) ConvertFunc {
c := &defaultConverter{
layerConvertFunc: layerConvertFunc,
docker2oci: docker2oci,
platformMC: platformMC,
diffIDMap: make(map[digest.Digest]digest.Digest),
hooks: hooks,
}
return c.convert
}
type defaultConverter struct {
layerConvertFunc ConvertFunc
docker2oci bool
platformMC platforms.MatchComparer
diffIDMap map[digest.Digest]digest.Digest // key: old diffID, value: new diffID
diffIDMapMu sync.RWMutex
hooks ConvertHooks
}
// convert dispatches on desc.MediaType and calls c.convert{Layer,Manifest,Index,Config}.
//
// Also converts media type if c.docker2oci is set.
func (c *defaultConverter) convert(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
var (
newDesc *ocispec.Descriptor
err error
)
if images.IsLayerType(desc.MediaType) {
newDesc, err = c.convertLayer(ctx, cs, desc)
} else if images.IsManifestType(desc.MediaType) {
newDesc, err = c.convertManifest(ctx, cs, desc)
} else if images.IsIndexType(desc.MediaType) {
newDesc, err = c.convertIndex(ctx, cs, desc)
} else if images.IsConfigType(desc.MediaType) {
newDesc, err = c.convertConfig(ctx, cs, desc)
}
if err != nil {
return nil, err
}
if c.hooks.PostConvertHook != nil {
if newDescPost, err := c.hooks.PostConvertHook(ctx, cs, desc, newDesc); err != nil {
return nil, err
} else if newDescPost != nil {
newDesc = newDescPost
}
}
if images.IsDockerType(desc.MediaType) {
if c.docker2oci {
if newDesc == nil {
newDesc = copyDesc(desc)
}
newDesc.MediaType = ConvertDockerMediaTypeToOCI(newDesc.MediaType)
} else if (newDesc == nil && len(desc.Annotations) != 0) || (newDesc != nil && len(newDesc.Annotations) != 0) {
// Annotations are supported only on OCI manifests,
// so we need to remove annotations for Docker media types.
if newDesc == nil {
newDesc = copyDesc(desc)
}
newDesc.Annotations = nil
}
}
logrus.WithField("old", desc).WithField("new", newDesc).Debugf("converted")
return newDesc, nil
}
func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor {
descCopy := desc
return &descCopy
}
// convertLayer converts image layers if c.layerConvertFunc is set.
//
// c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones.
func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
if c.layerConvertFunc != nil {
return c.layerConvertFunc(ctx, cs, desc)
}
return nil, nil
}
// convertManifest converts image manifests.
//
// - converts `.mediaType` if the target format is OCI
// - records diff ID changes in c.diffIDMap
func (c *defaultConverter) convertManifest(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
var (
manifest ocispec.Manifest
modified bool
)
labels, err := readJSON(ctx, cs, &manifest, desc)
if err != nil {
return nil, err
}
if labels == nil {
labels = make(map[string]string)
}
if images.IsDockerType(manifest.MediaType) && c.docker2oci {
manifest.MediaType = ConvertDockerMediaTypeToOCI(manifest.MediaType)
modified = true
}
var mu sync.Mutex
eg, ctx2 := errgroup.WithContext(ctx)
for i, l := range manifest.Layers {
i := i
l := l
oldDiffID, err := images.GetDiffID(ctx, cs, l)
if err != nil {
return nil, err
}
eg.Go(func() error {
newL, err := c.convert(ctx2, cs, l)
if err != nil {
return err
}
if newL != nil {
mu.Lock()
// update GC labels
ClearGCLabels(labels, l.Digest)
labelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", i)
labels[labelKey] = newL.Digest.String()
manifest.Layers[i] = *newL
modified = true
mu.Unlock()
// diffID changes if the tar entries were modified.
// diffID stays the same if only the compression type was changed.
// When the diffID changed, add a map entry so that we can update the image config.
newDiffID, err := images.GetDiffID(ctx, cs, *newL)
if err != nil {
return err
}
if newDiffID != oldDiffID {
c.diffIDMapMu.Lock()
c.diffIDMap[oldDiffID] = newDiffID
c.diffIDMapMu.Unlock()
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
newConfig, err := c.convert(ctx, cs, manifest.Config)
if err != nil {
return nil, err
}
if newConfig != nil {
ClearGCLabels(labels, manifest.Config.Digest)
labels["containerd.io/gc.ref.content.config"] = newConfig.Digest.String()
manifest.Config = *newConfig
modified = true
}
if modified {
return writeJSON(ctx, cs, &manifest, desc, labels)
}
return nil, nil
}
// convertIndex converts an image index.
//
// - converts `.mediaType` if the target format is OCI
// - clears manifest entries that do not match c.platformMC
func (c *defaultConverter) convertIndex(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
var (
index ocispec.Index
modified bool
)
labels, err := readJSON(ctx, cs, &index, desc)
if err != nil {
return nil, err
}
if labels == nil {
labels = make(map[string]string)
}
if images.IsDockerType(index.MediaType) && c.docker2oci {
index.MediaType = ConvertDockerMediaTypeToOCI(index.MediaType)
modified = true
}
newManifests := make([]ocispec.Descriptor, len(index.Manifests))
newManifestsToBeRemoved := make(map[int]struct{}) // slice index
var mu sync.Mutex
eg, ctx2 := errgroup.WithContext(ctx)
for i, mani := range index.Manifests {
i := i
mani := mani
labelKey := fmt.Sprintf("containerd.io/gc.ref.content.m.%d", i)
eg.Go(func() error {
if mani.Platform != nil && !c.platformMC.Match(*mani.Platform) {
mu.Lock()
ClearGCLabels(labels, mani.Digest)
newManifestsToBeRemoved[i] = struct{}{}
modified = true
mu.Unlock()
return nil
}
newMani, err := c.convert(ctx2, cs, mani)
if err != nil {
return err
}
mu.Lock()
if newMani != nil {
ClearGCLabels(labels, mani.Digest)
labels[labelKey] = newMani.Digest.String()
// NOTE: to keep the manifest order, we write to slice index i explicitly
newManifests[i] = *newMani
modified = true
} else {
newManifests[i] = mani
}
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
if modified {
var newManifestsClean []ocispec.Descriptor
for i, m := range newManifests {
if _, ok := newManifestsToBeRemoved[i]; !ok {
newManifestsClean = append(newManifestsClean, m)
}
}
index.Manifests = newManifestsClean
return writeJSON(ctx, cs, &index, desc, labels)
}
return nil, nil
}
// convertConfig converts image config contents.
//
// - updates `.rootfs.diff_ids` using c.diffIDMap .
//
// - clears legacy `.config.Image` and `.container_config.Image` fields if `.rootfs.diff_ids` was updated.
func (c *defaultConverter) convertConfig(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
var (
cfg DualConfig
cfgAsOCI ocispec.Image // read only, used for parsing cfg
modified bool
)
labels, err := readJSON(ctx, cs, &cfg, desc)
if err != nil {
return nil, err
}
if labels == nil {
labels = make(map[string]string)
}
if _, err := readJSON(ctx, cs, &cfgAsOCI, desc); err != nil {
return nil, err
}
if rootfs := cfgAsOCI.RootFS; rootfs.Type == "layers" {
rootfsModified := false
c.diffIDMapMu.RLock()
for i, oldDiffID := range rootfs.DiffIDs {
if newDiffID, ok := c.diffIDMap[oldDiffID]; ok && newDiffID != oldDiffID {
rootfs.DiffIDs[i] = newDiffID
rootfsModified = true
}
}
c.diffIDMapMu.RUnlock()
if rootfsModified {
rootfsB, err := json.Marshal(rootfs)
if err != nil {
return nil, err
}
cfg["rootfs"] = (*json.RawMessage)(&rootfsB)
modified = true
}
}
if modified {
// cfg may hold dummy values in the legacy `.config.Image` and `.container_config.Image`
// fields. We should clear these IDs since we changed the diff IDs.
if _, err := clearDockerV1DummyID(cfg); err != nil {
return nil, err
}
return writeJSON(ctx, cs, &cfg, desc, labels)
}
return nil, nil
}
// clearDockerV1DummyID clears the dummy values for legacy `.config.Image` and `.container_config.Image`.
// Returns true if the cfg was modified.
func clearDockerV1DummyID(cfg DualConfig) (bool, error) {
var modified bool
f := func(k string) error {
if configX, ok := cfg[k]; ok && configX != nil {
var configField map[string]*json.RawMessage
if err := json.Unmarshal(*configX, &configField); err != nil {
return err
}
delete(configField, "Image")
b, err := json.Marshal(configField)
if err != nil {
return err
}
cfg[k] = (*json.RawMessage)(&b)
modified = true
}
return nil
}
if err := f("config"); err != nil {
return modified, err
}
if err := f("container_config"); err != nil {
return modified, err
}
return modified, nil
}
// DualConfig covers Docker config (v1.0, v1.1, v1.2) and OCI config.
// Unmarshalled as map[string]*json.RawMessage to retain unknown fields on remarshalling.
type DualConfig map[string]*json.RawMessage
func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) {
info, err := cs.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
labels := info.Labels
b, err := content.ReadBlob(ctx, cs, desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, x); err != nil {
return nil, err
}
return labels, nil
}
func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) {
b, err := json.Marshal(x)
if err != nil {
return nil, err
}
dgst := digest.SHA256.FromBytes(b)
ref := fmt.Sprintf("converter-write-json-%s", dgst.String())
w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
if err != nil {
return nil, err
}
if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil {
return nil, err
}
if err := w.Close(); err != nil {
return nil, err
}
newDesc := oldDesc
newDesc.Size = int64(len(b))
newDesc.Digest = dgst
return &newDesc, nil
}
// ConvertDockerMediaTypeToOCI converts a Docker media type string to its OCI
// equivalent, returning the input unchanged if there is no mapping.
func ConvertDockerMediaTypeToOCI(mt string) string {
switch mt {
case images.MediaTypeDockerSchema2ManifestList:
return ocispec.MediaTypeImageIndex
case images.MediaTypeDockerSchema2Manifest:
return ocispec.MediaTypeImageManifest
case images.MediaTypeDockerSchema2LayerGzip:
return ocispec.MediaTypeImageLayerGzip
case images.MediaTypeDockerSchema2LayerForeignGzip:
return ocispec.MediaTypeImageLayerNonDistributableGzip
case images.MediaTypeDockerSchema2Layer:
return ocispec.MediaTypeImageLayer
case images.MediaTypeDockerSchema2LayerForeign:
return ocispec.MediaTypeImageLayerNonDistributable
case images.MediaTypeDockerSchema2Config:
return ocispec.MediaTypeImageConfig
default:
return mt
}
}
// ClearGCLabels clears GC labels for the given digest.
func ClearGCLabels(labels map[string]string, dgst digest.Digest) {
for k, v := range labels {
if v == dgst.String() && strings.HasPrefix(k, "containerd.io/gc.ref.content") {
delete(labels, k)
}
}
}

vendor/github.com/containerd/stargz-snapshotter/LICENSE (generated, vendored)

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,67 @@
The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0.
However, the Stargz Snapshotter project contains modified subcomponents from the Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for these subcomponents is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain the following file header.
```
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the NOTICE.md file.
```
This source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions, and disclaimer are the following.
```
Copyright (c) 2019 Google LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
The Stargz Snapshotter project also contains modified benchmarking code from the HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. This source code is governed by an MIT license. The copyright notice, conditions, and disclaimer are the following; the file in the benchmarking code carries them as its file header.
```
The MIT License (MIT)
Copyright (c) 2015 Tintri
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```


@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,628 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"golang.org/x/sync/errgroup"
)
type options struct {
chunkSize int
compressionLevel int
prioritizedFiles []string
missedPrioritizedFiles *[]string
compression Compression
}
type Option func(o *options) error
// WithChunkSize option specifies the chunk size of eStargz blob to build.
func WithChunkSize(chunkSize int) Option {
return func(o *options) error {
o.chunkSize = chunkSize
return nil
}
}
// WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression.
// See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option {
return func(o *options) error {
o.compressionLevel = level
return nil
}
}
// WithPrioritizedFiles option specifies the list of prioritized files.
// These files must be complete paths that are absolute or relative to "/"
// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
// are treated as "/foo/bar".
func WithPrioritizedFiles(files []string) Option {
return func(o *options) error {
o.prioritizedFiles = files
return nil
}
}
// WithAllowPrioritizeNotFound makes Build continue the execution even if some
// of the prioritized files specified by the WithPrioritizedFiles option aren't
// found in the input tar. Instead, all missed file names are recorded in the
// passed slice.
func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
return func(o *options) error {
if missedFiles == nil {
return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
}
o.missedPrioritizedFiles = missedFiles
return nil
}
}
// WithCompression specifies compression algorithm to be used.
// Default is gzip.
func WithCompression(compression Compression) Option {
return func(o *options) error {
o.compression = compression
return nil
}
}
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
diffID digest.Digester
tocDigest digest.Digest
}
// DiffID returns the digest of the uncompressed blob.
// It is only valid to call DiffID after Close.
func (b *Blob) DiffID() digest.Digest {
return b.diffID.Digest()
}
// TOCDigest returns the digest of the uncompressed TOC JSON.
func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
// Build builds an eStargz blob, which is an extended version of stargz, from a
// blob (gzip, zstd or plain tar) passed through the argument. If prioritized
// files are listed in the options, these files are grouped as "prioritized"
// and can be used for runtime optimization (e.g. prefetch). This function
// builds the blob in parallel, dividing it into several (at least
// runtime.GOMAXPROCS(0)) sub-blobs.
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
var opts options
opts.compressionLevel = gzip.BestCompression // BestCompression by default
for _, o := range opt {
if err := o(&opts); err != nil {
return nil, err
}
}
if opts.compression == nil {
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
}
layerFiles := newTempFiles()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
}
}
}()
tarBlob, err := decompressBlob(tarBlob, layerFiles)
if err != nil {
return nil, err
}
entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
if err != nil {
return nil, err
}
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex
var eg errgroup.Group
for i, parts := range tarParts {
i, parts := i, parts
// builds verifiable stargz sub-blobs
eg.Go(func() error {
esgzFile, err := layerFiles.TempFile("", "esgzdata")
if err != nil {
return err
}
sw := NewWriterWithCompressor(esgzFile, opts.compression)
sw.ChunkSize = opts.chunkSize
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err
}
mu.Lock()
writers[i] = sw
payloads[i] = esgzFile
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
rErr = err
return nil, err
}
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
if err != nil {
rErr = err
return nil, err
}
var rs []io.Reader
for _, p := range payloads {
fs, err := fileSectionReader(p)
if err != nil {
return nil, err
}
rs = append(rs, fs)
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
go func() {
r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
defer r.Close()
if _, err := io.Copy(diffID.Hash(), r); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return &Blob{
ReadCloser: readCloser{
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
tocDigest: tocDgst,
diffID: diffID,
}, nil
}
// closeWithCombine takes unclosed Writers and closes them. It also returns the
// TOC that combines all the Writers' entries.
// The Writers don't write the TOC and footer to their underlying writers, so
// the sub-blobs can be combined into a single eStargz, and the tocAndFooter
// returned by this function can be appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed")
}
for _, w := range ws {
if w.closed {
return nil, "", fmt.Errorf("writer must be unclosed")
}
defer func(w *Writer) { w.closed = true }(w)
if err := w.closeGz(); err != nil {
return nil, "", err
}
if err := w.bw.Flush(); err != nil {
return nil, "", err
}
}
var (
mtoc = new(JTOC)
currentOffset int64
)
mtoc.Version = ws[0].toc.Version
for _, w := range ws {
for _, e := range w.toc.Entries {
// Recalculate Offset of non-empty files/chunks
if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
e.Offset += currentOffset
}
mtoc.Entries = append(mtoc.Entries, e)
}
if w.toc.Version > mtoc.Version {
mtoc.Version = w.toc.Version
}
currentOffset += w.cw.n
}
return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
}
func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
buf := new(bytes.Buffer)
tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
if err != nil {
return nil, "", err
}
return buf, tocDigest, nil
}
// divideEntries divides the passed entries into parts of roughly equal total
// size, at least the number specified by the argument.
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
var estimatedSize int64
for _, e := range entries {
estimatedSize += e.header.Size
}
unitSize := estimatedSize / int64(minPartsNum)
var (
nextEnd = unitSize
offset int64
)
set = append(set, []*entry{})
for _, e := range entries {
set[len(set)-1] = append(set[len(set)-1], e)
offset += e.header.Size
if offset > nextEnd {
set = append(set, []*entry{})
nextEnd += unitSize
}
}
return
}
var errNotFound = errors.New("not found")
// sortEntries reads the specified tar blob and returns a list of tar entries.
// If prioritized files are specified, the list starts with these files,
// keeping the order specified by the argument.
func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
// Import tar file.
intar, err := importTar(in)
if err != nil {
return nil, fmt.Errorf("failed to sort: %w", err)
}
// Sort the tar file, respecting the prioritized files list.
sorted := &tarFile{}
for _, l := range prioritized {
if err := moveRec(l, intar, sorted); err != nil {
if errors.Is(err, errNotFound) && missedPrioritized != nil {
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
}
return nil, fmt.Errorf("failed to sort tar entries: %w", err)
}
}
if len(prioritized) == 0 {
sorted.add(&entry{
header: &tar.Header{
Name: NoPrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
} else {
sorted.add(&entry{
header: &tar.Header{
Name: PrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
}
// Dump all entries and concatenate them.
return append(sorted.dump(), intar.dump()...), nil
}
// readerFromEntries returns a reader of a tar archive that contains the
// entries passed through the arguments.
func readerFromEntries(entries ...*entry) io.Reader {
pr, pw := io.Pipe()
go func() {
tw := tar.NewWriter(pw)
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
return
}
}
pw.Close()
}()
return pr
}
func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
if err != nil {
return nil, fmt.Errorf("failed to make position watcher: %w", err)
}
tr := tar.NewReader(pw)
// Walk through all nodes.
for {
// Fetch and parse next header.
h, err := tr.Next()
if err != nil {
if err == io.EOF {
break
} else {
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
}
switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
// Ignore existing landmark
continue
}
// Add entry. If it already exists, replace it.
if _, ok := tf.get(h.Name); ok {
tf.remove(h.Name)
}
tf.add(&entry{
header: h,
payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
})
}
return tf, nil
}
func moveRec(name string, in *tarFile, out *tarFile) error {
name = cleanEntryName(name)
if name == "" { // root directory. stop recursion.
if e, ok := in.get(name); ok {
// entry of the root directory exists. we should move it as well.
// this case will occur if tar entries are prefixed with "./", "/", etc.
out.add(e)
in.remove(name)
}
return nil
}
_, okIn := in.get(name)
_, okOut := out.get(name)
if !okIn && !okOut {
return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
if err := moveRec(parent, in, out); err != nil {
return err
}
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
if err := moveRec(e.header.Linkname, in, out); err != nil {
return err
}
}
if e, ok := in.get(name); ok {
out.add(e)
in.remove(name)
}
return nil
}
type entry struct {
header *tar.Header
payload io.ReadSeeker
}
type tarFile struct {
index map[string]*entry
stream []*entry
}
func (f *tarFile) add(e *entry) {
if f.index == nil {
f.index = make(map[string]*entry)
}
f.index[cleanEntryName(e.header.Name)] = e
f.stream = append(f.stream, e)
}
func (f *tarFile) remove(name string) {
name = cleanEntryName(name)
if f.index != nil {
delete(f.index, name)
}
var filtered []*entry
for _, e := range f.stream {
if cleanEntryName(e.header.Name) == name {
continue
}
filtered = append(filtered, e)
}
f.stream = filtered
}
func (f *tarFile) get(name string) (e *entry, ok bool) {
if f.index == nil {
return nil, false
}
e, ok = f.index[cleanEntryName(name)]
return
}
func (f *tarFile) dump() []*entry {
return f.stream
}
type readCloser struct {
io.Reader
closeFunc func() error
}
func (rc readCloser) Close() error {
return rc.closeFunc()
}
func fileSectionReader(file *os.File) (*io.SectionReader, error) {
info, err := file.Stat()
if err != nil {
return nil, err
}
return io.NewSectionReader(file, 0, info.Size()), nil
}
func newTempFiles() *tempFiles {
return &tempFiles{}
}
type tempFiles struct {
files []*os.File
filesMu sync.Mutex
}
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
f, err := ioutil.TempFile(dir, pattern)
if err != nil {
return nil, err
}
tf.filesMu.Lock()
tf.files = append(tf.files, f)
tf.filesMu.Unlock()
return f, nil
}
func (tf *tempFiles) CleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
for _, f := range tf.files {
if err := f.Close(); err != nil {
allErr = append(allErr, err)
}
if err := os.Remove(f.Name()); err != nil {
allErr = append(allErr, err)
}
}
tf.files = nil
return errorutil.Aggregate(allErr)
}
func newCountReader(r io.ReaderAt) (*countReader, error) {
pos := int64(0)
return &countReader{r: r, cPos: &pos}, nil
}
type countReader struct {
r io.ReaderAt
cPos *int64
mu sync.Mutex
}
func (cr *countReader) Read(p []byte) (int, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
n, err := cr.r.ReadAt(p, *cr.cPos)
if err == nil {
*cr.cPos += int64(n)
}
return n, err
}
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
switch whence {
default:
return 0, fmt.Errorf("Unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
return 0, fmt.Errorf("Unsupported whence: %v", whence)
}
if offset < 0 {
return 0, fmt.Errorf("invalid offset")
}
*cr.cPos = offset
return offset, nil
}
func (cr *countReader) currentPos() int64 {
cr.mu.Lock()
defer cr.mu.Unlock()
return *cr.cPos
}
func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
if org.Size() < 4 {
return org, nil
}
src := make([]byte, 4)
if _, err := org.Read(src); err != nil && err != io.EOF {
return nil, err
}
var dR io.Reader
if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
// gzip
dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
if err != nil {
return nil, err
}
defer dgR.Close()
dR = io.Reader(dgR)
} else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
// zstd
dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
if err != nil {
return nil, err
}
defer dzR.Close()
dR = io.Reader(dzR)
} else {
// uncompressed
return io.NewSectionReader(org, 0, org.Size()), nil
}
b, err := tmp.TempFile("", "uncompresseddata")
if err != nil {
return nil, err
}
if _, err := io.Copy(b, dR); err != nil {
return nil, err
}
return fileSectionReader(b)
}
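// sniffCompression is an illustrative sketch, not part of the vendored
// source: it isolates the magic-byte detection decompressBlob relies on.
// A gzip stream begins with 0x1F 0x8B 0x08, and a zstd frame with the
// little-endian magic 0x28 0xB5 0x2F 0xFD; anything else is treated as
// uncompressed.
func sniffCompression(head []byte) string {
	switch {
	case len(head) >= 3 && bytes.Equal(head[:3], []byte{0x1F, 0x8B, 0x08}):
		return "gzip"
	case len(head) >= 4 && bytes.Equal(head[:4], []byte{0x28, 0xB5, 0x2F, 0xFD}):
		return "zstd"
	default:
		return "uncompressed"
	}
}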

View file

@@ -0,0 +1,40 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errorutil
import (
"errors"
"fmt"
"strings"
)
// Aggregate combines a list of errors into a single new error.
func Aggregate(errs []error) error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
points := make([]string, len(errs)+1)
points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
for i, err := range errs {
points[i+1] = fmt.Sprintf("* %s", err)
}
return errors.New(strings.Join(points, "\n\t"))
}
}
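// Illustrative sketch, not part of the vendored source: Aggregate is meant
// for loops that should report every failure rather than only the first,
// as in the tempFiles.CleanupAll helper earlier in this change:
//
//	var errs []error
//	for _, f := range files {
//		if err := f.Close(); err != nil {
//			errs = append(errs, err)
//		}
//	}
//	return errorutil.Aggregate(errs) // nil when errs is empty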

File diff suppressed because it is too large

View file

@@ -0,0 +1,237 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/binary"
"encoding/json"
"fmt"
"hash"
"io"
"strconv"
digest "github.com/opencontainers/go-digest"
)
type gzipCompression struct {
*GzipCompressor
*GzipDecompressor
}
func newGzipCompressionWithLevel(level int) Compression {
return &gzipCompression{
&GzipCompressor{level},
&GzipDecompressor{},
}
}
func NewGzipCompressor() *GzipCompressor {
return &GzipCompressor{gzip.BestCompression}
}
func NewGzipCompressorWithLevel(level int) *GzipCompressor {
return &GzipCompressor{level}
}
type GzipCompressor struct {
compressionLevel int
}
func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
return gzip.NewWriterLevel(w, gc.compressionLevel)
}
func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
tocJSON, err := json.MarshalIndent(toc, "", "\t")
if err != nil {
return "", err
}
gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
gw := io.Writer(gz)
if diffHash != nil {
gw = io.MultiWriter(gz, diffHash)
}
tw := tar.NewWriter(gw)
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
return "", err
}
if _, err := tw.Write(tocJSON); err != nil {
return "", err
}
if err := tw.Close(); err != nil {
return "", err
}
if err := gz.Close(); err != nil {
return "", err
}
if _, err := w.Write(gzipFooterBytes(off)); err != nil {
return "", err
}
return digest.FromBytes(tocJSON), nil
}
// gzipFooterBytes returns the 51-byte footer.
func gzipFooterBytes(tocOff int64) []byte {
buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
// Extra header indicating the offset of TOCJSON
// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
header := make([]byte, 4)
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
gz.Header.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
}
return buf.Bytes()
}
type GzipDecompressor struct{}
func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}
func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
return parseTOCEStargz(r)
}
func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != FooterSize {
return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, 0, 0, err
}
defer zr.Close()
extra := zr.Header.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
}
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
}
if string(subfield[16:]) != "STARGZ" {
return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
}
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}
func (gz *GzipDecompressor) FooterSize() int64 {
return FooterSize
}
func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
return decompressTOCEStargz(r)
}
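// exampleFooterRoundTrip is an illustrative sketch, not part of the vendored
// source: the footer written by gzipFooterBytes can be decoded back into the
// TOC offset with GzipDecompressor.ParseFooter.
func exampleFooterRoundTrip() error {
	const tocOff = 12345
	footer := gzipFooterBytes(tocOff)
	var d GzipDecompressor
	_, gotOff, _, err := d.ParseFooter(footer)
	if err != nil {
		return err
	}
	if gotOff != tocOff {
		return fmt.Errorf("offset mismatch: got %d, want %d", gotOff, tocOff)
	}
	return nil
}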
type LegacyGzipDecompressor struct{}
func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}
func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
return parseTOCEStargz(r)
}
func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != legacyFooterSize {
return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
extra := zr.Header.Extra
if len(extra) != 16+len("STARGZ") {
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
if string(extra[16:]) != "STARGZ" {
return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
}
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}
func (gz *LegacyGzipDecompressor) FooterSize() int64 {
return legacyFooterSize
}
func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
return decompressTOCEStargz(r)
}
func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
tr, err := decompressTOCEStargz(r)
if err != nil {
return nil, "", err
}
dgstr := digest.Canonical.Digester()
toc = new(JTOC)
if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
}
if err := tr.Close(); err != nil {
return nil, "", err
}
return toc, dgstr.Digest(), nil
}
func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
zr, err := gzip.NewReader(r)
if err != nil {
return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
}
zr.Multistream(false)
tr := tar.NewReader(zr)
h, err := tr.Next()
if err != nil {
return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
}
if h.Name != TOCTarName {
return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
}
return readCloser{tr, zr.Close}, nil
}
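// exampleReadTOC is an illustrative sketch, not part of the vendored source:
// to locate and parse the TOC of an eStargz blob, read the trailing
// FooterSize bytes, decode the TOC offset with ParseFooter, and hand the TOC
// range to ParseTOC.
func exampleReadTOC(blob *io.SectionReader) (*JTOC, digest.Digest, error) {
	var d GzipDecompressor
	footer := make([]byte, d.FooterSize())
	if _, err := blob.ReadAt(footer, blob.Size()-d.FooterSize()); err != nil && err != io.EOF {
		return nil, "", err
	}
	_, tocOff, tocSize, err := d.ParseFooter(footer)
	if err != nil {
		return nil, "", err
	}
	// tocSize is optional; zero means "up to the beginning of the footer".
	if tocSize <= 0 {
		tocSize = blob.Size() - tocOff - d.FooterSize()
	}
	return d.ParseTOC(io.NewSectionReader(blob, tocOff, tocSize))
}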

File diff suppressed because it is too large

View file

@@ -0,0 +1,316 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"hash"
"io"
"os"
"path"
"time"
digest "github.com/opencontainers/go-digest"
)
const (
// TOCTarName is the name of the JSON file in the tar archive in the
// table of contents gzip stream.
TOCTarName = "stargz.index.json"
// FooterSize is the number of bytes in the footer
//
// The footer is an empty gzip stream with no compression and an Extra
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
// number is the offset to the gzip stream of JSON TOC.
//
// 51 comes from:
//
// 10 bytes gzip header
// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
// 5 bytes flate header
// 8 bytes gzip footer
// (End of the eStargz blob)
//
// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
FooterSize = 51
// legacyFooterSize is the number of bytes in the legacy stargz footer.
//
// 47 comes from:
//
// 10 byte gzip header +
// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
// 5 byte flate header
// 8 byte gzip footer (two little endian uint32s: digest, size)
legacyFooterSize = 47
// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
// digest of the TOC JSON.
// This annotation is valid only when it is specified in `.[]layers.annotations`
// of an image manifest.
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
// to the runtime, but the current OCI image spec doesn't ship this information by
// default, so we store it in this special annotation.
StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
// PrefetchLandmark is a file entry which indicates the end position of
// prefetch in the stargz file.
PrefetchLandmark = ".prefetch.landmark"
// NoPrefetchLandmark is a file entry which indicates that no prefetch should
// occur in the stargz file.
NoPrefetchLandmark = ".no.prefetch.landmark"
landmarkContents = 0xf
)
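// footerSizeCheck is an illustrative sketch, not part of the vendored source:
// it makes the byte accounting in the comments above explicit.
func footerSizeCheck() {
	const gzipHeader, xlen, extraHdr, subfield, flateHdr, gzipFooter = 10, 2, 4, 22, 5, 8
	if gzipHeader+xlen+extraHdr+subfield+flateHdr+gzipFooter != FooterSize {
		panic("eStargz footer size mismatch") // 10+2+4+22+5+8 = 51
	}
	if gzipHeader+xlen+subfield+flateHdr+gzipFooter != legacyFooterSize {
		panic("legacy footer size mismatch") // 10+2+22+5+8 = 47
	}
}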
// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
type JTOC struct {
Version int `json:"version"`
Entries []*TOCEntry `json:"entries"`
}
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
// Name is the tar entry's name. It is the complete path
// stored in the tar file, not just the base name.
Name string `json:"name"`
// Type is one of "dir", "reg", "symlink", "hardlink", "char",
// "block", "fifo", or "chunk".
// The "chunk" type is used for regular file data chunks past the first
// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
// ChunkOffset, and ChunkSize populated.
Type string `json:"type"`
// Size, for regular files, is the logical size of the file.
Size int64 `json:"size,omitempty"`
// ModTime3339 is the modification time of the tar entry. Empty
// means zero or unknown. Otherwise it's in UTC RFC3339
// format. Use the ModTime method to access the time.Time value.
ModTime3339 string `json:"modtime,omitempty"`
modTime time.Time
// LinkName, for symlinks and hardlinks, is the link target.
LinkName string `json:"linkName,omitempty"`
// Mode is the permission and mode bits.
Mode int64 `json:"mode,omitempty"`
// UID is the user ID of the owner.
UID int `json:"uid,omitempty"`
// GID is the group ID of the owner.
GID int `json:"gid,omitempty"`
// Uname is the username of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same UID.
Uname string `json:"userName,omitempty"`
// Gname is the group name of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same GID.
Gname string `json:"groupName,omitempty"`
// Offset, for regular files, provides the offset in the
// stargz file to the file's data bytes. See ChunkOffset and
// ChunkSize.
Offset int64 `json:"offset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types.
DevMajor int `json:"devMajor,omitempty"`
// DevMinor is the minor device number for "char" and "block" types.
DevMinor int `json:"devMinor,omitempty"`
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
NumLink int
// Xattrs are the extended attributes for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
// Digest stores the OCI checksum for regular files payload.
// It has the form "sha256:abcdef01234....".
Digest string `json:"digest,omitempty"`
// ChunkOffset is non-zero if this is a chunk of a large,
// regular file. If so, the Offset is where the gzip header of
// ChunkSize bytes at ChunkOffset in Name begins.
//
// In serialized form, a "chunkSize" JSON field of zero means
// that the chunk goes to the end of the file. After reading
// from the stargz TOC, though, the ChunkSize is initialized
// to a non-zero value when Type is either "reg" or
// "chunk".
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkSize int64 `json:"chunkSize,omitempty"`
// ChunkDigest stores an OCI digest of the chunk. This must be formed
// as "sha256:0123abcd...".
ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry
}
// ModTime returns the entry's modification time.
func (e *TOCEntry) ModTime() time.Time { return e.modTime }
// NextOffset returns the position (relative to the start of the
// stargz file) of the next gzip boundary after e.Offset.
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
if e.children == nil {
e.children = make(map[string]*TOCEntry)
}
if child.Type == "dir" {
e.NumLink++ // Entry ".." in the subdirectory links to this directory
}
e.children[baseName] = child
}
// isDataType reports whether TOCEntry is a regular file or chunk (something that
// contains regular file data).
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
// Stat returns a FileInfo value representing e.
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
// ForeachChild calls f for each child item. If f returns false, iteration ends.
// If e is not a directory, f is not called.
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
for name, ent := range e.children {
if !f(name, ent) {
return
}
}
}
// LookupChild returns the directory e's child by its base name.
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
child, ok = e.children[baseName]
return
}
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
type fileInfo struct{ e *TOCEntry }
var _ os.FileInfo = fileInfo{}
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
func (fi fileInfo) Size() int64 { return fi.e.Size }
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
func (fi fileInfo) Sys() interface{} { return fi.e }
func (fi fileInfo) Mode() (m os.FileMode) {
// TOCEntry.Mode is tar.Header.Mode, so we can interpret these bits using the `tar` package.
m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
switch fi.e.Type {
case "dir":
m |= os.ModeDir
case "symlink":
m |= os.ModeSymlink
case "char":
m |= os.ModeDevice | os.ModeCharDevice
case "block":
m |= os.ModeDevice
case "fifo":
m |= os.ModeNamedPipe
}
return m
}
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
// in a eStargz blob.
type TOCEntryVerifier interface {
// Verifier provides a content verifier that can be used for verifying the
// contents of the specified TOCEntry.
Verifier(ce *TOCEntry) (digest.Verifier, error)
}
// Compression provides the compression helper to be used creating and parsing eStargz.
// This package provides gzip-based Compression by default, but any compression
// algorithm (e.g. zstd) can be used as long as it implements Compression.
type Compression interface {
Compressor
Decompressor
}
// Compressor represents the helper methods to be used for creating eStargz.
type Compressor interface {
// Writer returns WriteCloser to be used for writing a chunk to eStargz.
// Every time a chunk is written, the WriteCloser is closed and Writer is
// called again for writing the next chunk.
Writer(w io.Writer) (io.WriteCloser, error)
// WriteTOCAndFooter is called to write JTOC to the passed Writer.
// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
// (e.g. uncompressed TOC JSON).
//
// This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob when it's parsed.
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
}
// Decompressor represents the helper methods to be used for parsing eStargz.
type Decompressor interface {
// Reader returns ReadCloser to be used for decompressing file payload.
Reader(r io.Reader) (io.ReadCloser, error)
// FooterSize returns the size of the footer of this blob.
FooterSize() int64
// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size between
// the top until the TOC JSON).
//
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
// of the underlying blob that has the range specified by ParseFooter method.
//
// This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob. This must match to the value returned from
// Compressor.WriteTOCAndFooter that is used when creating this blob.
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
}

View file

@@ -0,0 +1,172 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// =====
// NOTE: This file is ported from https://github.com/containerd/containerd/blob/v1.5.2/snapshots/overlay/overlayutils/check.go
// TODO: import this from the containerd package once we drop support for containerd v1.4.x
// =====
package overlayutils
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
userns "github.com/containerd/containerd/sys"
"github.com/containerd/continuity/fs"
)
// SupportsMultipleLowerDir checks if the system supports multiple lowerdirs,
// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs
// are always available (so this check isn't needed), and support was backported
// to RHEL and CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function
// detects support on those kernels without doing a kernel version comparison.
//
// Ported from moby overlay2.
func SupportsMultipleLowerDir(d string) error {
td, err := ioutil.TempDir(d, "multiple-lowerdir-check")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
log.L.WithError(err).Warnf("Failed to remove check directory %v", td)
}
}()
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return err
}
}
opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work"))
m := mount.Mount{
Type: "overlay",
Source: "overlay",
Options: []string{opts},
}
dest := filepath.Join(td, "merged")
if err := m.Mount(dest); err != nil {
return fmt.Errorf("failed to mount overlay: %w", err)
}
if err := mount.UnmountAll(dest, 0); err != nil {
log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
}
return nil
}
// Supported returns nil when the overlayfs is functional on the system with the root directory.
// Supported is not called during plugin initialization, but is exposed for downstream projects that use
// this snapshotter as a library.
func Supported(root string) error {
if err := os.MkdirAll(root, 0700); err != nil {
return err
}
supportsDType, err := fs.SupportsDType(root)
if err != nil {
return err
}
if !supportsDType {
return fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root)
}
return SupportsMultipleLowerDir(root)
}
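// Illustrative sketch, not part of the vendored source: downstream
// snapshotters typically gate their setup on this check, where root is the
// snapshotter's state directory (the variable name is hypothetical):
//
//	if err := overlayutils.Supported(root); err != nil {
//		return nil, fmt.Errorf("overlayfs is not usable on %s: %w", root, err)
//	}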
// NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option.
//
// The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11.
//
// The "userxattr" option is NOT needed for the initial user namespace (aka "the host").
//
// Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount
// the overlayfs in a user namespace without the "userxattr" option.
//
// The corresponding kernel commit: https://github.com/torvalds/linux/commit/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1
// > ovl: user xattr
// >
// > Optionally allow using "user.overlay." namespace instead of "trusted.overlay."
// > ...
// > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the
// > "user.overlay.redirect" or "user.overlay.metacopy" xattrs.
// > ...
//
// The "userxattr" support is not exposed in "/sys/module/overlay/parameters".
func NeedsUserXAttr(d string) (bool, error) {
if !userns.RunningInUserNS() {
// we are the real root (i.e., the root in the initial user NS),
// so we never need the "userxattr" option.
return false, nil
}
// TODO: add a fast path for kernel >= 5.11.
//
// Keep in mind that distro vendors may backport the patch to older kernels,
// so we can't completely remove the check.
tdRoot := filepath.Join(d, "userxattr-check")
if err := os.RemoveAll(tdRoot); err != nil {
log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
}
if err := os.MkdirAll(tdRoot, 0700); err != nil {
return false, err
}
defer func() {
if err := os.RemoveAll(tdRoot); err != nil {
log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
}
}()
td, err := ioutil.TempDir(tdRoot, "")
if err != nil {
return false, err
}
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return false, err
}
}
opts := []string{
fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")),
"userxattr",
}
m := mount.Mount{
Type: "overlay",
Source: "overlay",
Options: opts,
}
dest := filepath.Join(td, "merged")
if err := m.Mount(dest); err != nil {
// Probably the host is running Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch.
// Return false without error.
log.L.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr")
return false, nil
}
if err := mount.UnmountAll(dest, 0); err != nil {
log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
}
return true, nil
}
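// Illustrative sketch, not part of the vendored source: callers fold the
// result into their overlay mount options (stateDir and mountOpts are
// hypothetical names):
//
//	userxattr, err := overlayutils.NeedsUserXAttr(stateDir)
//	if err != nil {
//		return err
//	}
//	if userxattr {
//		mountOpts = append(mountOpts, "userxattr")
//	}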

0
vendor/github.com/felixge/httpsnoop/.gitignore generated vendored Normal file
View file

6
vendor/github.com/felixge/httpsnoop/.travis.yml generated vendored Normal file
View file

@@ -0,0 +1,6 @@
language: go
go:
- 1.6
- 1.7
- 1.8

19
vendor/github.com/felixge/httpsnoop/LICENSE.txt generated vendored Normal file
View file

@@ -0,0 +1,19 @@
Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

10
vendor/github.com/felixge/httpsnoop/Makefile generated vendored Normal file
View file

@@ -0,0 +1,10 @@
.PHONY: ci generate clean
ci: clean generate
go test -v ./...
generate:
go generate .
clean:
rm -rf *_generated*.go

95
vendor/github.com/felixge/httpsnoop/README.md generated vendored Normal file
View file

@@ -0,0 +1,95 @@
# httpsnoop
Package httpsnoop provides an easy way to capture http related metrics (i.e.
response time, bytes written, and http status code) from your application's
http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop)
[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop)
## Usage Example
```go
// myH is your app's http handler, perhaps a http.ServeMux or similar.
var myH http.Handler
// wrappedH wraps myH in order to log every request.
wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
m := httpsnoop.CaptureMetrics(myH, w, r)
log.Printf(
"%s %s (code=%d dt=%s written=%d)",
r.Method,
r.URL,
m.Code,
m.Duration,
m.Written,
)
})
http.ListenAndServe(":8080", wrappedH)
```
## Why this package exists
Instrumenting an application's http.Handler is surprisingly difficult.
However if you google for e.g. "capture ResponseWriter status code" you'll find
lots of advice and code examples that suggest it is a fairly trivial
undertaking. Unfortunately everything I've seen so far has a high chance of
breaking your application.
The main problem is that a `http.ResponseWriter` often implements additional
interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
in your own struct that also implements the `http.ResponseWriter` interface
will hide the additional interfaces mentioned above. This has a high chance of
introducing subtle bugs into any non-trivial application.
Another approach I've seen people take is to return a struct that implements
all of the interfaces above. However, that's also problematic, because it's
difficult to fake some of these interfaces' behaviors when the underlying
`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
because an application may choose to operate differently, merely because it
detects the presence of these additional interfaces.
This package solves this problem by checking which additional interfaces a
`http.ResponseWriter` implements, returning a wrapped version implementing the
exact same set of interfaces.
Additionally this package properly handles edge cases such as `WriteHeader` not
being called, or called more than once, as well as concurrent calls to
`http.ResponseWriter` methods, and even calls happening after the wrapped
`ServeHTTP` has already returned.
Unfortunately this package is not perfect either. It's possible that it is
still missing some interfaces provided by the go core (let me know if you find
one), and it won't work for applications adding their own interfaces into the
mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
`http.ResponseWriter` and type-assert the result to its other interfaces.
However, hopefully the explanation above has sufficiently scared you of rolling
your own solution to this problem. httpsnoop may still break your application,
but at least it tries to avoid it as much as possible.
Anyway, the real problem here is that smuggling additional interfaces inside
`http.ResponseWriter` is a problematic design choice, but it probably goes as
deep as the Go language specification itself. But that's okay, I still prefer
Go over the alternatives ;).
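To make the failure mode concrete, here is a minimal sketch (not taken from
this package) of the naive wrapper described above. Because the struct only
promotes the methods declared by `http.ResponseWriter`, a type assertion for
`http.Flusher` fails even when the underlying writer supports it:
```go
type statusWriter struct {
	http.ResponseWriter // promotes only Header, Write, and WriteHeader
	status              int
}

func (sw *statusWriter) WriteHeader(code int) {
	sw.status = code
	sw.ResponseWriter.WriteHeader(code)
}

func handler(w http.ResponseWriter, r *http.Request) {
	var rw http.ResponseWriter = &statusWriter{ResponseWriter: w}
	// Even when w also implements http.Flusher, the wrapper hides it, so
	// this assertion fails and a streaming handler can no longer flush.
	if f, ok := rw.(http.Flusher); ok {
		f.Flush()
	}
}
```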
## Performance
```
BenchmarkBaseline-8 20000 94912 ns/op
BenchmarkCaptureMetrics-8 20000 95461 ns/op
```
As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
overhead of ~500 ns per http request on my machine. However, the margin of
error appears to be larger than that, therefore it should be reasonable to
assume that the overhead introduced by `CaptureMetrics` is absolutely
negligible.
## License
MIT

79
vendor/github.com/felixge/httpsnoop/capture_metrics.go generated vendored Normal file
View file

@@ -0,0 +1,79 @@
package httpsnoop
import (
"io"
"net/http"
"time"
)
// Metrics holds metrics captured from CaptureMetrics.
type Metrics struct {
// Code is the first http response code passed to the WriteHeader func of
// the ResponseWriter. If no such call is made, a default code of 200 is
// assumed instead.
Code int
// Duration is the time it took to execute the handler.
Duration time.Duration
// Written is the number of bytes successfully written by the Write or
// ReadFrom function of the ResponseWriter. ResponseWriters may also write
// data to their underlying connection directly (e.g. headers), but those
// are not tracked. Therefore the number of Written bytes will usually match
// the size of the response body.
Written int64
}
// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
// returns the metrics it captured from it.
func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
hnd.ServeHTTP(ww, r)
})
}
// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
// resulting metrics. This is very similar to CaptureMetrics (which is just
// sugar on top of this func), but is a more usable interface if your
// application doesn't use the Go http.Handler interface.
func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
var (
start = time.Now()
m = Metrics{Code: http.StatusOK}
headerWritten bool
hooks = Hooks{
WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
return func(code int) {
next(code)
if !headerWritten {
m.Code = code
headerWritten = true
}
}
},
Write: func(next WriteFunc) WriteFunc {
return func(p []byte) (int, error) {
n, err := next(p)
m.Written += int64(n)
headerWritten = true
return n, err
}
},
ReadFrom: func(next ReadFromFunc) ReadFromFunc {
return func(src io.Reader) (int64, error) {
n, err := next(src)
headerWritten = true
m.Written += n
return n, err
}
},
}
)
fn(Wrap(w, hooks))
m.Duration = time.Since(start)
return m
}
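// Illustrative sketch, not part of this file: CaptureMetricsFn is the escape
// hatch for code that does its writing outside of an http.Handler, e.g.
//
//	m := httpsnoop.CaptureMetricsFn(w, func(ww http.ResponseWriter) {
//		ww.WriteHeader(http.StatusAccepted)
//		io.WriteString(ww, "queued")
//	})
//	log.Printf("code=%d written=%d dt=%s", m.Code, m.Written, m.Duration)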

10
vendor/github.com/felixge/httpsnoop/docs.go generated vendored Normal file
View file

@@ -0,0 +1,10 @@
// Package httpsnoop provides an easy way to capture http related metrics (i.e.
// response time, bytes written, and http status code) from your application's
// http.Handlers.
//
// Doing this requires non-trivial wrapping of the http.ResponseWriter
// interface, which is also exposed for users interested in a more low-level
// API.
package httpsnoop
//go:generate go run codegen/main.go

View file

@@ -0,0 +1,436 @@
// +build go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
import (
"bufio"
"io"
"net"
"net/http"
)
// HeaderFunc is part of the http.ResponseWriter interface.
type HeaderFunc func() http.Header
// WriteHeaderFunc is part of the http.ResponseWriter interface.
type WriteHeaderFunc func(code int)
// WriteFunc is part of the http.ResponseWriter interface.
type WriteFunc func(b []byte) (int, error)
// FlushFunc is part of the http.Flusher interface.
type FlushFunc func()
// CloseNotifyFunc is part of the http.CloseNotifier interface.
type CloseNotifyFunc func() <-chan bool
// HijackFunc is part of the http.Hijacker interface.
type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
// ReadFromFunc is part of the io.ReaderFrom interface.
type ReadFromFunc func(src io.Reader) (int64, error)
// PushFunc is part of the http.Pusher interface.
type PushFunc func(target string, opts *http.PushOptions) error
// Hooks defines a set of method interceptors for methods included in
// http.ResponseWriter as well as some others. You can think of them as
// middleware for the function calls they target. See Wrap for more details.
type Hooks struct {
Header func(HeaderFunc) HeaderFunc
WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
Write func(WriteFunc) WriteFunc
Flush func(FlushFunc) FlushFunc
CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
Hijack func(HijackFunc) HijackFunc
ReadFrom func(ReadFromFunc) ReadFromFunc
Push func(PushFunc) PushFunc
}
// Wrap returns a wrapped version of w that provides the exact same interface
// as w. Specifically if w implements any combination of:
//
// - http.Flusher
// - http.CloseNotifier
// - http.Hijacker
// - io.ReaderFrom
// - http.Pusher
//
// The wrapped version will implement the exact same combination. If no hooks
// are set, the wrapped version also behaves exactly as w. Hooks targeting
// methods not supported by w are ignored. Any other hooks will intercept the
// method they target and may modify the call's arguments and/or return values.
// The CaptureMetrics implementation serves as a working example for how the
// hooks can be used.
func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
rw := &rw{w: w, h: hooks}
_, i0 := w.(http.Flusher)
_, i1 := w.(http.CloseNotifier)
_, i2 := w.(http.Hijacker)
_, i3 := w.(io.ReaderFrom)
_, i4 := w.(http.Pusher)
switch {
// combination 1/32
case !i0 && !i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
}{rw, rw}
// combination 2/32
case !i0 && !i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Pusher
}{rw, rw, rw}
// combination 3/32
case !i0 && !i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
}{rw, rw, rw}
// combination 4/32
case !i0 && !i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw}
// combination 5/32
case !i0 && !i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
}{rw, rw, rw}
// combination 6/32
case !i0 && !i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
http.Pusher
}{rw, rw, rw, rw}
// combination 7/32
case !i0 && !i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 8/32
case !i0 && !i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 9/32
case !i0 && i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
}{rw, rw, rw}
// combination 10/32
case !i0 && i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw}
// combination 11/32
case !i0 && i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 12/32
case !i0 && i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 13/32
case !i0 && i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 14/32
case !i0 && i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 15/32
case !i0 && i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 16/32
case !i0 && i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 17/32
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 18/32
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Pusher
}{rw, rw, rw, rw}
// combination 19/32
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 20/32
case i0 && !i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 21/32
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 22/32
case i0 && !i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 23/32
case i0 && !i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 24/32
case i0 && !i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 25/32
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 26/32
case i0 && i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 27/32
case i0 && i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 28/32
case i0 && i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 29/32
case i0 && i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 30/32
case i0 && i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 31/32
case i0 && i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
// combination 32/32
case i0 && i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw, rw}
}
panic("unreachable")
}
type rw struct {
w http.ResponseWriter
h Hooks
}
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil {
f = w.h.Header(f)
}
return f()
}
func (w *rw) WriteHeader(code int) {
f := w.w.(http.ResponseWriter).WriteHeader
if w.h.WriteHeader != nil {
f = w.h.WriteHeader(f)
}
f(code)
}
func (w *rw) Write(b []byte) (int, error) {
f := w.w.(http.ResponseWriter).Write
if w.h.Write != nil {
f = w.h.Write(f)
}
return f(b)
}
func (w *rw) Flush() {
f := w.w.(http.Flusher).Flush
if w.h.Flush != nil {
f = w.h.Flush(f)
}
f()
}
func (w *rw) CloseNotify() <-chan bool {
f := w.w.(http.CloseNotifier).CloseNotify
if w.h.CloseNotify != nil {
f = w.h.CloseNotify(f)
}
return f()
}
func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
f := w.w.(http.Hijacker).Hijack
if w.h.Hijack != nil {
f = w.h.Hijack(f)
}
return f()
}
func (w *rw) ReadFrom(src io.Reader) (int64, error) {
f := w.w.(io.ReaderFrom).ReadFrom
if w.h.ReadFrom != nil {
f = w.h.ReadFrom(f)
}
return f(src)
}
func (w *rw) Push(target string, opts *http.PushOptions) error {
f := w.w.(http.Pusher).Push
if w.h.Push != nil {
f = w.h.Push(f)
}
return f(target, opts)
}
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}

View file

@@ -0,0 +1,278 @@
// +build !go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
import (
"bufio"
"io"
"net"
"net/http"
)
// HeaderFunc is part of the http.ResponseWriter interface.
type HeaderFunc func() http.Header
// WriteHeaderFunc is part of the http.ResponseWriter interface.
type WriteHeaderFunc func(code int)
// WriteFunc is part of the http.ResponseWriter interface.
type WriteFunc func(b []byte) (int, error)
// FlushFunc is part of the http.Flusher interface.
type FlushFunc func()
// CloseNotifyFunc is part of the http.CloseNotifier interface.
type CloseNotifyFunc func() <-chan bool
// HijackFunc is part of the http.Hijacker interface.
type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
// ReadFromFunc is part of the io.ReaderFrom interface.
type ReadFromFunc func(src io.Reader) (int64, error)
// Hooks defines a set of method interceptors for methods included in
// http.ResponseWriter as well as some others. You can think of them as
// middleware for the function calls they target. See Wrap for more details.
type Hooks struct {
Header func(HeaderFunc) HeaderFunc
WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
Write func(WriteFunc) WriteFunc
Flush func(FlushFunc) FlushFunc
CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
Hijack func(HijackFunc) HijackFunc
ReadFrom func(ReadFromFunc) ReadFromFunc
}
// Wrap returns a wrapped version of w that provides the exact same interface
// as w. Specifically if w implements any combination of:
//
// - http.Flusher
// - http.CloseNotifier
// - http.Hijacker
// - io.ReaderFrom
//
// The wrapped version will implement the exact same combination. If no hooks
// are set, the wrapped version also behaves exactly as w. Hooks targeting
// methods not supported by w are ignored. Any other hooks will intercept the
// method they target and may modify the call's arguments and/or return values.
// The CaptureMetrics implementation serves as a working example for how the
// hooks can be used.
func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
rw := &rw{w: w, h: hooks}
_, i0 := w.(http.Flusher)
_, i1 := w.(http.CloseNotifier)
_, i2 := w.(http.Hijacker)
_, i3 := w.(io.ReaderFrom)
switch {
// combination 1/16
case !i0 && !i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
}{rw, rw}
// combination 2/16
case !i0 && !i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
}{rw, rw, rw}
// combination 3/16
case !i0 && !i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
}{rw, rw, rw}
// combination 4/16
case !i0 && !i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 5/16
case !i0 && i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
}{rw, rw, rw}
// combination 6/16
case !i0 && i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 7/16
case !i0 && i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 8/16
case !i0 && i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 9/16
case i0 && !i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 10/16
case i0 && !i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 11/16
case i0 && !i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 12/16
case i0 && !i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 13/16
case i0 && i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 14/16
case i0 && i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 15/16
case i0 && i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 16/16
case i0 && i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
}
panic("unreachable")
}
type rw struct {
w http.ResponseWriter
h Hooks
}
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil {
f = w.h.Header(f)
}
return f()
}
func (w *rw) WriteHeader(code int) {
f := w.w.(http.ResponseWriter).WriteHeader
if w.h.WriteHeader != nil {
f = w.h.WriteHeader(f)
}
f(code)
}
func (w *rw) Write(b []byte) (int, error) {
f := w.w.(http.ResponseWriter).Write
if w.h.Write != nil {
f = w.h.Write(f)
}
return f(b)
}
func (w *rw) Flush() {
f := w.w.(http.Flusher).Flush
if w.h.Flush != nil {
f = w.h.Flush(f)
}
f()
}
func (w *rw) CloseNotify() <-chan bool {
f := w.w.(http.CloseNotifier).CloseNotify
if w.h.CloseNotify != nil {
f = w.h.CloseNotify(f)
}
return f()
}
func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
f := w.w.(http.Hijacker).Hijack
if w.h.Hijack != nil {
f = w.h.Hijack(f)
}
return f()
}
func (w *rw) ReadFrom(src io.Reader) (int64, error) {
f := w.w.(io.ReaderFrom).ReadFrom
if w.h.ReadFrom != nil {
f = w.h.ReadFrom(f)
}
return f(src)
}
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}

29
vendor/github.com/go-logr/logr/.golangci.yaml generated vendored Normal file
View file

@@ -0,0 +1,29 @@
run:
timeout: 1m
tests: true
linters:
disable-all: true
enable:
- asciicheck
- deadcode
- errcheck
- forcetypeassert
- gocritic
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues:
exclude-use-default: false
max-issues-per-linter: 0
max-same-issues: 10

6
vendor/github.com/go-logr/logr/CHANGELOG.md generated vendored Normal file
View file

@@ -0,0 +1,6 @@
# CHANGELOG
## v1.0.0-rc1
This is the first logged release. Major changes (including breaking changes)
have occurred since earlier tags.

17
vendor/github.com/go-logr/logr/CONTRIBUTING.md generated vendored Normal file
View file

@@ -0,0 +1,17 @@
# Contributing
Logr is open to pull-requests, provided they fit within the intended scope of
the project. Specifically, this library aims to be VERY small and minimalist,
with no external dependencies.
## Compatibility
This project intends to follow [semantic versioning](http://semver.org) and
is very strict about compatibility. Any proposed changes MUST follow those
rules.
## Performance
As a logging library, logr must be as light-weight as possible. Any proposed
code change must include results of running the [benchmark](./benchmark)
before and after the change.

View file

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The OpenTracing Authors
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

278
vendor/github.com/go-logr/logr/README.md generated vendored Normal file
View file

@@ -0,0 +1,278 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
an implementation of logging - it is an API. In fact it is two APIs with two
different sets of users.
The `Logger` type is intended for application and library authors. It provides
a relatively small API which can be used everywhere you want to emit logs. It
defers the actual act of writing logs (to files, to stdout, or whatever) to the
`LogSink` interface.
The `LogSink` interface is intended for logging library implementers. It is a
pure interface which can be implemented by logging frameworks to provide the actual logging
functionality.
This decoupling allows application and library developers to write code in
terms of `logr.Logger` (which has very low dependency fan-out) while the
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
Application developers can then switch out implementations as necessary.
Many people assert that libraries should not be logging, and as such efforts
like this are pointless. Those people are welcome to convince the authors of
the tens-of-thousands of libraries that *DO* write logs that they are all
wrong. In the meantime, logr takes a more practical approach.
## Typical usage
Somewhere, early in an application's life, it will make a decision about which
logging library (implementation) it actually wants to use. Something like:
```
func main() {
// ... other setup code ...
// Create the "root" logger. We have chosen the "logimpl" implementation,
// which takes some initial parameters and returns a logr.Logger.
logger := logimpl.New(param1, param2)
// ... other setup code ...
```
Most apps will call into other libraries, create structures to govern the flow,
etc. The `logr.Logger` object can be passed to these other libraries, stored
in structs, or even used as a package-global variable, if needed. For example:
```
app := createTheAppObject(logger)
app.Run()
```
Outside of this early setup, no other packages need to know about the choice of
implementation. They write logs in terms of the `logr.Logger` that they
received:
```
type appObject struct {
// ... other fields ...
logger logr.Logger
// ... other fields ...
}
func (app *appObject) Run() {
app.logger.Info("starting up", "timestamp", time.Now())
// ... app code ...
```
## Background
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
### Inspiration
Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
he has to say, and it largely aligns with our own experiences.
### Differences from Dave's ideas
The main differences are:
1. Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. We disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. This
package restricts the logging API to just 2 types of logs: info and error.
Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.
2. Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug." Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.
## Implementations (non-exhaustive)
There are implementations for the following logging libraries:
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
## FAQ
### Conceptual
#### Why structured logging?
- **Structured logs are more easily queryable**: Since you've got
key-value pairs, it's much easier to query your structured logs for
particular values by filtering on the contents of a particular key --
think searching request logs for error codes, Kubernetes reconcilers for
the name and namespace of the reconciled object, etc.
- **Structured logging makes it easier to have cross-referenceable logs**:
Similarly to searchability, if you maintain conventions around your
keys, it becomes easy to gather all log lines related to a particular
concept.
- **Structured logs allow better dimensions of filtering**: if you have
structure to your logs, you've got more precise control over how much
information is logged -- you might choose in a particular configuration
to log certain keys but not others, only log lines where a certain key
matches a certain value, etc., instead of just having v-levels and names
to key off of.
- **Structured logs better represent structured data**: sometimes, the
data that you want to log is inherently structured (think tuple-like
objects.) Structured logs allow you to preserve that structure when
outputting.
#### Why V-levels?
**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.
#### Why not named levels, like Info/Warning/Error?
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).
#### Why not allow format strings, too?
**Format strings negate many of the benefits of structured logs**:
- They're not easily searchable without resorting to fuzzy searching,
regular expressions, etc.
- They don't store structured data well, since contents are flattened into
a string.
- They're not cross-referenceable.
- They don't compress easily, since the message is not constant.
(Unless you turn positional parameters into key-value pairs with numerical
keys, at which point you've gotten key-value logging with meaningless
keys.)
### Practical
#### Why key-value pairs, and not a map?
Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.
While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.
#### What if my V-levels differ between libraries?
That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` method to pass different loggers to different libraries.
Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.
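For instance, here is a sketch of handing differently tuned loggers to different libraries (`startDB` is a hypothetical library entry point, and `funcr` stands in for whatever sink you actually use):
```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// startDB stands in for a library entry point that accepts a logger.
func startDB(log logr.Logger) {
	log.Info("connected")                  // V(0) relative to the logger given
	log.V(1).Info("pool stats", "idle", 3) // V(1) relative to the logger given
}

func main() {
	root := funcr.New(func(p, a string) { fmt.Println(p, a) },
		funcr.Options{Verbosity: 1})

	// Each library gets its own name segment, so its lines are attributable.
	startDB(root.WithName("db"))

	// A chatty dependency can be handed a pre-raised V-level: all of its
	// output is shifted up by 2 (Info -> V(2), V(1) -> V(3)), above our
	// Verbosity of 1, so it stays quiet unless verbosity is raised.
	startDB(root.WithName("vendor").V(2))
}
```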
#### But I really want to use a format string!
That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":
1. Figure out what the error actually is, as you'd write in a TL;DR style,
and use that as a message.
2. For every place you'd write a format specifier, look to the word before
it, and add that as a key value pair.
For instance, consider the following examples (all taken from spots in the
Kubernetes codebase):
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
responseCode, err)` becomes `logger.Error(err, "client returned an
error", "code", responseCode)`
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
response when requesting url", "attempt", retries, "after
seconds", seconds, "url", url)`
If you *really* must use a format string, use it in a key's value, and
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T", thing))`, where `thing` is the value in
question. In general, though, the cases where this is necessary should be
few and far between.
#### How do I choose my V-levels?
This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.
Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack."
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace-style logs) and up from 1 (for chattier
info-type logs).
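As an illustrative progression (assuming a configured `logger` and the obvious surrounding variables):
```go
logger.Info("server listening", "addr", addr)      // 0: always worth seeing
logger.V(1).Info("config reloaded", "path", path)  // 1: routine detail
logger.V(4).Info("retrying request", "attempt", n) // mid-range: debug-ish
logger.V(10).Info("raw request", "body", body)     // 10: firehose
```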
#### How do I choose my keys?
Keys are fairly flexible, and can hold more or less any string
value. For best compatibility with implementations and consistency
with existing code in other projects, there are a few conventions you
should consider.
- Make your keys human-readable.
- Constant keys are generally a good idea.
- Be consistent across your codebase.
- Keys should naturally match parts of the message string.
- Use lower case for simple keys and
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
more complex ones. Kubernetes is one example of a project that has
[adopted that
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ASCII characters, or at
least to match the general character set of your log lines.
#### Why should keys be constant values?
The point of structured logging is to make later log processing easier. Your
keys are, effectively, the schema of each log message. If you use different
keys across instances of the same log line, you will make your structured logs
much harder to use. `Sprintf()` is for values, not for keys!
#### Why is this not a pure interface?
The Logger type is implemented as a struct in order to allow the Go compiler to
optimize things like high-V `Info` logs that are not triggered. Not all of
these optimizations are implemented yet, but this structure was suggested as
a way to ensure they *can* be implemented. All of the real work is behind the
`LogSink` interface.
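One practical consequence: because `Logger` is a value with an `Enabled` method, callers can guard expensive argument construction, as in this sketch (`expensiveSnapshot` is hypothetical):
```go
if v := logger.V(5); v.Enabled() {
	v.Info("state dump", "state", expensiveSnapshot()) // only built when V(5) is enabled
}
```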
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging

54
vendor/github.com/go-logr/logr/discard.go generated vendored Normal file

@@ -0,0 +1,54 @@
/*
Copyright 2020 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// Discard returns a Logger that discards all messages logged to it. It can be
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
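//
// As an illustrative sketch (the function name is hypothetical), code
// under test that requires a Logger can be handed a no-op one:
//
//	helperUnderTest(Discard())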
func Discard() Logger {
return Logger{
level: 0,
sink: discardLogSink{},
}
}
// discardLogSink is a LogSink that discards all messages.
type discardLogSink struct{}
// Verify that it actually implements the interface
var _ LogSink = discardLogSink{}
func (l discardLogSink) Init(RuntimeInfo) {
}
func (l discardLogSink) Enabled(int) bool {
return false
}
func (l discardLogSink) Info(int, string, ...interface{}) {
}
func (l discardLogSink) Error(error, string, ...interface{}) {
}
func (l discardLogSink) WithValues(...interface{}) LogSink {
return l
}
func (l discardLogSink) WithName(string) LogSink {
return l
}

759
vendor/github.com/go-logr/logr/funcr/funcr.go generated vendored Normal file

@@ -0,0 +1,759 @@
/*
Copyright 2021 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package funcr implements formatting of structured log messages and
// optionally captures the call site and timestamp.
//
// The simplest way to use it is via its implementation of a
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
// Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
// Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
// standard JSON tags (all except "string").
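//
// A minimal usage sketch (assumes fmt is imported by the caller):
//
//	logger := funcr.New(func(prefix, args string) {
//		fmt.Println(prefix, args)
//	}, funcr.Options{})
//	logger.Info("hello", "count", 1)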
package funcr
import (
"bytes"
"encoding"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
)
// New returns a logr.Logger which is implemented by an arbitrary function.
func New(fn func(prefix, args string), opts Options) logr.Logger {
return logr.New(newSink(fn, NewFormatter(opts)))
}
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
// and produces JSON output.
func NewJSON(fn func(obj string), opts Options) logr.Logger {
fnWrapper := func(_, obj string) {
fn(obj)
}
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
}
// Underlier exposes access to the underlying logging function. Since
// callers only have a logr.Logger, they have to know which
// implementation is in use, so this interface is less of an
// abstraction and more of a way to test type conversion.
type Underlier interface {
GetUnderlying() func(prefix, args string)
}
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
l := &fnlogger{
Formatter: formatter,
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
l.Formatter.AddCallDepth(1)
return l
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
// LogCaller tells funcr to add a "caller" key to some or all log lines.
// This has some overhead, so some users might not want it.
LogCaller MessageClass
// LogCallerFunc tells funcr to also log the calling function name. This
// has no effect if caller logging is not enabled (see Options.LogCaller).
LogCallerFunc bool
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
// overhead, so some users might not want it.
LogTimestamp bool
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
// is enabled. If not specified, a default format will be used. For more
// details, see docs for Go's time.Layout.
TimestampFormat string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
Verbosity int
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
// while a log line is being rendered. The kvList argument follows logr
	// conventions - each pair of slice elements consists of a string key
// and an arbitrary value (verified and sanitized before calling this
// hook). The value returned must follow the same conventions. This hook
// can be used to audit or modify logged data. For example, you might want
// to prefix all of funcr's built-in keys with some string. This hook is
// only called for built-in (provided by funcr itself) key-value pairs.
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
RenderBuiltinsHook func(kvList []interface{}) []interface{}
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
RenderValuesHook func(kvList []interface{}) []interface{}
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
RenderArgsHook func(kvList []interface{}) []interface{}
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
// slice, array, or map the depth is increased by one. When the maximum is
// reached, the value will be converted to a string indicating that the max
// depth has been exceeded. If this field is not specified, a default
// value will be used.
MaxLogDepth int
}
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
Formatter
write func(prefix, args string)
}
func (l fnlogger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) GetUnderlying() func(prefix, args string) {
return l.write
}
// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
return newFormatter(opts, outputKeyValue)
}
// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
return newFormatter(opts, outputJSON)
}
// Defaults for Options.
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16
func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.TimestampFormat == "" {
opts.TimestampFormat = defaultTimestampFormat
}
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
values: nil,
depth: 0,
opts: opts,
}
return f
}
// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []interface{}
valuesStr string
depth int
opts Options
}
// outputFormat indicates which outputFormat to use.
type outputFormat int
const (
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
outputKeyValue outputFormat = iota
// outputJSON emits strict JSON.
outputJSON
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
}
continuing = true
buf.WriteString(f.valuesStr)
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
v := kvList[i+1]
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
buf.WriteByte(' ')
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) pretty(value interface{}) string {
return f.prettyWithFlags(value, 0, 0)
}
const (
flagRawStruct = 0x1 // do not print braces on structs
)
// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
// Handle types that take full control of logging.
if v, ok := value.(logr.Marshaler); ok {
// Replace the value with what the type wants to get logged.
// That then gets handled below via reflection.
value = v.MarshalLog()
}
// Handle types that want to format themselves.
switch v := value.(type) {
case fmt.Stringer:
value = v.String()
case error:
value = v.Error()
}
// Handling the most common types without reflect is a small perf win.
switch v := value.(type) {
case bool:
return strconv.FormatBool(v)
case string:
return prettyString(v)
case int:
return strconv.FormatInt(int64(v), 10)
case int8:
return strconv.FormatInt(int64(v), 10)
case int16:
return strconv.FormatInt(int64(v), 10)
case int32:
return strconv.FormatInt(int64(v), 10)
case int64:
		return strconv.FormatInt(v, 10)
case uint:
return strconv.FormatUint(uint64(v), 10)
case uint8:
return strconv.FormatUint(uint64(v), 10)
case uint16:
return strconv.FormatUint(uint64(v), 10)
case uint32:
return strconv.FormatUint(uint64(v), 10)
case uint64:
return strconv.FormatUint(v, 10)
case uintptr:
return strconv.FormatUint(uint64(v), 10)
case float32:
return strconv.FormatFloat(float64(v), 'f', -1, 32)
case float64:
return strconv.FormatFloat(v, 'f', -1, 64)
case complex64:
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
case complex128:
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
case PseudoStruct:
buf := bytes.NewBuffer(make([]byte, 0, 1024))
v = f.sanitize(v)
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
}
// arbitrary keys might need escaping
buf.WriteString(prettyString(v[i].(string)))
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
}
buf := bytes.NewBuffer(make([]byte, 0, 256))
t := reflect.TypeOf(value)
if t == nil {
return "null"
}
v := reflect.ValueOf(value)
switch t.Kind() {
case reflect.Bool:
return strconv.FormatBool(v.Bool())
case reflect.String:
return prettyString(v.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(v.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(v.Uint(), 10)
case reflect.Float32:
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
case reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
case reflect.Complex64:
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
case reflect.Complex128:
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
case reflect.Struct:
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
// reflect says this field is only defined for non-exported fields.
continue
}
if !v.Field(i).CanInterface() {
// reflect isn't clear exactly what this means, but we can't use it.
continue
}
name := ""
omitempty := false
if tag, found := fld.Tag.Lookup("json"); found {
if tag == "-" {
continue
}
if comma := strings.Index(tag, ","); comma != -1 {
if n := tag[:comma]; n != "" {
name = n
}
rest := tag[comma:]
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
omitempty = true
}
} else {
name = tag
}
}
if omitempty && isEmpty(v.Field(i)) {
continue
}
if i > 0 {
buf.WriteByte(',')
}
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
}
if name == "" {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
case reflect.Slice, reflect.Array:
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
}
buf.WriteByte(']')
return buf.String()
case reflect.Map:
buf.WriteByte('{')
// This does not sort the map keys, for best perf.
it := v.MapRange()
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
}
// If a map key supports TextMarshaler, use it.
keystr := ""
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
txt, err := m.MarshalText()
if err != nil {
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
} else {
keystr = string(txt)
}
keystr = prettyString(keystr)
} else {
// prettyWithFlags will produce already-escaped values
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
if t.Key().Kind() != reflect.String {
// JSON only does string keys. Unlike Go's standard JSON, we'll
// convert just about anything to a string.
keystr = prettyString(keystr)
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
buf.WriteByte('}')
return buf.String()
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return "null"
}
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
}
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
func prettyString(s string) string {
// Avoid escaping (which does allocations) if we can.
if needsEscape(s) {
return strconv.Quote(s)
}
b := bytes.NewBuffer(make([]byte, 0, 1024))
b.WriteByte('"')
b.WriteString(s)
b.WriteByte('"')
return b.String()
}
// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
for _, r := range s {
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
return true
}
}
return false
}
func isEmpty(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
// File is the basename of the file for this call site.
File string `json:"file"`
// Line is the line number in the file for this call site.
Line int `json:"line"`
// Func is the function name for this call site, or empty if
// Options.LogCallerFunc is not enabled.
Func string `json:"function,omitempty"`
}
func (f Formatter) caller() Caller {
// +1 for this frame, +1 for Info/Error.
pc, file, line, ok := runtime.Caller(f.depth + 2)
if !ok {
return Caller{"<unknown>", 0, ""}
}
fn := ""
if f.opts.LogCallerFunc {
if fp := runtime.FuncForPC(pc); fp != nil {
fn = fp.Name()
}
}
return Caller{filepath.Base(file), line, fn}
}
const noValue = "<no-value>"
func (f Formatter) nonStringKey(v interface{}) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
const snipLen = 16
snip := f.pretty(v)
if len(snip) > snipLen {
snip = snip[:snipLen]
}
return snip
}
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
_, ok := kvList[i].(string)
if !ok {
kvList[i] = f.nonStringKey(kvList[i])
}
}
return kvList
}
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
f.depth += info.CallDepth
}
// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
return level <= f.opts.Verbosity
}
// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
return f.depth
}
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
return prefix, f.render(args, kvList)
}
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Error {
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
var loggableErr interface{}
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
	// Return the local prefix, which was cleared for JSON output above,
	// so the logger name is not emitted twice.
	return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
if len(f.prefix) > 0 {
f.prefix += "/"
}
f.prefix += name
}
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)
vals := f.values
if hook := f.opts.RenderValuesHook; hook != nil {
vals = hook(f.sanitize(vals))
}
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
f.flatten(buf, vals, false, true) // escape user-provided keys
f.valuesStr = buf.String()
}
// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
f.depth += depth
}

501
vendor/github.com/go-logr/logr/logr.go generated vendored Normal file

@@ -0,0 +1,501 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This design derives from Dave Cheney's blog:
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
// Package logr defines a general-purpose logging API and abstract interfaces
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
// Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
// are key/value pairs rather than printf-style formatted strings, emphasizing
// "structured logging".
//
// With Go's standard log package, we might write:
// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
// information (such as stack traces) on calls to Error(). Error() messages are
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
// Verbosity
//
// Often we want to log information only when the application is in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
// The higher the V-level of a log line, the less critical it is considered.
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
// if flVerbose >= 2 {
// log.Printf("an unusual thing happened")
// }
//
// We can write:
// logger.V(2).Info("an unusual thing happened")
//
// Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
// will accumulate name "segments". These name segments will be joined in some
// way by the LogSink implementation. It is strongly recommended that name
// segments contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
// Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
// log.Printf("decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
// // Elsewhere: set up the logger to log the object name.
// obj.logger = mainLogger.WithValues(
// "name", obj.name, "namespace", obj.namespace)
//
// // later on...
// obj.logger.Info("setting foo", "value", targetValue)
//
// Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
// things to consider.
//
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string. Variable information can then be attached using
// named values.
//
// Keys are arbitrary strings, but should generally be constant values. Values
// may be any Go value, but how the value is formatted is determined by the
// LogSink implementation.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
// * be human-readable and meaningful (not auto-generated or simple ordinals)
// * be constant (not dependent on input data)
// * contain only printable characters
// * not contain whitespace or punctuation
// * use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
// * "caller": the calling information (file/line) of a particular log line
// * "error": the underlying error value in the `Error` method
// * "level": the log level
// * "logger": the name of the associated logger
// * "msg": the log message
// * "stacktrace": the stack trace associated with a particular log line or
// error (often from the `Error` message)
// * "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
// Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
// // Underlier exposes access to the underlying logging implementation.
// // Since callers only have a logr.Logger, they have to know which
// // implementation is in use, so this interface is less of an abstraction
// and more of a way to test type conversion.
// type Underlier interface {
// GetUnderlying() <underlying-type>
// }
//
// Logger grants access to the sink to enable type assertions like this:
// func DoSomethingWithImpl(log logr.Logger) {
//     if underlier, ok := log.GetSink().(impl.Underlier); ok {
// implLogger := underlier.GetUnderlying()
// ...
// }
// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
// // WithFooBar changes the foobar parameter in the log sink and returns a
// // new logger with that modified sink. It does nothing for loggers where
// // the sink doesn't support that parameter.
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
//     if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
// }
// return log
// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
// unexported fields in Logger get lost.
//
// Beware that the same LogSink instance may be shared by different logger
// instances. Calling functions that modify the LogSink will affect all of
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
sink.Init(runtimeInfo)
return logger
}
// setSink stores the sink and updates any related fields. It mutates the
// logger and thus is only safe to use for loggers that are not currently being
// used concurrently.
func (l *Logger) setSink(sink LogSink) {
l.sink = sink
}
// GetSink returns the stored sink.
func (l Logger) GetSink() LogSink {
return l.sink
}
// WithSink returns a copy of the logger with the new sink.
func (l Logger) WithSink(sink LogSink) Logger {
l.setSink(sink)
return l
}
// Logger is an interface to an abstract logging implementation. This is a
// concrete type for performance reasons, but all the real work is passed on to
// a LogSink. Implementations of LogSink should provide their own constructors
// that return Logger, not LogSink.
//
// The underlying sink can be accessed through GetSink and be modified through
// WithSink. This enables the implementation of custom extensions (see "Break
// Glass" in the package documentation). Normally the sink should be used only
// indirectly.
type Logger struct {
sink LogSink
level int
}
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
return l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to the log
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
l.sink.Info(l.level, msg, keysAndValues...)
}
}
// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to Info, but may have unique behavior, and should be
// preferred for logging errors (see the package documentation for more
// information). The log message will always be emitted, regardless of
// verbosity level.
//
// The msg argument should be used to add context to any underlying error,
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
l.sink.Error(err, msg, keysAndValues...)
}
// V returns a new Logger instance for a specific verbosity level, relative to
// this Logger. In other words, V-levels are additive. A higher verbosity
// level means a log message is less important. Negative V-levels are treated
// as 0.
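//
// For example, because V-levels are additive, these two calls are
// equivalent:
//
//	logger.V(2).Info("msg")
//	logger.V(1).V(1).Info("msg")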
func (l Logger) V(level int) Logger {
if level < 0 {
level = 0
}
l.level += level
return l
}
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
// WithName returns a new Logger instance with the specified name element added
// to the Logger's name. Successive calls to WithName append additional
// suffixes to the Logger's name. It's strongly recommended that name segments
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
l.setSink(l.sink.WithName(name))
return l
}
// WithCallDepth returns a Logger instance that offsets the call stack by the
// specified number of frames when logging call site information, if possible.
// This is useful for users who have helper functions between the "real" call
// site and the actual calls to Logger methods. If depth is 0 the attribution
// should be to the direct caller of this function. If depth is 1 the
// attribution should skip 1 call frame, and so on. Successive calls to this
// are additive.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// it will be called and the result returned. If the implementation does not
// support CallDepthLogSink, the original Logger will be returned.
//
// To skip one level, WithCallStackHelper() should be used instead of
// WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
return l
}
// WithCallStackHelper returns a new Logger instance that skips the direct
// caller when logging call site information, if possible. This is useful for
// users who have helper functions between the "real" call site and the actual
// calls to Logger methods and want to support loggers which depend on marking
// each individual helper function, like loggers based on testing.T.
//
// In addition to using that new logger instance, callers also must call the
// returned function.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// WithCallDepth(1) will be called to produce a new logger. If it supports a
// WithCallStackHelper() method, that will be also called. If the
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
}
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
helper = withHelper.GetCallStackHelper()
} else {
helper = func() {}
}
return helper, l
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {
// CallDepth is the number of call frames the logr library adds between the
// end-user and the LogSink. LogSink implementations which choose to print
// the original logging site (e.g. file & line) should climb this many
// additional frames to find it.
CallDepth int
}
// runtimeInfo is a static global. It must not be changed at run time.
var runtimeInfo = RuntimeInfo{
CallDepth: 1,
}
// LogSink represents a logging implementation. End-users will generally not
// interact with this type.
type LogSink interface {
// Init receives optional information about the logr library for LogSink
// implementations that need it.
Init(info RuntimeInfo)
// Enabled tests whether this LogSink is enabled at the specified V-level.
// For example, commandline flags might be used to set the logging
// verbosity and disable some info logs.
Enabled(level int) bool
// Info logs a non-error message with the given key/value pairs as context.
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
Info(level int, msg string, keysAndValues ...interface{})
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
Error(err error, msg string, keysAndValues ...interface{})
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
WithValues(keysAndValues ...interface{}) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
WithName(name string) LogSink
}
// CallDepthLogSink represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it.
type CallDepthLogSink interface {
// WithCallDepth returns a LogSink that will offset the call
// stack by the specified number of frames when logging call
// site information.
//
// If depth is 0, the LogSink should skip exactly the number
// of call frames defined in RuntimeInfo.CallDepth when Info
// or Error are called, i.e. the attribution should be to the
// direct caller of Logger.Info or Logger.Error.
//
// If depth is 1 the attribution should skip 1 call frame, and so on.
// Successive calls to this are additive.
WithCallDepth(depth int) LogSink
}
// CallStackHelperLogSink represents a Logger that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
//
// This is useful for users who have helper functions between the
// "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as
// file, function, or line) would otherwise log information about the
// intermediate helper functions.
//
// This is an optional interface and implementations are not required
// to support it. Implementations that choose to support this must not
// simply implement it as WithCallDepth(1), because
// Logger.WithCallStackHelper will call both methods if they are
// present. This should only be implemented for LogSinks that actually
// need it, as with testing.T.
type CallStackHelperLogSink interface {
// GetCallStackHelper returns a function that must be called
// to mark the direct caller as helper function when logging
// call site information.
GetCallStackHelper() func()
}
// Marshaler is an optional interface that logged values may choose to
// implement. Loggers with structured output, such as JSON, should
// log the object returned by the MarshalLog method instead of the
// original value.
type Marshaler interface {
// MarshalLog can be used to:
// - ensure that structs are not logged as strings when the original
// value has a String method: return a different type without a
// String method
// - select which fields of a complex type should get logged:
// return a simpler struct with fewer fields
// - log unexported fields: return a different struct
// with exported fields
//
// It may return any value of any type.
MarshalLog() interface{}
}

201
vendor/github.com/go-logr/stdr/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

6
vendor/github.com/go-logr/stdr/README.md generated vendored Normal file
View file

@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard [log package](https://pkg.go.dev/log).
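A minimal usage sketch (illustrative; it assumes only the `New` and `SetVerbosity` API defined in `stdr.go` below):

    package main

    import (
        "log"
        "os"

        "github.com/go-logr/stdr"
    )

    func main() {
        stdr.SetVerbosity(1) // enable V(0) and V(1) messages
        logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
        logger.Info("hello", "user", "alice")
        logger.V(1).Info("verbose detail")
        logger.Error(nil, "something failed") // Error is logged regardless of verbosity
    }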

170
vendor/github.com/go-logr/stdr/stdr.go generated vendored Normal file
View file

@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr
import (
"log"
"os"
"github.com/go-logr/logr"
"github.com/go-logr/logr/funcr"
)
// The global verbosity level. See SetVerbosity().
var globalVerbosity int
// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
old := globalVerbosity
globalVerbosity = v
return old
}
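// Illustrative sketch (not part of the upstream source): raising the global
// verbosity temporarily and restoring the previous value.
//
//	old := SetVerbosity(2) // Info calls with V-level <= 2 will be written
//	defer SetVerbosity(old)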
// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
func New(std StdLogger) logr.Logger {
return NewWithOptions(std, Options{})
}
// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
if std == nil {
// Go's log.Default() is only available in 1.16 and higher.
std = log.New(os.Stderr, "", log.LstdFlags)
}
if opts.Depth < 0 {
opts.Depth = 0
}
fopts := funcr.Options{
LogCaller: funcr.MessageClass(opts.LogCaller),
}
sl := &logger{
Formatter: funcr.NewFormatter(fopts),
std: std,
}
// For skipping our own logger.Info/Error.
sl.Formatter.AddCallDepth(1 + opts.Depth)
return logr.New(sl)
}
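// Illustrative sketch (assumed usage, not upstream code): building a logger
// that annotates every line with its caller.
//
//	std := log.New(os.Stderr, "", log.LstdFlags)
//	logger := NewWithOptions(std, Options{LogCaller: All})
//	logger.Info("starting", "addr", ":8080")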
// Options carries parameters which influence the way logs are generated.
type Options struct {
// Depth biases the assumed number of call frames to the "true" caller.
// This is useful when the calling code calls a function which then calls
// stdr (e.g. a logging shim to another API). Values less than zero will
// be treated as zero.
Depth int
// LogCaller tells stdr to add a "caller" key to some or all log lines.
// Go's log package has options to log this natively, too.
LogCaller MessageClass
// TODO: add an option to log the date/time
}
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
// Output is the same as log.Output and log.Logger.Output.
Output(calldepth int, logline string) error
}
type logger struct {
funcr.Formatter
std StdLogger
}
var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}
func (l logger) Enabled(level int) bool {
return globalVerbosity >= level
}
func (l logger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l logger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of a way to test
// type conversion.
type Underlier interface {
GetUnderlying() StdLogger
}
// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
return l.std
}

View file

@ -21,7 +21,7 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The canonical error codes for Google APIs.
// The canonical error codes for gRPC APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
@ -156,7 +156,8 @@ const (
INTERNAL Code = 13
// The service is currently unavailable. This is most likely a
// transient condition, which can be corrected by retrying with
// a backoff.
// a backoff. Note that it is not always safe to retry
// non-idempotent operations.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.

View file

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -22,7 +22,7 @@ option java_outer_classname = "CodeProto";
option java_package = "com.google.rpc";
option objc_class_prefix = "RPC";
// The canonical error codes for Google APIs.
// The canonical error codes for gRPC APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
@ -170,7 +170,8 @@ enum Code {
// The service is currently unavailable. This is most likely a
// transient condition, which can be corrected by retrying with
// a backoff.
// a backoff. Note that it is not always safe to retry
// non-idempotent operations.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.

File diff suppressed because it is too large

View file

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -35,7 +35,7 @@ option objc_class_prefix = "RPC";
// receiving the error response before retrying. If retrying requests also
// fail, clients should use an exponential backoff scheme to gradually increase
// the delay between retries based on `retry_delay`, until either a maximum
// number of retires have been reached or a maximum retry delay cap has been
// number of retries have been reached or a maximum retry delay cap has been
// reached.
message RetryInfo {
// Clients should wait at least this long between retrying the same request.
@ -60,7 +60,7 @@ message DebugInfo {
// a service could respond with the project id and set `service_disabled`
// to true.
//
// Also see RetryDetail and Help types for other details about handling a
// Also see RetryInfo and Help types for other details about handling a
// quota failure.
message QuotaFailure {
// A message type used to describe a single quota violation. For example, a
@ -85,6 +85,56 @@ message QuotaFailure {
repeated Violation violations = 1;
}
// Describes the cause of the error with structured details.
//
// Example of an error when contacting the "pubsub.googleapis.com" API when it
// is not enabled:
//
// { "reason": "API_DISABLED"
// "domain": "googleapis.com"
// "metadata": {
// "resource": "projects/123",
// "service": "pubsub.googleapis.com"
// }
// }
//
// This response indicates that the pubsub.googleapis.com API is not enabled.
//
// Example of an error that is returned when attempting to create a Spanner
// instance in a region that is out of stock:
//
// { "reason": "STOCKOUT"
// "domain": "spanner.googleapis.com",
// "metadata": {
// "availableRegions": "us-central1,us-east2"
// }
// }
message ErrorInfo {
// The reason of the error. This is a constant value that identifies the
// proximate cause of the error. Error reasons are unique within a particular
// domain of errors. This should be at most 63 characters and match
// /[A-Z0-9_]+/.
string reason = 1;
// The logical grouping to which the "reason" belongs. The error domain
// is typically the registered service name of the tool or product that
// generates the error. Example: "pubsub.googleapis.com". If the error is
// generated by some common infrastructure, the error domain must be a
// globally unique value that identifies the infrastructure. For Google API
// infrastructure, the error domain is "googleapis.com".
string domain = 2;
// Additional structured details about this error.
//
// Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
// length. When identifying the current value of an exceeded limit, the units
// should be contained in the key, not the value. For example, rather than
// {"instanceLimit": "100/request"}, should be returned as,
// {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
// instances that can be created in a single (batch) request.
map<string, string> metadata = 3;
}
// Describes what preconditions have failed.
//
// For example, if an RPC failed because it required the Terms of Service to be
@ -94,13 +144,13 @@ message PreconditionFailure {
// A message type used to describe a single precondition failure.
message Violation {
// The type of PreconditionFailure. We recommend using a service-specific
// enum type to define the supported precondition violation types. For
// enum type to define the supported precondition violation subjects. For
// example, "TOS" for "Terms of Service violation".
string type = 1;
// The subject, relative to the type, that failed.
// For example, "google.com/cloud" relative to the "TOS" type would
// indicate which terms of service is being referenced.
// For example, "google.com/cloud" relative to the "TOS" type would indicate
// which terms of service is being referenced.
string subject = 2;
// A description of how the precondition failed. Developers can use this
@ -153,8 +203,7 @@ message ResourceInfo {
// The name of the resource being accessed. For example, a shared calendar
// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
// error is
// [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
// error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
string resource_name = 2;
// The owner of the resource (optional).

View file

@ -28,65 +28,17 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
// used by [gRPC](https://github.com/grpc). The error model is designed to be:
// used by [gRPC](https://github.com/grpc). Each `Status` message contains
// three pieces of data: error code, error message, and error details.
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error
// message, and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
// if needed. The error message should be a developer-facing English message
// that helps developers *understand* and *resolve* the error. If a localized
// user-facing error message is needed, put the localized message in the error
// details or localize it in the client. The optional error details may contain
// arbitrary information about the error. There is a predefined set of error
// detail types in the package `google.rpc` that can be used for common error
// conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
// You can find out more about this error model and how to work with it in the
// [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
// The status code, which should be an enum value of
// [google.rpc.Code][google.rpc.Code].
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
// by the client.
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
@ -159,7 +111,7 @@ func init() {
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
var fileDescriptor_24d244abaf643bfe = []byte{
// 235 bytes of a gzipped FileDescriptorProto
// 238 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
@ -168,13 +120,13 @@ var fileDescriptor_24d244abaf643bfe = []byte{
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xc4, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8,
0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31,
0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9,
0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f,
0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17,
0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00,
0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00,
0x15, 0x24, 0xff, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0x1c, 0x14, 0xe0, 0x9c, 0xc4, 0x06, 0xb6, 0xd9,
0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x16, 0xcd, 0x7b, 0x60, 0x13, 0x01, 0x00, 0x00,
}
func (this *Status) Compare(that interface{}) int {
@ -626,10 +578,7 @@ func (m *Status) Unmarshal(dAtA []byte) error {
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthStatus
}
if (iNdEx + skippy) < 0 {
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStatus
}
if (iNdEx + skippy) > l {

View file

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -18,6 +18,7 @@ package google.rpc;
import "google/protobuf/any.proto";
option cc_enable_arenas = true;
option go_package = "rpc";
option java_multiple_files = true;
option java_outer_classname = "StatusProto";
@ -26,66 +27,18 @@ option objc_class_prefix = "RPC";
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
// used by [gRPC](https://github.com/grpc). The error model is designed to be:
// used by [gRPC](https://github.com/grpc). Each `Status` message contains
// three pieces of data: error code, error message, and error details.
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error
// message, and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
// if needed. The error message should be a developer-facing English message
// that helps developers *understand* and *resolve* the error. If a localized
// user-facing error message is needed, put the localized message in the error
// details or localize it in the client. The optional error details may contain
// arbitrary information about the error. There is a predefined set of error
// detail types in the package `google.rpc` that can be used for common error
// conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
// You can find out more about this error model and how to work with it in the
// [API Design Guide](https://cloud.google.com/apis/design/errors).
message Status {
// The status code, which should be an enum value of
// [google.rpc.Code][google.rpc.Code].
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
int32 code = 1;
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
// by the client.
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
string message = 2;
// A list of messages that carry the error details. There is a common set of

View file

@ -0,0 +1,180 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descriptor provides functions for obtaining the protocol buffer
// descriptors of generated Go types.
//
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
// programmatically interact with the protobuf type system.
package descriptor
import (
"bytes"
"compress/gzip"
"io/ioutil"
"sync"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
// Message is proto.Message with a method to return its descriptor.
//
// Deprecated: The Descriptor method may not be generated by future
// versions of protoc-gen-go, meaning that this interface may not
// be implemented by many concrete message types.
type Message interface {
proto.Message
Descriptor() ([]byte, []int)
}
// ForMessage returns the file descriptor proto containing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
//
// Deprecated: Not all concrete message types satisfy the Message interface.
// Use MessageDescriptorProto instead. If possible, the calling code should
// be rewritten to use protobuf reflection instead.
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
return MessageDescriptorProto(m)
}
type rawDesc struct {
fileDesc []byte
indexes []int
}
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
// Fast-path: check whether raw descriptors are already cached.
origDesc := d
if v, ok := rawDescCache.Load(origDesc); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
// Slow-path: derive the raw descriptor from the v2 descriptor.
// Start with the leaf (a given enum or message declaration) and
// ascend upwards until we hit the parent file descriptor.
var idxs []int
for {
idxs = append(idxs, d.Index())
d = d.Parent()
if d == nil {
// TODO: We could construct a FileDescriptor stub for standalone
// descriptors to satisfy the API.
return nil, nil
}
if _, ok := d.(protoreflect.FileDescriptor); ok {
break
}
}
// Obtain the raw file descriptor.
fd := d.(protoreflect.FileDescriptor)
b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
file := protoimpl.X.CompressGZIP(b)
// Reverse the indexes, since we populated them in reverse.
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
idxs[i], idxs[j] = idxs[j], idxs[i]
}
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
return file, idxs
}
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
// the enum and the index path to reach the enum declaration.
// The returned slices must not be mutated.
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
return ev.EnumDescriptor()
}
ed := protoimpl.X.EnumTypeOf(e)
return deriveRawDescriptor(ed.Descriptor())
}
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
// the message and the index path to reach the message declaration.
// The returned slices must not be mutated.
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
return mv.Descriptor()
}
md := protoimpl.X.MessageTypeOf(m)
return deriveRawDescriptor(md.Descriptor())
}
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
// Fast-path: check whether descriptor protos are already cached.
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
// Slow-path: derive the descriptor proto from the GZIP'd message.
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
if err != nil {
panic(err)
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(err)
}
fd := new(descriptorpb.FileDescriptorProto)
if err := proto.Unmarshal(b, fd); err != nil {
panic(err)
}
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
return fd
}
// EnumDescriptorProto returns the file descriptor proto representing
// the enum and the enum descriptor proto for the enum itself.
// The returned proto messages must not be mutated.
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
rawDesc, idxs := EnumRawDescriptor(e)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
if len(idxs) == 1 {
return fd, fd.EnumType[idxs[0]]
}
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1 : len(idxs)-1] {
md = md.NestedType[i]
}
ed := md.EnumType[idxs[len(idxs)-1]]
return fd, ed
}
// MessageDescriptorProto returns the file descriptor proto representing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
rawDesc, idxs := MessageRawDescriptor(m)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1:] {
md = md.NestedType[i]
}
return fd, md
}
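// Illustrative sketch (hypothetical generated type pb.Greeting; any
// protoc-gen-go message works):
//
//	fd, md := MessageDescriptorProto(&pb.Greeting{})
//	fmt.Println(fd.GetName(), md.GetName()) // file name and message name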

View file

@ -0,0 +1,27 @@
Copyright (c) 2015, Gengo, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Gengo, Inc. nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,23 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
package(default_visibility = ["//visibility:public"])
proto_library(
name = "internal_proto",
srcs = ["errors.proto"],
deps = ["@com_google_protobuf//:any_proto"],
)
go_proto_library(
name = "internal_go_proto",
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
proto = ":internal_proto",
)
go_library(
name = "go_default_library",
embed = [":internal_go_proto"],
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
)

View file

@ -0,0 +1,189 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: internal/errors.proto
package internal
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Error is the generic error returned from unary RPCs.
type Error struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Error) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Error) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
// StreamError is a response type which is returned when
// a streaming RPC returns an error.
type StreamError struct {
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamError) Reset() { *m = StreamError{} }
func (m *StreamError) String() string { return proto.CompactTextString(m) }
func (*StreamError) ProtoMessage() {}
func (*StreamError) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{1}
}
func (m *StreamError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamError.Unmarshal(m, b)
}
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
}
func (m *StreamError) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamError.Merge(m, src)
}
func (m *StreamError) XXX_Size() int {
return xxx_messageInfo_StreamError.Size(m)
}
func (m *StreamError) XXX_DiscardUnknown() {
xxx_messageInfo_StreamError.DiscardUnknown(m)
}
var xxx_messageInfo_StreamError proto.InternalMessageInfo
func (m *StreamError) GetGrpcCode() int32 {
if m != nil {
return m.GrpcCode
}
return 0
}
func (m *StreamError) GetHttpCode() int32 {
if m != nil {
return m.HttpCode
}
return 0
}
func (m *StreamError) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *StreamError) GetHttpStatus() string {
if m != nil {
return m.HttpStatus
}
return ""
}
func (m *StreamError) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
}
func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
var fileDescriptor_9b093362ca6d1e03 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
}

View file

@ -0,0 +1,26 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";
import "google/protobuf/any.proto";
// Error is the generic error returned from unary RPCs.
message Error {
string error = 1;
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
int32 code = 2;
string message = 3;
repeated google.protobuf.Any details = 4;
}
// StreamError is a response type which is returned when
// a streaming RPC returns an error.
message StreamError {
int32 grpc_code = 1;
int32 http_code = 2;
string message = 3;
string http_status = 4;
repeated google.protobuf.Any details = 5;
}

View file

@ -0,0 +1,85 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = [
"context.go",
"convert.go",
"doc.go",
"errors.go",
"fieldmask.go",
"handler.go",
"marshal_httpbodyproto.go",
"marshal_json.go",
"marshal_jsonpb.go",
"marshal_proto.go",
"marshaler.go",
"marshaler_registry.go",
"mux.go",
"pattern.go",
"proto2_convert.go",
"proto_errors.go",
"query.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
deps = [
"//internal:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//descriptor:go_default_library_gen",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@go_googleapis//google/api:httpbody_go_proto",
"@io_bazel_rules_go//proto/wkt:any_go_proto",
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//grpclog:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"context_test.go",
"convert_test.go",
"errors_test.go",
"fieldmask_test.go",
"handler_test.go",
"marshal_httpbodyproto_test.go",
"marshal_json_test.go",
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
"mux_test.go",
"pattern_test.go",
"query_test.go",
],
embed = [":go_default_library"],
deps = [
"//internal:go_default_library",
"//runtime/internal/examplepb:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)

View file

@ -0,0 +1,291 @@
package runtime
import (
"context"
"encoding/base64"
"fmt"
"net"
"net/http"
"net/textproto"
"strconv"
"strings"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// MetadataHeaderPrefix is the http prefix that represents custom metadata
// parameters to or from a gRPC call.
const MetadataHeaderPrefix = "Grpc-Metadata-"
// MetadataPrefix is prepended to permanent HTTP header keys (as specified
// by the IANA) when added to the gRPC context.
const MetadataPrefix = "grpcgateway-"
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
// HTTP headers in a response handled by grpc-gateway
const MetadataTrailerPrefix = "Grpc-Trailer-"
const metadataGrpcTimeout = "Grpc-Timeout"
const metadataHeaderBinarySuffix = "-Bin"
const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"
var (
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
DefaultContextTimeout = 0 * time.Second
)
func decodeBinHeader(v string) ([]byte, error) {
if len(v)%4 == 0 {
// Input was padded, or padding was not necessary.
return base64.StdEncoding.DecodeString(v)
}
return base64.RawStdEncoding.DecodeString(v)
}
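// Illustrative examples (assumed inputs): padded input takes the StdEncoding
// path, unpadded input falls back to RawStdEncoding.
//
//	decodeBinHeader("aGk=") // []byte("hi") via base64.StdEncoding
//	decodeBinHeader("aGk")  // []byte("hi") via base64.RawStdEncoding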
/*
AnnotateContext adds context information such as metadata from the request.
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather
a gRPC service.
*/
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req)
if err != nil {
return nil, err
}
if md == nil {
return ctx, nil
}
return metadata.NewOutgoingContext(ctx, md), nil
}
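// Illustrative sketch (assumed caller, not upstream code): forwarding an
// incoming HTTP request's metadata into an outgoing gRPC context.
//
//	ctx, err := AnnotateContext(r.Context(), mux, r)
//	if err != nil {
//		return // err is already a gRPC status error
//	}
//	resp, err := client.SomeMethod(ctx, req) // hypothetical gRPC client call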
// AnnotateIncomingContext adds context information such as metadata from the request.
// Attach metadata as incoming context.
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req)
if err != nil {
return nil, err
}
if md == nil {
return ctx, nil
}
return metadata.NewIncomingContext(ctx, md), nil
}
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
var pairs []string
timeout := DefaultContextTimeout
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
var err error
timeout, err = timeoutDecode(tm)
if err != nil {
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
}
}
for key, vals := range req.Header {
key = textproto.CanonicalMIMEHeaderKey(key)
for _, val := range vals {
// For backwards-compatibility, pass through 'authorization' header with no prefix.
if key == "Authorization" {
pairs = append(pairs, "authorization", val)
}
if h, ok := mux.incomingHeaderMatcher(key); ok {
// Handles "-bin" metadata in grpc, since grpc will do another base64
// encode before sending to server, we need to decode it first.
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
b, err := decodeBinHeader(val)
if err != nil {
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
}
val = string(b)
}
pairs = append(pairs, h, val)
}
}
}
if host := req.Header.Get(xForwardedHost); host != "" {
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
} else if req.Host != "" {
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
}
if addr := req.RemoteAddr; addr != "" {
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
} else {
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
}
}
}
if timeout != 0 {
ctx, _ = context.WithTimeout(ctx, timeout)
}
if len(pairs) == 0 {
return ctx, nil, nil
}
md := metadata.Pairs(pairs...)
for _, mda := range mux.metadataAnnotators {
md = metadata.Join(md, mda(ctx, req))
}
return ctx, md, nil
}
// ServerMetadata consists of metadata sent from gRPC server.
type ServerMetadata struct {
HeaderMD metadata.MD
TrailerMD metadata.MD
}
type serverMetadataKey struct{}
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
return context.WithValue(ctx, serverMetadataKey{}, md)
}
// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
return
}
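// Illustrative sketch (assumed usage): reading headers the gRPC server sent,
// after the gateway has stored them in the context.
//
//	if md, ok := ServerMetadataFromContext(ctx); ok {
//		cts := md.HeaderMD.Get("content-type")
//		_ = cts
//	}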
// ServerTransportStream implements grpc.ServerTransportStream.
// It should only be used by the generated files to support grpc.SendHeader
// outside of gRPC server use.
type ServerTransportStream struct {
mu sync.Mutex
header metadata.MD
trailer metadata.MD
}
// Method returns the method for the stream.
func (s *ServerTransportStream) Method() string {
return ""
}
// Header returns the header metadata of the stream.
func (s *ServerTransportStream) Header() metadata.MD {
s.mu.Lock()
defer s.mu.Unlock()
return s.header.Copy()
}
// SetHeader sets the header metadata.
func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
s.mu.Lock()
s.header = metadata.Join(s.header, md)
s.mu.Unlock()
return nil
}
// SendHeader sets the header metadata.
func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
return s.SetHeader(md)
}
// Trailer returns the cached trailer metadata.
func (s *ServerTransportStream) Trailer() metadata.MD {
s.mu.Lock()
defer s.mu.Unlock()
return s.trailer.Copy()
}
// SetTrailer sets the trailer metadata.
func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
s.mu.Lock()
s.trailer = metadata.Join(s.trailer, md)
s.mu.Unlock()
return nil
}
func timeoutDecode(s string) (time.Duration, error) {
size := len(s)
if size < 2 {
return 0, fmt.Errorf("timeout string is too short: %q", s)
}
d, ok := timeoutUnitToDuration(s[size-1])
if !ok {
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
}
t, err := strconv.ParseInt(s[:size-1], 10, 64)
if err != nil {
return 0, err
}
return d * time.Duration(t), nil
}
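// Illustrative examples (unit table below): the Grpc-Timeout header value is
// an integer followed by a single unit character.
//
//	timeoutDecode("100m") // 100 * time.Millisecond
//	timeoutDecode("7S")   // 7 * time.Second
//	timeoutDecode("5")    // error: timeout string is too short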
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
switch u {
case 'H':
return time.Hour, true
case 'M':
return time.Minute, true
case 'S':
return time.Second, true
case 'm':
return time.Millisecond, true
case 'u':
return time.Microsecond, true
case 'n':
return time.Nanosecond, true
default:
}
return
}
// isPermanentHTTPHeader checks whether hdr belongs to the list of
// permanent request headers maintained by IANA.
// http://www.iana.org/assignments/message-headers/message-headers.xml
func isPermanentHTTPHeader(hdr string) bool {
switch hdr {
case
"Accept",
"Accept-Charset",
"Accept-Language",
"Accept-Ranges",
"Authorization",
"Cache-Control",
"Content-Type",
"Cookie",
"Date",
"Expect",
"From",
"Host",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Schedule-Tag-Match",
"If-Unmodified-Since",
"Max-Forwards",
"Origin",
"Pragma",
"Referer",
"User-Agent",
"Via",
"Warning":
return true
}
return false
}

View file

@ -0,0 +1,318 @@
package runtime
import (
"encoding/base64"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/golang/protobuf/ptypes/wrappers"
)
// String just returns the given string.
// It is just for compatibility with other types.
func String(val string) (string, error) {
return val, nil
}
// StringSlice converts 'val' where individual strings are separated by
// 'sep' into a string slice.
func StringSlice(val, sep string) ([]string, error) {
return strings.Split(val, sep), nil
}
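// Illustrative example (assumed input): splitting a comma-separated query
// parameter.
//
//	StringSlice("a,b,c", ",") // []string{"a", "b", "c"}, nil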
// Bool converts the given string representation of a boolean value into bool.
func Bool(val string) (bool, error) {
return strconv.ParseBool(val)
}
// BoolSlice converts 'val' where individual booleans are separated by
// 'sep' into a bool slice.
func BoolSlice(val, sep string) ([]bool, error) {
s := strings.Split(val, sep)
values := make([]bool, len(s))
for i, v := range s {
value, err := Bool(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Float64 converts the given string representation of a floating point number into float64.
func Float64(val string) (float64, error) {
return strconv.ParseFloat(val, 64)
}
// Float64Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float64 slice.
func Float64Slice(val, sep string) ([]float64, error) {
s := strings.Split(val, sep)
values := make([]float64, len(s))
for i, v := range s {
value, err := Float64(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Float32 converts the given string representation of a floating point number into float32.
func Float32(val string) (float32, error) {
f, err := strconv.ParseFloat(val, 32)
if err != nil {
return 0, err
}
return float32(f), nil
}
// Float32Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float32 slice.
func Float32Slice(val, sep string) ([]float32, error) {
s := strings.Split(val, sep)
values := make([]float32, len(s))
for i, v := range s {
value, err := Float32(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Int64 converts the given string representation of an integer into int64.
func Int64(val string) (int64, error) {
return strconv.ParseInt(val, 0, 64)
}
// Int64Slice converts 'val' where individual integers are separated by
// 'sep' into an int64 slice.
func Int64Slice(val, sep string) ([]int64, error) {
s := strings.Split(val, sep)
values := make([]int64, len(s))
for i, v := range s {
value, err := Int64(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Int32 converts the given string representation of an integer into int32.
func Int32(val string) (int32, error) {
i, err := strconv.ParseInt(val, 0, 32)
if err != nil {
return 0, err
}
return int32(i), nil
}
// Int32Slice converts 'val' where individual integers are separated by
// 'sep' into an int32 slice.
func Int32Slice(val, sep string) ([]int32, error) {
s := strings.Split(val, sep)
values := make([]int32, len(s))
for i, v := range s {
value, err := Int32(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Uint64 converts the given string representation of an integer into uint64.
func Uint64(val string) (uint64, error) {
return strconv.ParseUint(val, 0, 64)
}
// Uint64Slice converts 'val' where individual integers are separated by
// 'sep' into a uint64 slice.
func Uint64Slice(val, sep string) ([]uint64, error) {
s := strings.Split(val, sep)
values := make([]uint64, len(s))
for i, v := range s {
value, err := Uint64(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Uint32 converts the given string representation of an integer into uint32.
func Uint32(val string) (uint32, error) {
i, err := strconv.ParseUint(val, 0, 32)
if err != nil {
return 0, err
}
return uint32(i), nil
}
// Uint32Slice converts 'val' where individual integers are separated by
// 'sep' into a uint32 slice.
func Uint32Slice(val, sep string) ([]uint32, error) {
s := strings.Split(val, sep)
values := make([]uint32, len(s))
for i, v := range s {
value, err := Uint32(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Bytes converts the given string representation of a byte sequence into a slice of bytes.
// A byte sequence is encoded in URL-safe base64 without padding.
func Bytes(val string) ([]byte, error) {
b, err := base64.StdEncoding.DecodeString(val)
if err != nil {
b, err = base64.URLEncoding.DecodeString(val)
if err != nil {
return nil, err
}
}
return b, nil
}
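// Illustrative examples (assumed inputs): the standard alphabet is tried
// first, then the URL-safe one.
//
//	Bytes("aGVsbG8=") // []byte("hello") via base64.StdEncoding
//	Bytes("_-8=")     // []byte{0xff, 0xef} via the base64.URLEncoding fallback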
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
// base64 without padding, are separated by 'sep' into a slice of byte slices.
func BytesSlice(val, sep string) ([][]byte, error) {
s := strings.Split(val, sep)
values := make([][]byte, len(s))
for i, v := range s {
value, err := Bytes(v)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
func Timestamp(val string) (*timestamp.Timestamp, error) {
var r timestamp.Timestamp
err := jsonpb.UnmarshalString(val, &r)
if err != nil {
return nil, err
}
return &r, nil
}
// Duration converts the given string into a duration.Duration.
func Duration(val string) (*duration.Duration, error) {
var r duration.Duration
err := jsonpb.UnmarshalString(val, &r)
if err != nil {
return nil, err
}
return &r, nil
}
// Enum converts the given string into an int32 that should be cast to the
// correct enum proto type.
func Enum(val string, enumValMap map[string]int32) (int32, error) {
e, ok := enumValMap[val]
if ok {
return e, nil
}
i, err := Int32(val)
if err != nil {
return 0, fmt.Errorf("%s is not valid", val)
}
for _, v := range enumValMap {
if v == i {
return i, nil
}
}
return 0, fmt.Errorf("%s is not valid", val)
}
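// Illustrative example (hypothetical enum map, not upstream code):
//
//	m := map[string]int32{"UNKNOWN": 0, "STARTED": 1}
//	Enum("STARTED", m) // 1, nil
//	Enum("1", m)       // 1, nil (numeric form accepted when the value exists)
//	Enum("2", m)       // 0, error: "2 is not valid"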
// EnumSlice converts 'val' where individual enums are separated by 'sep'
// into an int32 slice. Each individual int32 should be cast to the
// correct enum proto type.
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
s := strings.Split(val, sep)
values := make([]int32, len(s))
for i, v := range s {
value, err := Enum(v, enumValMap)
if err != nil {
return values, err
}
values[i] = value
}
return values, nil
}
/*
Support for google.protobuf.wrappers on top of primitive types
*/
// StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrappers.StringValue, error) {
return &wrappers.StringValue{Value: val}, nil
}
// FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrappers.FloatValue, error) {
parsedVal, err := Float32(val)
return &wrappers.FloatValue{Value: parsedVal}, err
}
// DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrappers.DoubleValue, error) {
parsedVal, err := Float64(val)
return &wrappers.DoubleValue{Value: parsedVal}, err
}
// BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrappers.BoolValue, error) {
parsedVal, err := Bool(val)
return &wrappers.BoolValue{Value: parsedVal}, err
}
// Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrappers.Int32Value, error) {
parsedVal, err := Int32(val)
return &wrappers.Int32Value{Value: parsedVal}, err
}
// UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrappers.UInt32Value, error) {
parsedVal, err := Uint32(val)
return &wrappers.UInt32Value{Value: parsedVal}, err
}
// Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrappers.Int64Value, error) {
parsedVal, err := Int64(val)
return &wrappers.Int64Value{Value: parsedVal}, err
}
// UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrappers.UInt64Value, error) {
parsedVal, err := Uint64(val)
return &wrappers.UInt64Value{Value: parsedVal}, err
}
// BytesValue well-known type support as wrapper around []byte type
func BytesValue(val string) (*wrappers.BytesValue, error) {
parsedVal, err := Bytes(val)
return &wrappers.BytesValue{Value: parsedVal}, err
}
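// Editor's sketch, not part of the vendored file: a godoc-style example of the
// conversion helpers above (it would live in a _test.go file with "fmt"
// imported). Note that Bytes as written tries base64.StdEncoding and then
// base64.URLEncoding, both padded variants, and that Duration goes through
// jsonpb, so the value must be a JSON-quoted string.
func ExampleBytes() {
b, err := Bytes("aGVsbG8=") // padded base64 for "hello"
if err != nil {
panic(err)
}
d, err := Duration(`"5s"`) // JSON string form of google.protobuf.Duration
if err != nil {
panic(err)
}
fmt.Printf("%s %ds\n", b, d.Seconds)
// Output: hello 5s
}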

View file

@ -0,0 +1,5 @@
/*
Package runtime contains runtime helper functions used by
servers which protoc-gen-grpc-gateway generates.
*/
package runtime

View file

@ -0,0 +1,186 @@
package runtime
import (
"context"
"io"
"net/http"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
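// Editor's sketch, not part of the vendored file: a spot check of the mapping
// above, written as a godoc example (it would live in a _test.go file with
// "fmt", "net/http" and "google.golang.org/grpc/codes" imported). Codes that
// are not listed in the switch fall through to 500.
func ExampleHTTPStatusFromCode() {
fmt.Println(HTTPStatusFromCode(codes.NotFound) == http.StatusNotFound)
fmt.Println(HTTPStatusFromCode(codes.Code(9999)) == http.StatusInternalServerError)
// Output:
// true
// true
}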
var (
// HTTPError replies to the request with an error.
//
// HTTPError is called:
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
// - From gateway runtime code, when forwarding the response message results in an error.
//
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
//
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
// serve option.
//
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
// option, set GlobalHTTPErrorHandler to a custom function.
//
// Setting this variable directly to customize error format is deprecated.
HTTPError = MuxOrGlobalHTTPError
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
// WithProtoErrorHandler serve option.
//
// You can set a custom function to this variable to customize error format.
GlobalHTTPErrorHandler = DefaultHTTPError
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
// ServeMux instances not using the WithProtoErrorHandler serve option.
//
// It returns the following error codes: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
//
// To customize parsing and routing error handling of a particular ServeMux instance, use the
// WithProtoErrorHandler serve option.
//
// To customize parsing and routing error handling of all ServeMux instances not using the
// WithProtoErrorHandler serve option, set a custom function to this variable.
OtherErrorHandler = DefaultOtherErrorHandler
)
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
if mux.protoErrorHandler != nil {
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
} else {
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
}
}
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
const fallback = `{"error": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
body := &internal.Error{
Error: s.Message(),
Message: s.Message(),
Code: int32(s.Code()),
Details: s.Proto().GetDetails(),
}
buf, merr := marshaler.Marshal(body)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
var wantsTrailers bool
if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
wantsTrailers = true
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if wantsTrailers {
handleForwardResponseTrailer(w, md)
}
}
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
// It simply writes a string representation of the given error into "w".
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
http.Error(w, msg, code)
}
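// Editor's sketch, not part of the vendored file: overriding the error format
// for all ServeMux instances that do not use WithProtoErrorHandler, by setting
// GlobalHTTPErrorHandler to a wrapper that adds a header (hypothetical name)
// and then defers to the stock behavior.
func ExampleGlobalHTTPErrorHandler() {
GlobalHTTPErrorHandler = func(ctx context.Context, mux *ServeMux, m Marshaler, w http.ResponseWriter, r *http.Request, err error) {
w.Header().Set("X-Gateway-Error", "true") // extra diagnostics, then default handling
DefaultHTTPError(ctx, mux, m, w, r, err)
}
}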

View file

@ -0,0 +1,89 @@
package runtime
import (
"encoding/json"
"io"
"strings"
descriptor2 "github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/genproto/protobuf/field_mask"
)
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
// TODO - should really gate this with a test that the marshaller has used json names
if md != nil {
for _, f := range md.Field {
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
var subType *descriptor.DescriptorProto
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
if f.TypeName != nil {
typeSplit := strings.Split(*f.TypeName, ".")
typeName := typeSplit[len(typeSplit)-1]
for _, t := range md.NestedType {
if typeName == *t.Name {
subType = t
}
}
}
return *f.Name, subType
}
}
}
return name, nil
}
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, md: md}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
if m, ok := item.node.(map[string]interface{}); ok {
// if the item is an object, then enqueue all of its children
for k, v := range m {
protoName, subMd := translateName(k, item.md)
if subMsg, ok := v.(descriptor2.Message); ok {
_, subMd = descriptor2.ForMessage(subMsg)
}
var path string
if item.path == "" {
path = protoName
} else {
path = item.path + "." + protoName
}
queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
}
} else if len(item.path) > 0 {
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
}
}
return fm, nil
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// a generic decoded json object for the current item to inspect for further path extraction
node interface{}
// descriptor for parent message
md *descriptor.DescriptorProto
}
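// Editor's sketch, not part of the vendored file: FieldMaskFromRequestBody on
// a raw JSON body with no descriptor (md == nil), written as a godoc example
// in a _test.go file with "fmt" and "strings" imported. Nested objects are
// walked breadth-first and only leaf paths are recorded.
func ExampleFieldMaskFromRequestBody() {
fm, err := FieldMaskFromRequestBody(strings.NewReader(`{"a": {"b": 1}, "c": "x"}`), nil)
if err != nil {
panic(err)
}
fmt.Println(len(fm.Paths)) // the two leaves, "a.b" and "c", in map order
// Output: 2
}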

View file

@ -0,0 +1,212 @@
package runtime
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/textproto"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/grpclog"
)
var errEmptyResponse = errors.New("empty response")
// ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
f, ok := w.(http.Flusher)
if !ok {
grpclog.Infof("Flush not supported in %T", w)
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
http.Error(w, "unexpected error", http.StatusInternalServerError)
return
}
handleForwardResponseServerMetadata(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
w.Header().Set("Content-Type", marshaler.ContentType())
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
HTTPError(ctx, mux, marshaler, w, req, err)
return
}
var delimiter []byte
if d, ok := marshaler.(Delimited); ok {
delimiter = d.Delimiter()
} else {
delimiter = []byte("\n")
}
var wroteHeader bool
for {
resp, err := recv()
if err == io.EOF {
return
}
if err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
return
}
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
return
}
var buf []byte
switch {
case resp == nil:
buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
default:
result := map[string]interface{}{"result": resp}
if rb, ok := resp.(responseBody); ok {
result["result"] = rb.XXX_ResponseBody()
}
buf, err = marshaler.Marshal(result)
}
if err != nil {
grpclog.Infof("Failed to marshal response chunk: %v", err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
return
}
if _, err = w.Write(buf); err != nil {
grpclog.Infof("Failed to send response chunk: %v", err)
return
}
wroteHeader = true
if _, err = w.Write(delimiter); err != nil {
grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
f.Flush()
}
}
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k, vs := range md.HeaderMD {
if h, ok := mux.outgoingHeaderMatcher(k); ok {
for _, v := range vs {
w.Header().Add(h, v)
}
}
}
}
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
for k := range md.TrailerMD {
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
w.Header().Add("Trailer", tKey)
}
}
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
for k, vs := range md.TrailerMD {
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
for _, v := range vs {
w.Header().Add(tKey, v)
}
}
}
// responseBody is implemented by response types that expose a field for marshaling to the response body.
// The accessor method is generated for a response struct when `response_body` is set in the `google.api.HttpRule`.
type responseBody interface {
XXX_ResponseBody() interface{}
}
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
handleForwardResponseTrailerHeader(w, md)
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
contentType = typeMarshaler.ContentTypeFromMessage(resp)
}
w.Header().Set("Content-Type", contentType)
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
HTTPError(ctx, mux, marshaler, w, req, err)
return
}
var buf []byte
var err error
if rb, ok := resp.(responseBody); ok {
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
} else {
buf, err = marshaler.Marshal(resp)
}
if err != nil {
grpclog.Infof("Marshal error: %v", err)
HTTPError(ctx, mux, marshaler, w, req, err)
return
}
if _, err = w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
}
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
if len(opts) == 0 {
return nil
}
for _, opt := range opts {
if err := opt(ctx, w, resp); err != nil {
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
return err
}
}
return nil
}
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
serr := streamError(ctx, mux.streamErrorHandler, err)
if !wroteHeader {
w.WriteHeader(int(serr.HttpCode))
}
buf, merr := marshaler.Marshal(errorChunk(serr))
if merr != nil {
grpclog.Infof("Failed to marshal an error: %v", merr)
return
}
if _, werr := w.Write(buf); werr != nil {
grpclog.Infof("Failed to notify error to client: %v", werr)
return
}
}
// streamError returns the payload for the final message in a response stream
// that represents the given err.
func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
serr := errHandler(ctx, err)
if serr != nil {
return serr
}
// TODO: log about misbehaving stream error handler?
return DefaultHTTPStreamErrorHandler(ctx, err)
}
func errorChunk(err *StreamError) map[string]proto.Message {
return map[string]proto.Message{"error": (*internal.StreamError)(err)}
}
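// Editor's sketch, not part of the vendored file: a forward-response option of
// the shape consumed by handleForwardResponseOptions above. Registered on a
// mux, it runs before every forwarded response; resp may be nil when only
// headers are being sent.
func ExampleWithForwardResponseOption() {
mux := NewServeMux(WithForwardResponseOption(
func(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
w.Header().Set("Cache-Control", "no-store")
return nil
},
))
_ = mux
}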

View file

@ -0,0 +1,43 @@
package runtime
import (
"google.golang.org/genproto/googleapis/api/httpbody"
)
// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
Marshaler: &JSONPb{OrigName: true},
}
}
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
// google.api.HttpBody message as the full response body if it is
// the actual message used as the response. If not, then this will
// simply fall back to the Marshaler specified as its default Marshaler.
type HTTPBodyMarshaler struct {
Marshaler
}
// ContentType implementation to keep backwards compatibility with marshal interface
func (h *HTTPBodyMarshaler) ContentType() string {
return h.ContentTypeFromMessage(nil)
}
// ContentTypeFromMessage returns the content type specified by v, in case v is a
// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.GetContentType()
}
return h.Marshaler.ContentType()
}
// Marshal marshals "v" by returning the body bytes if v is a
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.Data, nil
}
return h.Marshaler.Marshal(v)
}
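// Editor's sketch, not part of the vendored file: HTTPBodyMarshaler passing a
// google.api.HttpBody through verbatim, as a godoc example in a _test.go file
// with "fmt" imported. Non-HttpBody values would take the JSONPb path instead.
func ExampleHTTPBodyMarshaler() {
m := &HTTPBodyMarshaler{Marshaler: &JSONPb{OrigName: true}}
body := &httpbody.HttpBody{ContentType: "text/html", Data: []byte("<b>hi</b>")}
buf, _ := m.Marshal(body)
fmt.Println(m.ContentTypeFromMessage(body), string(buf))
// Output: text/html <b>hi</b>
}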

View file

@ -0,0 +1,45 @@
package runtime
import (
"encoding/json"
"io"
)
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
// with the standard "encoding/json" package of Golang.
// Although it is generally faster for simple proto messages than JSONPb,
// it does not support advanced features of protobuf, e.g. maps and oneofs.
//
// The NewEncoder and NewDecoder methods return *json.Encoder and
// *json.Decoder respectively.
type JSONBuiltin struct{}
// ContentType always returns "application/json".
func (*JSONBuiltin) ContentType() string {
return "application/json"
}
// Marshal marshals "v" into JSON
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
// Unmarshal unmarshals JSON data into "v".
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}
// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
return json.NewDecoder(r)
}
// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
return json.NewEncoder(w)
}
// Delimiter for newline encoded JSON streams.
func (j *JSONBuiltin) Delimiter() []byte {
return []byte("\n")
}

View file

@ -0,0 +1,262 @@
package runtime
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
)
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
// with the "github.com/golang/protobuf/jsonpb".
// It supports the full functionality of protobuf, unlike JSONBuiltin.
//
// The NewDecoder method returns a DecoderWrapper, so the underlying
// *json.Decoder methods can be used.
type JSONPb jsonpb.Marshaler
// ContentType always returns "application/json".
func (*JSONPb) ContentType() string {
return "application/json"
}
// Marshal marshals "v" into JSON.
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
if _, ok := v.(proto.Message); !ok {
return j.marshalNonProtoField(v)
}
var buf bytes.Buffer
if err := j.marshalTo(&buf, v); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
p, ok := v.(proto.Message)
if !ok {
buf, err := j.marshalNonProtoField(v)
if err != nil {
return err
}
_, err = w.Write(buf)
return err
}
return (*jsonpb.Marshaler)(j).Marshal(w, p)
}
var (
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)
// marshalNonProtoField marshals a non-message field of a protobuf message.
// This function does not correctly marshal arbitrary data structures into JSON;
// it is only capable of marshaling non-message field values of protobuf,
// i.e. primitive types, enums; pointers to primitives or enums; maps from
// integer/string types to primitives/enums/pointers to messages.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if v == nil {
return []byte("null"), nil
}
rv := reflect.ValueOf(v)
for rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return []byte("null"), nil
}
rv = rv.Elem()
}
if rv.Kind() == reflect.Slice {
if rv.IsNil() {
if j.EmitDefaults {
return []byte("[]"), nil
}
return []byte("null"), nil
}
if rv.Type().Elem().Implements(protoMessageType) {
var buf bytes.Buffer
err := buf.WriteByte('[')
if err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
err = buf.WriteByte(',')
if err != nil {
return nil, err
}
}
if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
return nil, err
}
}
err = buf.WriteByte(']')
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
}
if rv.Kind() == reflect.Map {
m := make(map[string]*json.RawMessage)
for _, k := range rv.MapKeys() {
buf, err := j.Marshal(rv.MapIndex(k).Interface())
if err != nil {
return nil, err
}
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
}
if j.Indent != "" {
return json.MarshalIndent(m, "", j.Indent)
}
return json.Marshal(m)
}
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
return json.Marshal(enum.String())
}
return json.Marshal(rv.Interface())
}
// Unmarshal unmarshals JSON "data" into "v"
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
return unmarshalJSONPb(data, v)
}
// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
d := json.NewDecoder(r)
return DecoderWrapper{Decoder: d}
}
// DecoderWrapper is a wrapper around a *json.Decoder that adds
// support for protos to the Decode method.
type DecoderWrapper struct {
*json.Decoder
}
// Decode wraps the embedded decoder's Decode method to support
// protos using a jsonpb.Unmarshaler.
func (d DecoderWrapper) Decode(v interface{}) error {
return decodeJSONPb(d.Decoder, v)
}
// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
return EncoderFunc(func(v interface{}) error {
if err := j.marshalTo(w, v); err != nil {
return err
}
// mimic json.Encoder by adding a newline (makes output
// easier to read when it contains multiple encoded items)
_, err := w.Write(j.Delimiter())
return err
})
}
func unmarshalJSONPb(data []byte, v interface{}) error {
d := json.NewDecoder(bytes.NewReader(data))
return decodeJSONPb(d, v)
}
func decodeJSONPb(d *json.Decoder, v interface{}) error {
p, ok := v.(proto.Message)
if !ok {
return decodeNonProtoField(d, v)
}
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
return unmarshaler.UnmarshalNext(d, p)
}
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", v)
}
for rv.Kind() == reflect.Ptr {
if rv.IsNil() {
rv.Set(reflect.New(rv.Type().Elem()))
}
if rv.Type().ConvertibleTo(typeProtoMessage) {
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
}
rv = rv.Elem()
}
if rv.Kind() == reflect.Map {
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
conv, ok := convFromType[rv.Type().Key().Kind()]
if !ok {
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
}
m := make(map[string]*json.RawMessage)
if err := d.Decode(&m); err != nil {
return err
}
for k, v := range m {
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
bk := result[0]
bv := reflect.New(rv.Type().Elem())
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
return err
}
rv.SetMapIndex(bk, bv.Elem())
}
return nil
}
if _, ok := rv.Interface().(protoEnum); ok {
var repr interface{}
if err := d.Decode(&repr); err != nil {
return err
}
switch repr.(type) {
case string:
// TODO(yugui) Should use proto.StructProperties?
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
case float64:
rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
return nil
default:
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
}
}
return d.Decode(v)
}
type protoEnum interface {
fmt.Stringer
EnumDescriptor() ([]byte, []int)
}
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
// Delimiter for newline encoded JSON streams.
func (j *JSONPb) Delimiter() []byte {
return []byte("\n")
}
// allowUnknownFields, when true, suppresses the error that would otherwise be
// returned when the destination is a struct and the input contains object keys
// which do not match any non-ignored, exported fields in the destination.
var allowUnknownFields = true
// DisallowUnknownFields enables the option in the decoder (unmarshaler) to
// return an error when it finds an unknown field. This function must be
// called before using the JSON marshaler.
func DisallowUnknownFields() {
allowUnknownFields = false
}
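// Editor's sketch, not part of the vendored file: JSONPb marshaling a
// well-known wrapper proto versus a plain Go value, as a godoc example in a
// _test.go file with "fmt" and "github.com/golang/protobuf/ptypes/wrappers"
// imported. The proto path goes through jsonpb; the map goes through
// marshalNonProtoField above.
func ExampleJSONPb() {
j := &JSONPb{OrigName: true}
pb, _ := j.Marshal(&wrappers.StringValue{Value: "hi"}) // proto path: jsonpb
raw, _ := j.Marshal(map[string]int32{"n": 1}) // non-proto path
fmt.Println(string(pb), string(raw))
// Output: "hi" {"n":1}
}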

View file

@ -0,0 +1,62 @@
package runtime
import (
"errors"
"io"
"io/ioutil"
"github.com/golang/protobuf/proto"
)
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
type ProtoMarshaller struct{}
// ContentType always returns "application/octet-stream".
func (*ProtoMarshaller) ContentType() string {
return "application/octet-stream"
}
// Marshal marshals "value" into Proto
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
message, ok := value.(proto.Message)
if !ok {
return nil, errors.New("unable to marshal non proto field")
}
return proto.Marshal(message)
}
// Unmarshal unmarshals proto "data" into "value"
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
message, ok := value.(proto.Message)
if !ok {
return errors.New("unable to unmarshal non proto field")
}
return proto.Unmarshal(data, message)
}
// NewDecoder returns a Decoder which reads proto stream from "reader".
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
return DecoderFunc(func(value interface{}) error {
buffer, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
return marshaller.Unmarshal(buffer, value)
})
}
// NewEncoder returns an Encoder which writes proto stream into "writer".
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
return EncoderFunc(func(value interface{}) error {
buffer, err := marshaller.Marshal(value)
if err != nil {
return err
}
_, err = writer.Write(buffer)
if err != nil {
return err
}
return nil
})
}

View file

@ -0,0 +1,55 @@
package runtime
import (
"io"
)
// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
// Marshal marshals "v" into byte sequence.
Marshal(v interface{}) ([]byte, error)
// Unmarshal unmarshals "data" into "v".
// "v" must be a pointer value.
Unmarshal(data []byte, v interface{}) error
// NewDecoder returns a Decoder which reads byte sequence from "r".
NewDecoder(r io.Reader) Decoder
// NewEncoder returns an Encoder which writes byte sequence into "w".
NewEncoder(w io.Writer) Encoder
// ContentType returns the Content-Type which this marshaler is responsible for.
ContentType() string
}
// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
// to set the Content-Type header on the response.
type contentTypeMarshaler interface {
// ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message
ContentTypeFromMessage(v interface{}) string
}
// Decoder decodes a byte sequence
type Decoder interface {
Decode(v interface{}) error
}
// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
Encode(v interface{}) error
}
// DecoderFunc adapts a decoder function into Decoder.
type DecoderFunc func(v interface{}) error
// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
// EncoderFunc adapts an encoder function into Encoder.
type EncoderFunc func(v interface{}) error
// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
// Delimited defines the streaming delimiter.
type Delimited interface {
// Delimiter returns the record separator for the stream.
Delimiter() []byte
}
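// Editor's sketch, not part of the vendored file: DecoderFunc/EncoderFunc adapt
// plain functions to the Decoder/Encoder interfaces, here wiring an Encoder to
// a bytes.Buffer (a _test.go example with "bytes" and "fmt" imported).
func ExampleEncoderFunc() {
var buf bytes.Buffer
enc := EncoderFunc(func(v interface{}) error {
_, err := fmt.Fprintln(&buf, v)
return err
})
_ = enc.Encode("hello")
fmt.Print(buf.String())
// Output: hello
}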

View file

@ -0,0 +1,99 @@
package runtime
import (
"errors"
"mime"
"net/http"
"google.golang.org/grpc/grpclog"
)
// MIMEWildcard is the fallback MIME type used for requests which do not match
// a registered MIME type.
const MIMEWildcard = "*"
var (
acceptHeader = http.CanonicalHeaderKey("Accept")
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
defaultMarshaler = &JSONPb{OrigName: true}
)
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
// If it isn't set (or the request Content-Type is empty), it checks for "*".
// If there are multiple Content-Type headers set, it chooses the first one that it can
// exactly match in the registry.
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
for _, acceptVal := range r.Header[acceptHeader] {
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
outbound = m
break
}
}
for _, contentTypeVal := range r.Header[contentTypeHeader] {
contentType, _, err := mime.ParseMediaType(contentTypeVal)
if err != nil {
grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
continue
}
if m, ok := mux.marshalers.mimeMap[contentType]; ok {
inbound = m
break
}
}
if inbound == nil {
inbound = mux.marshalers.mimeMap[MIMEWildcard]
}
if outbound == nil {
outbound = inbound
}
return inbound, outbound
}
// marshalerRegistry is a mapping from MIME types to Marshalers.
type marshalerRegistry struct {
mimeMap map[string]Marshaler
}
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
// MIME type).
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
if len(mime) == 0 {
return errors.New("empty MIME type")
}
m.mimeMap[mime] = marshaler
return nil
}
// makeMarshalerMIMERegistry returns a new registry of marshalers.
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
//
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with an "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServeMux with the marshaler option.
func makeMarshalerMIMERegistry() marshalerRegistry {
return marshalerRegistry{
mimeMap: map[string]Marshaler{
MIMEWildcard: defaultMarshaler,
},
}
}
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
// Marshalers to a MIME type in mux.
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
return func(mux *ServeMux) {
if err := mux.marshalers.add(mime, marshaler); err != nil {
panic(err)
}
}
}
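// Editor's sketch, not part of the vendored file: registering an indented JSON
// marshaler under a custom MIME type, selectable by clients via the Accept and
// Content-Type headers; the "*" entry remains the fallback.
func ExampleWithMarshalerOption() {
mux := NewServeMux(WithMarshalerOption("application/json+pretty", &JSONPb{OrigName: true, Indent: "  "}))
_ = mux
}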

View file

@ -0,0 +1,300 @@
package runtime
import (
"context"
"fmt"
"net/http"
"net/textproto"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
// a request is received with a URI path that does not match any registered
// service method.
//
// Since gRPC servers return an "Unimplemented" code for requests with an
// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
// handlers maps HTTP method to a list of handlers.
handlers map[string][]handler
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
streamErrorHandler StreamErrorHandlerFunc
protoErrorHandler ProtoErrorHandlerFunc
disablePathLengthFallback bool
lastMatchWins bool
}
// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
//
// forwardResponseOption is an option that will be called on the relevant context.Context,
// http.ResponseWriter, and proto.Message before every forwarded response.
//
// The message may be nil in the case where just a header is being sent.
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
}
}
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
// Configuring this will mean the generated swagger output is no longer correct, and it should be
// done with careful consideration.
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
return func(serveMux *ServeMux) {
currentQueryParser = queryParameterParser
}
}
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
type HeaderMatcherFunc func(string) (string, bool)
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
func DefaultHeaderMatcher(key string) (string, bool) {
key = textproto.CanonicalMIMEHeaderKey(key)
if isPermanentHTTPHeader(key) {
return MetadataPrefix + key, true
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
return key[len(MetadataHeaderPrefix):], true
}
return "", false
}
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
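// Editor's sketch, not part of the vendored file: a custom incoming matcher
// that forwards one extra header unchanged and defers to DefaultHeaderMatcher
// for everything else ("X-Request-Id" is an illustrative header name).
func ExampleWithIncomingHeaderMatcher() {
mux := NewServeMux(WithIncomingHeaderMatcher(func(key string) (string, bool) {
if key == "X-Request-Id" {
return key, true
}
return DefaultHeaderMatcher(key)
}))
_ = mux
}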
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn
}
}
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
// is reading token from cookie and adding it in gRPC context.
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
}
}
// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
//
// This can be used to handle an error as general proto message defined by gRPC.
// When this option is used, the mux uses the configured error handler instead of HTTPError and
// OtherErrorHandler.
func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.protoErrorHandler = fn
}
}
// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
func WithDisablePathLengthFallback() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.disablePathLengthFallback = true
}
}
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.streamErrorHandler = fn
}
}
// WithLastMatchWins returns a ServeMuxOption that will enable "last
// match wins" behavior, where if multiple path patterns match a
// request path, the last one defined in the .proto file will be used.
func WithLastMatchWins() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.lastMatchWins = true
}
}
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
handlers: make(map[string][]handler),
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
marshalers: makeMarshalerMIMERegistry(),
streamErrorHandler: DefaultHTTPStreamErrorHandler,
}
for _, opt := range opts {
opt(serveMux)
}
if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
}
if serveMux.outgoingHeaderMatcher == nil {
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
}
return serveMux
}
// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
if s.lastMatchWins {
s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
} else {
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
}
}
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
path := r.URL.Path
if !strings.HasPrefix(path, "/") {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
}
return
}
components := strings.Split(path[1:], "/")
l := len(components)
var verb string
if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
return
} else if idx > 0 {
c := components[l-1]
components[l-1], verb = c[:idx], c[idx+1:]
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
}
return
}
}
for _, h := range s.handlers[r.Method] {
pathParams, err := h.pat.Match(components, verb)
if err != nil {
continue
}
h.h(w, r, pathParams)
return
}
// look up other methods to handle fallback from GET to POST and
// to determine if it is MethodNotAllowed or NotFound.
for m, handlers := range s.handlers {
if m == r.Method {
continue
}
for _, h := range handlers {
pathParams, err := h.pat.Match(components, verb)
if err != nil {
continue
}
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
if s.isPathLengthFallback(r) {
if err := r.ParseForm(); err != nil {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
}
return
}
h.h(w, r, pathParams)
return
}
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
}
return
}
}
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
}
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
return s.forwardResponseOptions
}
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}
type handler struct {
pat Pattern
h HandlerFunc
}

View file

@ -0,0 +1,262 @@
package runtime
import (
"errors"
"fmt"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc/grpclog"
)
var (
// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
)
type op struct {
code utilities.OpCode
operand int
}
// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
type Pattern struct {
// ops is a list of operations
ops []op
// pool is a constant pool indexed by the operands or vars.
pool []string
// vars is a list of variable names to be bound by this pattern
vars []string
// stacksize is the max depth of the stack
stacksize int
// tailLen is the length of the fixed-size segments after a deep wildcard
tailLen int
// verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
verb string
// assumeColonVerb indicates whether a path suffix after a final
// colon may only be interpreted as a verb.
assumeColonVerb bool
}
type patternOptions struct {
assumeColonVerb bool
}
// PatternOpt is an option for creating Patterns.
type PatternOpt func(*patternOptions)
// NewPattern returns a new Pattern from the given definition values.
// "ops" is a sequence of op codes. "pool" is a constant pool.
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
// "version" must be 1 for now.
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
options := patternOptions{
assumeColonVerb: true,
}
for _, o := range opts {
o(&options)
}
if version != 1 {
grpclog.Infof("unsupported version: %d", version)
return Pattern{}, ErrInvalidPattern
}
l := len(ops)
if l%2 != 0 {
grpclog.Infof("odd number of ops codes: %d", l)
return Pattern{}, ErrInvalidPattern
}
var (
typedOps []op
stack, maxstack int
tailLen int
pushMSeen bool
vars []string
)
for i := 0; i < l; i += 2 {
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
switch op.code {
case utilities.OpNop:
continue
case utilities.OpPush:
if pushMSeen {
tailLen++
}
stack++
case utilities.OpPushM:
if pushMSeen {
grpclog.Infof("pushM appears twice")
return Pattern{}, ErrInvalidPattern
}
pushMSeen = true
stack++
case utilities.OpLitPush:
if op.operand < 0 || len(pool) <= op.operand {
grpclog.Infof("negative literal index: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
if pushMSeen {
tailLen++
}
stack++
case utilities.OpConcatN:
if op.operand <= 0 {
grpclog.Infof("negative concat size: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
stack -= op.operand
if stack < 0 {
grpclog.Print("stack underflow")
return Pattern{}, ErrInvalidPattern
}
stack++
case utilities.OpCapture:
if op.operand < 0 || len(pool) <= op.operand {
grpclog.Infof("variable name index out of bound: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
v := pool[op.operand]
op.operand = len(vars)
vars = append(vars, v)
stack--
if stack < 0 {
grpclog.Infof("stack underflow")
return Pattern{}, ErrInvalidPattern
}
default:
grpclog.Infof("invalid opcode: %d", op.code)
return Pattern{}, ErrInvalidPattern
}
if maxstack < stack {
maxstack = stack
}
typedOps = append(typedOps, op)
}
return Pattern{
ops: typedOps,
pool: pool,
vars: vars,
stacksize: maxstack,
tailLen: tailLen,
verb: verb,
assumeColonVerb: options.assumeColonVerb,
}, nil
}
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
func MustPattern(p Pattern, err error) Pattern {
if err != nil {
grpclog.Fatalf("Pattern initialization failed: %v", err)
}
return p
}
// Match examines whether the given components match the Pattern.
// If they match, the function returns a mapping from field paths to their captured values.
// Otherwise, the function returns an error.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
if p.verb != verb {
if p.assumeColonVerb || p.verb != "" {
return nil, ErrNotMatch
}
if len(components) == 0 {
components = []string{":" + verb}
} else {
components = append([]string{}, components...)
components[len(components)-1] += ":" + verb
}
verb = ""
}
var pos int
stack := make([]string, 0, p.stacksize)
captured := make([]string, len(p.vars))
l := len(components)
for _, op := range p.ops {
switch op.code {
case utilities.OpNop:
continue
case utilities.OpPush, utilities.OpLitPush:
if pos >= l {
return nil, ErrNotMatch
}
c := components[pos]
if op.code == utilities.OpLitPush {
if lit := p.pool[op.operand]; c != lit {
return nil, ErrNotMatch
}
}
stack = append(stack, c)
pos++
case utilities.OpPushM:
end := len(components)
if end < pos+p.tailLen {
return nil, ErrNotMatch
}
end -= p.tailLen
stack = append(stack, strings.Join(components[pos:end], "/"))
pos = end
case utilities.OpConcatN:
n := op.operand
l := len(stack) - n
stack = append(stack[:l], strings.Join(stack[l:], "/"))
case utilities.OpCapture:
n := len(stack) - 1
captured[op.operand] = stack[n]
stack = stack[:n]
}
}
if pos < l {
return nil, ErrNotMatch
}
bindings := make(map[string]string)
for i, val := range captured {
bindings[p.vars[i]] = val
}
return bindings, nil
}
// Verb returns the verb part of the Pattern.
func (p Pattern) Verb() string { return p.verb }
func (p Pattern) String() string {
var stack []string
for _, op := range p.ops {
switch op.code {
case utilities.OpNop:
continue
case utilities.OpPush:
stack = append(stack, "*")
case utilities.OpLitPush:
stack = append(stack, p.pool[op.operand])
case utilities.OpPushM:
stack = append(stack, "**")
case utilities.OpConcatN:
n := op.operand
l := len(stack) - n
stack = append(stack[:l], strings.Join(stack[l:], "/"))
case utilities.OpCapture:
n := len(stack) - 1
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
}
}
segs := strings.Join(stack, "/")
if p.verb != "" {
return fmt.Sprintf("/%s:%s", segs, p.verb)
}
return "/" + segs
}
// AssumeColonVerbOpt indicates whether a path suffix after a final
// colon may only be interpreted as a verb.
func AssumeColonVerbOpt(val bool) PatternOpt {
return PatternOpt(func(o *patternOptions) {
o.assumeColonVerb = val
})
}
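// Editor's sketch, not part of the vendored file: hand-assembling the pattern
// /v1/{name} from opcodes, the way generated gateway code does, then matching
// a path against it (a _test.go example; pattern.go already imports "fmt" and
// the utilities package).
func ExampleNewPattern() {
pat := MustPattern(NewPattern(1, []int{
int(utilities.OpLitPush), 0, // literal "v1"
int(utilities.OpPush), 0, // one arbitrary path component
int(utilities.OpConcatN), 1, // collapse it into a single value
int(utilities.OpCapture), 1, // bind it to pool[1] == "name"
}, []string{"v1", "name"}, ""))
params, _ := pat.Match([]string{"v1", "books-123"}, "")
fmt.Println(pat.String(), params["name"])
// Output: /v1/{name=*} books-123
}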

View file

@ -0,0 +1,80 @@
package runtime
import (
"github.com/golang/protobuf/proto"
)
// StringP returns a pointer to a string whose pointee is the same as the given string value.
func StringP(val string) (*string, error) {
return proto.String(val), nil
}
// BoolP parses the given string representation of a boolean value,
// and returns a pointer to a bool whose value is the same as the parsed value.
func BoolP(val string) (*bool, error) {
b, err := Bool(val)
if err != nil {
return nil, err
}
return proto.Bool(b), nil
}
// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is the same as the parsed number.
func Float64P(val string) (*float64, error) {
f, err := Float64(val)
if err != nil {
return nil, err
}
return proto.Float64(f), nil
}
// Float32P parses the given string representation of a floating point number,
// and returns a pointer to a float32 whose value is the same as the parsed number.
func Float32P(val string) (*float32, error) {
f, err := Float32(val)
if err != nil {
return nil, err
}
return proto.Float32(f), nil
}
// Int64P parses the given string representation of an integer
// and returns a pointer to an int64 whose value is the same as the parsed integer.
func Int64P(val string) (*int64, error) {
i, err := Int64(val)
if err != nil {
return nil, err
}
return proto.Int64(i), nil
}
// Int32P parses the given string representation of an integer
// and returns a pointer to an int32 whose value is the same as the parsed integer.
func Int32P(val string) (*int32, error) {
i, err := Int32(val)
if err != nil {
return nil, err
}
return proto.Int32(i), err
}
// Uint64P parses the given string representation of an integer
// and returns a pointer to a uint64 whose value is the same as the parsed integer.
func Uint64P(val string) (*uint64, error) {
i, err := Uint64(val)
if err != nil {
return nil, err
}
return proto.Uint64(i), err
}
// Uint32P parses the given string representation of an integer
// and returns a pointer to a uint32 whose value is the same as the parsed integer.
func Uint32P(val string) (*uint32, error) {
i, err := Uint32(val)
if err != nil {
return nil, err
}
return proto.Uint32(i), err
}

View file

@ -0,0 +1,106 @@
package runtime
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/ptypes/any"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
// a proto struct used to represent an error at the end of a stream.
type StreamErrorHandlerFunc func(context.Context, error) *StreamError
// StreamError is the payload for the final message in a server stream in the event that the server returns an
// error after a response message has already been sent.
type StreamError internal.StreamError
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
// DefaultHTTPProtoErrorHandler is the default implementation of ProtoErrorHandlerFunc.
// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a Status message marshaled by a Marshaler.
//
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
buf, merr := marshaler.Marshal(s.Proto())
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
handleForwardResponseTrailerHeader(w, md)
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
}
// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
// default logic.
//
// It extracts the gRPC status from err if possible. The fields of the status are
// used to populate the returned StreamError, and the HTTP status code is derived
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
grpcCode := codes.Unknown
grpcMessage := err.Error()
var grpcDetails []*any.Any
if s, ok := status.FromError(err); ok {
grpcCode = s.Code()
grpcMessage = s.Message()
grpcDetails = s.Proto().GetDetails()
}
httpCode := HTTPStatusFromCode(grpcCode)
return &StreamError{
GrpcCode: int32(grpcCode),
HttpCode: int32(httpCode),
Message: grpcMessage,
HttpStatus: http.StatusText(httpCode),
Details: grpcDetails,
}
}
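// Editor's sketch, not part of the vendored file: installing a custom proto
// error handler that adds a header (hypothetical name) and then defers to the
// default handler above.
func ExampleWithProtoErrorHandler() {
mux := NewServeMux(WithProtoErrorHandler(func(ctx context.Context, mux *ServeMux, m Marshaler, w http.ResponseWriter, r *http.Request, err error) {
w.Header().Set("X-Gateway-Error", "true")
DefaultHTTPProtoErrorHandler(ctx, mux, m, w, r, err)
}))
_ = mux
}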

View file

@ -0,0 +1,406 @@
package runtime
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc/grpclog"
)
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using the current query parser.
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
return err
}
}
return nil
}
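// Editor's sketch, not part of the vendored file: populating a proto message
// from URL query values, as a godoc example in a _test.go file with
// "github.com/golang/protobuf/ptypes/wrappers" additionally imported
// (wrappers.StringValue stands in for a generated request message; its field
// "value" is addressed by its proto name).
func ExamplePopulateQueryParameters() {
msg := &wrappers.StringValue{}
err := PopulateQueryParameters(msg, url.Values{"value": {"hello"}}, utilities.NewDoubleArray(nil))
fmt.Println(msg.Value, err)
// Output: hello <nil>
}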
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
// It instantiates missing protobuf fields as it goes.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg, fieldPath, []string{value})
}
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
m := reflect.ValueOf(msg)
if m.Kind() != reflect.Ptr {
return fmt.Errorf("unexpected type %T: %v", msg, msg)
}
var props *proto.Properties
m = m.Elem()
for i, fieldName := range fieldPath {
isLast := i == len(fieldPath)-1
if !isLast && m.Kind() != reflect.Struct {
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
}
var f reflect.Value
var err error
f, props, err = fieldByProtoName(m, fieldName)
if err != nil {
return err
} else if !f.IsValid() {
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
return nil
}
switch f.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
m = f
case reflect.Slice:
if !isLast {
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
}
// Handle []byte
if f.Type().Elem().Kind() == reflect.Uint8 {
m = f
break
}
return populateRepeatedField(f, values, props)
case reflect.Ptr:
if f.IsNil() {
m = reflect.New(f.Type().Elem())
f.Set(m.Convert(f.Type()))
}
m = f.Elem()
continue
case reflect.Struct:
m = f
continue
case reflect.Map:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
return populateMapField(f, values, props)
default:
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
}
}
switch len(values) {
case 0:
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
case 1:
default:
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
}
return populateField(m, values[0], props)
}
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
props := proto.GetProperties(m.Type())
// look up field name in oneof map
for _, op := range props.OneofTypes {
if name == op.Prop.OrigName || name == op.Prop.JSONName {
v := reflect.New(op.Type.Elem())
field := m.Field(op.Field)
if !field.IsNil() {
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
}
field.Set(v)
return v.Elem().Field(0), op.Prop, nil
}
}
for _, p := range props.Prop {
if p.OrigName == name {
return m.FieldByName(p.Name), p, nil
}
if p.JSONName == name {
return m.FieldByName(p.Name), p, nil
}
}
return reflect.Value{}, nil, nil
}
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
}
key, value := values[0], values[1]
keyType := f.Type().Key()
valueType := f.Type().Elem()
if f.IsNil() {
f.Set(reflect.MakeMap(f.Type()))
}
keyConv, ok := convFromType[keyType.Kind()]
if !ok {
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
}
valueConv, ok := convFromType[valueType.Kind()]
if !ok {
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
}
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
if err := keyV[1].Interface(); err != nil {
return err.(error)
}
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := valueV[1].Interface(); err != nil {
return err.(error)
}
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
return nil
}
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
elemType := f.Type().Elem()
// is the destination field a slice of an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnumRepeated(f, values, enumValMap)
}
conv, ok := convFromType[elemType.Kind()]
if !ok {
return fmt.Errorf("unsupported field type %s", elemType)
}
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
}
return nil
}
func populateField(f reflect.Value, value string, props *proto.Properties) error {
i := f.Addr().Interface()
// Handle protobuf well known types
var name string
switch m := i.(type) {
case interface{ XXX_WellKnownType() string }:
name = m.XXX_WellKnownType()
case proto.Message:
const wktPrefix = "google.protobuf."
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
name = fullName[len(wktPrefix):]
}
}
switch name {
case "Timestamp":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
return nil
case "Duration":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
ns := d.Nanoseconds()
s := ns / 1e9
ns %= 1e9
f.FieldByName("Seconds").SetInt(s)
f.FieldByName("Nanos").SetInt(ns)
return nil
case "DoubleValue":
fallthrough
case "FloatValue":
float64Val, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetFloat(float64Val)
return nil
case "Int64Value":
fallthrough
case "Int32Value":
int64Val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetInt(int64Val)
return nil
case "UInt64Value":
fallthrough
case "UInt32Value":
uint64Val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetUint(uint64Val)
return nil
case "BoolValue":
if value == "true" {
f.FieldByName("Value").SetBool(true)
} else if value == "false" {
f.FieldByName("Value").SetBool(false)
} else {
return fmt.Errorf("bad BoolValue: %s", value)
}
return nil
case "StringValue":
f.FieldByName("Value").SetString(value)
return nil
case "BytesValue":
bytesVal, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return fmt.Errorf("bad BytesValue: %s", value)
}
f.FieldByName("Value").SetBytes(bytesVal)
return nil
case "FieldMask":
p := f.FieldByName("Paths")
for _, v := range strings.Split(value, ",") {
if v != "" {
p.Set(reflect.Append(p, reflect.ValueOf(v)))
}
}
return nil
}
// Handle Time and Duration stdlib types
switch t := i.(type) {
case *time.Time:
pt, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
*t = pt
return nil
case *time.Duration:
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
*t = d
return nil
}
// is the destination field an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnum(f, value, enumValMap)
}
conv, ok := convFromType[f.Kind()]
if !ok {
return fmt.Errorf("field type %T is not supported in query parameters", i)
}
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Set(result[0].Convert(f.Type()))
return nil
}
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
// see if it's an enumeration string
if enumVal, ok := enumValMap[value]; ok {
return reflect.ValueOf(enumVal).Convert(t), nil
}
// check for an integer that matches an enumeration value
eVal, err := strconv.Atoi(value)
if err != nil {
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
for _, v := range enumValMap {
if v == int32(eVal) {
return reflect.ValueOf(eVal).Convert(t), nil
}
}
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
cval, err := convertEnum(value, f.Type(), enumValMap)
if err != nil {
return err
}
f.Set(cval)
return nil
}
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
elemType := f.Type().Elem()
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result, err := convertEnum(v, elemType, enumValMap)
if err != nil {
return err
}
f.Index(i).Set(result)
}
return nil
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String: reflect.ValueOf(String),
reflect.Bool: reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64: reflect.ValueOf(Int64),
reflect.Int32: reflect.ValueOf(Int32),
reflect.Uint64: reflect.ValueOf(Uint64),
reflect.Uint32: reflect.ValueOf(Uint32),
reflect.Slice: reflect.ValueOf(Bytes),
}
)
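
To make the flow above concrete, here is a minimal sketch that populates a message from a URL query. The `StringValue` wrapper stands in for a generated request message (its protobuf field name is `value`), and an empty `DoubleArray` means no fields are filtered:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/golang/protobuf/ptypes/wrappers"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	msg := new(wrappers.StringValue)
	query := url.Values{"value": []string{"hello"}}
	// An empty DoubleArray filters out nothing.
	if err := runtime.PopulateQueryParameters(msg, query, utilities.NewDoubleArray(nil)); err != nil {
		panic(err)
	}
	fmt.Println(msg.Value) // hello
}
```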

View file

@ -0,0 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"pattern.go",
"readerfactory.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
)
go_test(
name = "go_default_test",
size = "small",
srcs = ["trie_test.go"],
embed = [":go_default_library"],
)

View file

@ -0,0 +1,2 @@
// Package utilities provides members for internal use in grpc-gateway.
package utilities

View file

@ -0,0 +1,22 @@
package utilities
// An OpCode is an opcode of compiled path patterns.
type OpCode int
// These constants are the valid values of OpCode.
const (
// OpNop does nothing
OpNop = OpCode(iota)
// OpPush pushes a component to stack
OpPush
// OpLitPush pushes a component to stack if it matches to the literal
OpLitPush
// OpPushM concatenates the remaining components and pushes it to stack
OpPushM
// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
OpConcatN
// OpCapture pops an item and binds it to the variable
OpCapture
// OpEnd is the least positive invalid opcode.
OpEnd
)
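
These opcodes are consumed by the runtime's Pattern type. Below is a sketch of how a generated gateway might encode the HTTP rule `/v1/{name}` — the (opcode, operand) pairing and pool indexing are assumptions drawn from typical generated stubs, not verified against this exact revision:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Ops come in (opcode, operand) pairs; string operands index into the pool.
	ops := []int{
		2, 0, // OpLitPush: literal "v1"
		1, 0, // OpPush: accept any single path component
		4, 1, // OpConcatN: concatenate 1 stack item
		5, 1, // OpCapture: bind it to variable "name"
	}
	pat, err := runtime.NewPattern(1, ops, []string{"v1", "name"}, "")
	if err != nil {
		panic(err)
	}
	vars, err := pat.Match([]string{"v1", "books"}, "")
	fmt.Println(vars, err) // map[name:books] <nil>
}
```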

View file

@ -0,0 +1,20 @@
package utilities
import (
"bytes"
"io"
"io/ioutil"
)
// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
return func() io.Reader {
return bytes.NewReader(b)
}, nil
}
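
A usage sketch: because the factory buffers the stream once up front, each returned reader starts from the beginning:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	newReader, err := utilities.IOReaderFactory(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	// Each call yields a fresh reader over the same buffered bytes.
	a, _ := ioutil.ReadAll(newReader())
	b, _ := ioutil.ReadAll(newReader())
	fmt.Println(string(a), string(b)) // payload payload
}
```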

View file

@ -0,0 +1,177 @@
package utilities
import (
"sort"
)
// DoubleArray is a double-array implementation of a trie on sequences of strings.
type DoubleArray struct {
// Encoding keeps an encoding from string to int
Encoding map[string]int
// Base is the base array of Double Array
Base []int
// Check is the check array of Double Array
Check []int
}
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
func NewDoubleArray(seqs [][]string) *DoubleArray {
da := &DoubleArray{Encoding: make(map[string]int)}
if len(seqs) == 0 {
return da
}
encoded := registerTokens(da, seqs)
sort.Sort(byLex(encoded))
root := node{row: -1, col: -1, left: 0, right: len(encoded)}
addSeqs(da, encoded, 0, root)
for i := len(da.Base); i > 0; i-- {
if da.Check[i-1] != 0 {
da.Base = da.Base[:i]
da.Check = da.Check[:i]
break
}
}
return da
}
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
var result [][]int
for _, seq := range seqs {
var encoded []int
for _, token := range seq {
if _, ok := da.Encoding[token]; !ok {
da.Encoding[token] = len(da.Encoding)
}
encoded = append(encoded, da.Encoding[token])
}
result = append(result, encoded)
}
for i := range result {
result[i] = append(result[i], len(da.Encoding))
}
return result
}
type node struct {
row, col int
left, right int
}
func (n node) value(seqs [][]int) int {
return seqs[n.row][n.col]
}
func (n node) children(seqs [][]int) []*node {
var result []*node
lastVal := -1
last := new(node)
for i := n.left; i < n.right; i++ {
if lastVal == seqs[i][n.col+1] {
continue
}
lastVal = seqs[i][n.col+1]
last.right = i
last = &node{
row: i,
col: n.col + 1,
left: i,
}
result = append(result, last)
}
last.right = n.right
return result
}
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
ensureSize(da, pos)
children := n.children(seqs)
var i int
for i = 1; ; i++ {
ok := func() bool {
for _, child := range children {
code := child.value(seqs)
j := i + code
ensureSize(da, j)
if da.Check[j] != 0 {
return false
}
}
return true
}()
if ok {
break
}
}
da.Base[pos] = i
for _, child := range children {
code := child.value(seqs)
j := i + code
da.Check[j] = pos + 1
}
terminator := len(da.Encoding)
for _, child := range children {
code := child.value(seqs)
if code == terminator {
continue
}
j := i + code
addSeqs(da, seqs, j, *child)
}
}
func ensureSize(da *DoubleArray, i int) {
for i >= len(da.Base) {
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
}
}
type byLex [][]int
func (l byLex) Len() int { return len(l) }
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byLex) Less(i, j int) bool {
si := l[i]
sj := l[j]
var k int
for k = 0; k < len(si) && k < len(sj); k++ {
if si[k] < sj[k] {
return true
}
if si[k] > sj[k] {
return false
}
}
if k < len(sj) {
return true
}
return false
}
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
if len(da.Base) == 0 {
return false
}
var i int
for _, t := range seq {
code, ok := da.Encoding[t]
if !ok {
break
}
j := da.Base[i] + code
if len(da.Check) <= j || da.Check[j] != i+1 {
break
}
i = j
}
j := da.Base[i] + len(da.Encoding)
if len(da.Check) <= j || da.Check[j] != i+1 {
return false
}
return true
}
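
A usage sketch of the trie: HasCommonPrefix reports whether some registered sequence is a prefix of the query sequence:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	da := utilities.NewDoubleArray([][]string{{"foo", "bar"}})
	fmt.Println(da.HasCommonPrefix([]string{"foo", "bar", "baz"})) // true: {foo bar} is a registered prefix
	fmt.Println(da.HasCommonPrefix([]string{"foo"}))               // false: no registered sequence prefixes it
}
```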

View file

@ -1,27 +0,0 @@
Copyright (c) 2016, gRPC Ecosystem
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of grpc-opentracing nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,23 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the GRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of GRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of GRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of GRPC or any code incorporated within this
implementation of GRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of GRPC
shall terminate as of the date such litigation is filed.

View file

@ -1,57 +0,0 @@
# OpenTracing support for gRPC in Go
The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based
systems in Go.
## Installation
```
go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
```
## Documentation
See the basic usage examples below and the [package documentation on
godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc).
## Client-side usage example
Wherever you call `grpc.Dial`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Set up a connection to the server peer.
conn, err := grpc.Dial(
address,
... // other options
grpc.WithUnaryInterceptor(
otgrpc.OpenTracingClientInterceptor(tracer)),
grpc.WithStreamInterceptor(
otgrpc.OpenTracingStreamClientInterceptor(tracer)))
// All future RPC activity involving `conn` will be automatically traced.
```
## Server-side usage example
Wherever you call `grpc.NewServer`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Initialize the gRPC server.
s := grpc.NewServer(
... // other options
grpc.UnaryInterceptor(
otgrpc.OpenTracingServerInterceptor(tracer)),
grpc.StreamInterceptor(
otgrpc.OpenTracingStreamServerInterceptor(tracer)))
// All future RPC activity involving `s` will be automatically traced.
```

View file

@ -1,239 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"io"
"runtime"
"sync/atomic"
)
// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable
// for use in a grpc.Dial call.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
method string,
req, resp interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,
) error {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) {
return invoker(ctx, method, req, resp, cc, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
defer clientSpan.Finish()
ctx = injectSpanContext(ctx, tracer, clientSpan)
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC request", req))
}
err = invoker(ctx, method, req, resp, cc, opts...)
if err == nil {
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC response", resp))
}
} else {
SetSpanTags(clientSpan, err, true)
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, req, resp, err)
}
return err
}
}
// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating
// a single span to correspond to the lifetime of the RPC's stream.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
desc *grpc.StreamDesc,
cc *grpc.ClientConn,
method string,
streamer grpc.Streamer,
opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) {
return streamer(ctx, desc, cc, method, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
ctx = injectSpanContext(ctx, tracer, clientSpan)
cs, err := streamer(ctx, desc, cc, method, opts...)
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
clientSpan.Finish()
return cs, err
}
return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil
}
}
func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream {
finishChan := make(chan struct{})
isFinished := new(int32)
*isFinished = 0
finishFunc := func(err error) {
// The current OpenTracing specification forbids finishing a span more than
// once. Since we have multiple code paths that could concurrently call
// `finishFunc`, we need to add some sort of synchronization to guard against
// multiple finishing.
if !atomic.CompareAndSwapInt32(isFinished, 0, 1) {
return
}
close(finishChan)
defer clientSpan.Finish()
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, nil, nil, err)
}
}
go func() {
select {
case <-finishChan:
// The client span is being finished by another code path; hence, no
// action is necessary.
case <-cs.Context().Done():
finishFunc(cs.Context().Err())
}
}()
otcs := &openTracingClientStream{
ClientStream: cs,
desc: desc,
finishFunc: finishFunc,
}
// The `ClientStream` interface allows one to omit calling `Recv` if it's
// known that the result will be `io.EOF`. See
// http://stackoverflow.com/q/42915337
// In such cases, there's nothing that triggers the span to finish. We,
// therefore, set a finalizer so that the span and the context goroutine will
// at least be cleaned up when the garbage collector is run.
runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) {
otcs.finishFunc(nil)
})
return otcs
}
type openTracingClientStream struct {
grpc.ClientStream
desc *grpc.StreamDesc
finishFunc func(error)
}
func (cs *openTracingClientStream) Header() (metadata.MD, error) {
md, err := cs.ClientStream.Header()
if err != nil {
cs.finishFunc(err)
}
return md, err
}
func (cs *openTracingClientStream) SendMsg(m interface{}) error {
err := cs.ClientStream.SendMsg(m)
if err != nil {
cs.finishFunc(err)
}
return err
}
func (cs *openTracingClientStream) RecvMsg(m interface{}) error {
err := cs.ClientStream.RecvMsg(m)
if err == io.EOF {
cs.finishFunc(nil)
return err
} else if err != nil {
cs.finishFunc(err)
return err
}
if !cs.desc.ServerStreams {
cs.finishFunc(nil)
}
return err
}
func (cs *openTracingClientStream) CloseSend() error {
err := cs.ClientStream.CloseSend()
if err != nil {
cs.finishFunc(err)
}
return err
}
func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
md = metadata.New(nil)
} else {
md = md.Copy()
}
mdWriter := metadataReaderWriter{md}
err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter)
// We have no better place to record an error than the Span itself :-/
if err != nil {
clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err))
}
return metadata.NewOutgoingContext(ctx, md)
}

View file

@ -1,69 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// A Class is a set of types of outcomes (including errors) that will often
// be handled in the same way.
type Class string
const (
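// Unknown represents outcomes whose class could not be determined.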
Unknown Class = "0xx"
// Success represents outcomes that achieved the desired results.
Success Class = "2xx"
// ClientError represents errors that were the client's fault.
ClientError Class = "4xx"
// ServerError represents errors that were the server's fault.
ServerError Class = "5xx"
)
// ErrorClass returns the class of the given error
func ErrorClass(err error) Class {
if s, ok := status.FromError(err); ok {
switch s.Code() {
// Success or "success"
case codes.OK, codes.Canceled:
return Success
// Client errors
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition,
codes.OutOfRange:
return ClientError
// Server errors
case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted,
codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss:
return ServerError
// Not sure
case codes.Unknown:
fallthrough
default:
return Unknown
}
}
return Unknown
}
// SetSpanTags sets one or more tags on the given span according to the
// error.
func SetSpanTags(span opentracing.Span, err error, client bool) {
c := ErrorClass(err)
code := codes.Unknown
if s, ok := status.FromError(err); ok {
code = s.Code()
}
span.SetTag("response_code", code)
span.SetTag("response_class", c)
if err == nil {
return
}
if client || c == ServerError {
ext.Error.Set(span, true)
}
}
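
A quick sketch of how this classification behaves (the error values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	fmt.Println(otgrpc.ErrorClass(status.Error(codes.NotFound, "missing"))) // 4xx
	fmt.Println(otgrpc.ErrorClass(status.Error(codes.Internal, "boom")))    // 5xx
	fmt.Println(otgrpc.ErrorClass(nil))                                     // 2xx
}
```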

View file

@ -1,76 +0,0 @@
package otgrpc
import "github.com/opentracing/opentracing-go"
// Option instances may be used in OpenTracing(Server|Client)Interceptor
// initialization.
//
// See this post about the "functional options" pattern:
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Option func(o *options)
// LogPayloads returns an Option that tells the OpenTracing instrumentation to
// try to log application payloads in both directions.
func LogPayloads() Option {
return func(o *options) {
o.logPayloads = true
}
}
// SpanInclusionFunc provides an optional mechanism to decide whether or not
// to trace a given gRPC call. Return true to create a Span and initiate
// tracing, false to not create a Span and not trace.
//
// parentSpanCtx may be nil if no parent could be extracted from either the Go
// context.Context (on the client) or the RPC (on the server).
type SpanInclusionFunc func(
parentSpanCtx opentracing.SpanContext,
method string,
req, resp interface{}) bool
// IncludingSpans binds a SpanInclusionFunc to the options
func IncludingSpans(inclusionFunc SpanInclusionFunc) Option {
return func(o *options) {
o.inclusionFunc = inclusionFunc
}
}
// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add
// arbitrary tags/logs/etc to the opentracing.Span associated with client
// and/or server RPCs.
type SpanDecoratorFunc func(
span opentracing.Span,
method string,
req, resp interface{},
grpcError error)
// SpanDecorator binds a function that decorates gRPC Spans.
func SpanDecorator(decorator SpanDecoratorFunc) Option {
return func(o *options) {
o.decorator = decorator
}
}
// The internal-only options struct. Obviously overkill at the moment, but it will
// scale well as production use dictates other configuration and tuning
// parameters.
type options struct {
logPayloads bool
decorator SpanDecoratorFunc
// May be nil.
inclusionFunc SpanInclusionFunc
}
// newOptions returns the default options.
func newOptions() *options {
return &options{
logPayloads: false,
inclusionFunc: nil,
}
}
func (o *options) apply(opts ...Option) {
for _, opt := range opts {
opt(o)
}
}
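
A sketch of combining these options at dial time; the target address and health-check method name are placeholders:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	tracer := opentracing.GlobalTracer()
	// Trace everything except health checks, and log payloads on traced calls.
	skipHealth := func(parent opentracing.SpanContext, method string, req, resp interface{}) bool {
		return method != "/grpc.health.v1.Health/Check"
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(
			tracer,
			otgrpc.LogPayloads(),
			otgrpc.IncludingSpans(skipHealth),
		)))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```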

View file

@ -1,5 +0,0 @@
// Package otgrpc provides OpenTracing support for any gRPC client or server.
//
// See the README for simple usage examples:
// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md
package otgrpc

View file

@ -1,141 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// For example:
//
// s := grpc.NewServer(
// ..., // (existing ServerOptions)
// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)))
//
// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
// metadata; if found, the server span will act as the ChildOf that RPC
// SpanContext.
//
// Root or not, the server Span will be embedded in the context.Context for the
// application-specific gRPC handler(s) to access.
func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler,
) (resp interface{}, err error) {
spanContext, err := extractSpanContext(ctx, tracer)
if err != nil && err != opentracing.ErrSpanContextNotFound {
// TODO: establish some sort of error reporting mechanism here. We
// don't know where to put such an error and must rely on Tracer
// implementations to do something appropriate for the time being.
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) {
return handler(ctx, req)
}
serverSpan := tracer.StartSpan(
info.FullMethod,
ext.RPCServerOption(spanContext),
gRPCComponentTag,
)
defer serverSpan.Finish()
ctx = opentracing.ContextWithSpan(ctx, serverSpan)
if otgrpcOpts.logPayloads {
serverSpan.LogFields(log.Object("gRPC request", req))
}
resp, err = handler(ctx, req)
if err == nil {
if otgrpcOpts.logPayloads {
serverSpan.LogFields(log.Object("gRPC response", resp))
}
} else {
SetSpanTags(serverSpan, err, false)
serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err)
}
return resp, err
}
}
// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by
// creating a single span to correspond to the lifetime of the RPC's stream.
//
// For example:
//
// s := grpc.NewServer(
// ..., // (existing ServerOptions)
// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)))
//
// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
// metadata; if found, the server span will act as the ChildOf that RPC
// SpanContext.
//
// Root or not, the server Span will be embedded in the context.Context for the
// application-specific gRPC handler(s) to access.
func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
spanContext, err := extractSpanContext(ss.Context(), tracer)
if err != nil && err != opentracing.ErrSpanContextNotFound {
// TODO: establish some sort of error reporting mechanism here. We
// don't know where to put such an error and must rely on Tracer
// implementations to do something appropriate for the time being.
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) {
return handler(srv, ss)
}
serverSpan := tracer.StartSpan(
info.FullMethod,
ext.RPCServerOption(spanContext),
gRPCComponentTag,
)
defer serverSpan.Finish()
ss = &openTracingServerStream{
ServerStream: ss,
ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan),
}
err = handler(srv, ss)
if err != nil {
SetSpanTags(serverSpan, err, false)
serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err)
}
return err
}
}
type openTracingServerStream struct {
grpc.ServerStream
ctx context.Context
}
func (ss *openTracingServerStream) Context() context.Context {
return ss.ctx
}
func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
md = metadata.New(nil)
}
return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md})
}

Some files were not shown because too many files have changed in this diff.