Merge pull request #340 from thaJeztah/19.03_backport_bump_grpc

[19.03 backport] bump google.golang.org/grpc v1.23.0 (CVE-2019-9512, CVE-2019-9514, CVE-2019-9515)
Andrew Hsu 2019-09-23 09:32:43 -07:00 committed by GitHub
commit c27f11fa2e
44 changed files with 1579 additions and 635 deletions


@ -74,7 +74,7 @@ github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b0
# get go-zfs packages
github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb
google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0
# The version of runc should match the version that is used by the containerd
# version that is used. If you need to update runc, open a pull request in
@ -127,7 +127,7 @@ github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8b
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
github.com/containerd/go-runc 7d11b49dc0769f6dbb0d1b19f3d48524d1bad9ad
github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
github.com/containerd/ttrpc 699c4e40d1e7416e08bf7019c7ce2e9beced4636
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f
github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
# cluster


@ -18,7 +18,6 @@ package ttrpc
import (
"bufio"
"context"
"encoding/binary"
"io"
"net"
@ -98,7 +97,7 @@ func newChannel(conn net.Conn) *channel {
// returned will be valid and caller should send that along to
// the correct consumer. The bytes on the underlying channel
// will be discarded.
func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
func (ch *channel) recv() (messageHeader, []byte, error) {
mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
if err != nil {
return messageHeader{}, nil, err
@ -120,7 +119,7 @@ func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
return mh, p, nil
}
func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error {
func (ch *channel) send(streamID uint32, t messageType, p []byte) error {
if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
return err
}


@ -29,6 +29,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@ -36,36 +37,52 @@ import (
// closed.
var ErrClosed = errors.New("ttrpc: closed")
// Client for a ttrpc server
type Client struct {
codec codec
conn net.Conn
channel *channel
calls chan *callRequest
closed chan struct{}
closeOnce sync.Once
closeFunc func()
done chan struct{}
err error
ctx context.Context
closed func()
closeOnce sync.Once
userCloseFunc func()
errOnce sync.Once
err error
interceptor UnaryClientInterceptor
}
// ClientOpts configures a client
type ClientOpts func(c *Client)
// WithOnClose sets the close func whenever the client's Close() method is called
func WithOnClose(onClose func()) ClientOpts {
return func(c *Client) {
c.closeFunc = onClose
c.userCloseFunc = onClose
}
}
// WithUnaryClientInterceptor sets the provided client interceptor
func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts {
return func(c *Client) {
c.interceptor = i
}
}
func NewClient(conn net.Conn, opts ...ClientOpts) *Client {
ctx, cancel := context.WithCancel(context.Background())
c := &Client{
codec: codec{},
conn: conn,
channel: newChannel(conn),
calls: make(chan *callRequest),
closed: make(chan struct{}),
done: make(chan struct{}),
closeFunc: func() {},
codec: codec{},
conn: conn,
channel: newChannel(conn),
calls: make(chan *callRequest),
closed: cancel,
ctx: ctx,
userCloseFunc: func() {},
interceptor: defaultClientInterceptor,
}
for _, o := range opts {
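Editor's note: for reference, a minimal usage sketch of the new client options. The socket path and the logging interceptor are hypothetical; only `NewClient`, `WithUnaryClientInterceptor`, `WithOnClose`, and the interceptor types come from the diff above.

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

// loggingInterceptor is a hypothetical interceptor: it logs the full method
// name, then delegates to the real invoker.
func loggingInterceptor(ctx context.Context, req *ttrpc.Request, resp *ttrpc.Response,
	info *ttrpc.UnaryClientInfo, invoker ttrpc.Invoker) error {
	log.Printf("ttrpc call: %s", info.FullMethod)
	return invoker(ctx, req, resp)
}

func main() {
	conn, err := net.Dial("unix", "/run/example.sock") // hypothetical socket
	if err != nil {
		log.Fatal(err)
	}
	client := ttrpc.NewClient(conn,
		ttrpc.WithUnaryClientInterceptor(loggingInterceptor),
		ttrpc.WithOnClose(func() { log.Println("connection closed") }),
	)
	defer client.Close()
}
```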
@ -99,11 +116,18 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int
cresp = &Response{}
)
if metadata, ok := GetMetadata(ctx); ok {
metadata.setRequest(creq)
}
if dl, ok := ctx.Deadline(); ok {
creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds()
}
if err := c.dispatch(ctx, creq, cresp); err != nil {
info := &UnaryClientInfo{
FullMethod: fullPath(service, method),
}
if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil {
return err
}
@ -111,11 +135,10 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int
return err
}
if cresp.Status == nil {
return errors.New("no status provided on response")
if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) {
return status.ErrorProto(cresp.Status)
}
return status.ErrorProto(cresp.Status)
return nil
}
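Editor's note: with this change, a non-OK response status surfaces as a gRPC status error (via `status.ErrorProto`), and an OK or absent status yields `err == nil`. A hedged caller-side sketch; the service, method, and message types are hypothetical placeholders.

```go
// assumes: import ("context"; "log"; "github.com/containerd/ttrpc"; "google.golang.org/grpc/status")

// inspectErr distinguishes RPC status errors from transport failures.
func inspectErr(ctx context.Context, client *ttrpc.Client, req, resp interface{}) {
	err := client.Call(ctx, "example.v1.Service", "Get", req, resp)
	if err == nil {
		return // status was OK (or absent)
	}
	if s, ok := status.FromError(err); ok {
		log.Printf("rpc failed: code=%v msg=%q", s.Code(), s.Message())
		return
	}
	log.Printf("transport error: %v", err) // e.g. ttrpc.ErrClosed
}
```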
func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
@ -131,8 +154,8 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err
case <-ctx.Done():
return ctx.Err()
case c.calls <- call:
case <-c.done:
return c.err
case <-c.ctx.Done():
return c.error()
}
select {
@ -140,16 +163,15 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err
return ctx.Err()
case err := <-errs:
return filterCloseErr(err)
case <-c.done:
return c.err
case <-c.ctx.Done():
return c.error()
}
}
func (c *Client) Close() error {
c.closeOnce.Do(func() {
close(c.closed)
c.closed()
})
return nil
}
@ -159,51 +181,82 @@ type message struct {
err error
}
func (c *Client) run() {
var (
streamID uint32 = 1
waiters = make(map[uint32]*callRequest)
calls = c.calls
incoming = make(chan *message)
shutdown = make(chan struct{})
shutdownErr error
)
type receiver struct {
wg *sync.WaitGroup
messages chan *message
err error
}
go func() {
defer close(shutdown)
func (r *receiver) run(ctx context.Context, c *channel) {
defer r.wg.Done()
// start one more goroutine to recv messages without blocking.
for {
mh, p, err := c.channel.recv(context.TODO())
for {
select {
case <-ctx.Done():
r.err = ctx.Err()
return
default:
mh, p, err := c.recv()
if err != nil {
_, ok := status.FromError(err)
if !ok {
// treat all errors that are not an rpc status as terminal.
// all others poison the connection.
shutdownErr = err
r.err = filterCloseErr(err)
return
}
}
select {
case incoming <- &message{
case r.messages <- &message{
messageHeader: mh,
p: p[:mh.Length],
err: err,
}:
case <-c.done:
case <-ctx.Done():
r.err = ctx.Err()
return
}
}
}()
}
}
defer c.conn.Close()
defer close(c.done)
defer c.closeFunc()
func (c *Client) run() {
var (
streamID uint32 = 1
waiters = make(map[uint32]*callRequest)
calls = c.calls
incoming = make(chan *message)
receiversDone = make(chan struct{})
wg sync.WaitGroup
)
// broadcast the shutdown error to the remaining waiters.
abortWaiters := func(wErr error) {
for _, waiter := range waiters {
waiter.errs <- wErr
}
}
recv := &receiver{
wg: &wg,
messages: incoming,
}
wg.Add(1)
go func() {
wg.Wait()
close(receiversDone)
}()
go recv.run(c.ctx, c.channel)
defer func() {
c.conn.Close()
c.userCloseFunc()
}()
for {
select {
case call := <-calls:
if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil {
if err := c.send(streamID, messageTypeRequest, call.req); err != nil {
call.errs <- err
continue
}
@ -219,41 +272,42 @@ func (c *Client) run() {
call.errs <- c.recv(call.resp, msg)
delete(waiters, msg.StreamID)
case <-shutdown:
if shutdownErr != nil {
shutdownErr = filterCloseErr(shutdownErr)
} else {
shutdownErr = ErrClosed
}
shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down")
c.err = shutdownErr
for _, waiter := range waiters {
waiter.errs <- shutdownErr
case <-receiversDone:
// all the receivers have exited
if recv.err != nil {
c.setError(recv.err)
}
// don't return out, let the close of the context trigger the abort of waiters
c.Close()
return
case <-c.closed:
if c.err == nil {
c.err = ErrClosed
}
// broadcast the shutdown error to the remaining waiters.
for _, waiter := range waiters {
waiter.errs <- c.err
}
case <-c.ctx.Done():
abortWaiters(c.error())
return
}
}
}
func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error {
func (c *Client) error() error {
c.errOnce.Do(func() {
if c.err == nil {
c.err = ErrClosed
}
})
return c.err
}
func (c *Client) setError(err error) {
c.errOnce.Do(func() {
c.err = err
})
}
func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error {
p, err := c.codec.Marshal(msg)
if err != nil {
return err
}
return c.channel.send(ctx, streamID, mtype, p)
return c.channel.send(streamID, mtype, p)
}
func (c *Client) recv(resp *Response, msg *message) error {
@ -274,22 +328,21 @@ func (c *Client) recv(resp *Response, msg *message) error {
//
// This purposely ignores errors with a wrapped cause.
func filterCloseErr(err error) error {
if err == nil {
switch {
case err == nil:
return nil
}
if err == io.EOF {
case err == io.EOF:
return ErrClosed
}
if strings.Contains(err.Error(), "use of closed network connection") {
case errors.Cause(err) == io.EOF:
return ErrClosed
}
// if we have an epipe on a write, we cast to errclosed
if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" {
if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE {
return ErrClosed
case strings.Contains(err.Error(), "use of closed network connection"):
return ErrClosed
default:
// if we have an epipe on a write, we cast to errclosed
if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" {
if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE {
return ErrClosed
}
}
}


@ -19,9 +19,11 @@ package ttrpc
import "github.com/pkg/errors"
type serverConfig struct {
handshaker Handshaker
handshaker Handshaker
interceptor UnaryServerInterceptor
}
// ServerOpt for configuring a ttrpc server
type ServerOpt func(*serverConfig) error
// WithServerHandshaker can be passed to NewServer to ensure that the
@ -37,3 +39,14 @@ func WithServerHandshaker(handshaker Handshaker) ServerOpt {
return nil
}
}
// WithUnaryServerInterceptor sets the provided interceptor on the server
func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt {
return func(c *serverConfig) error {
if c.interceptor != nil {
return errors.New("only one interceptor allowed per server")
}
c.interceptor = i
return nil
}
}
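Editor's note: a hedged sketch of wiring the new server option. The timing interceptor is hypothetical; `NewServer`, `WithUnaryServerInterceptor`, `Unmarshaler`, `UnaryServerInfo`, and `Method` come from this vendored package.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/containerd/ttrpc"
)

// timingInterceptor measures handler latency before returning the response.
func timingInterceptor(ctx context.Context, unmarshal ttrpc.Unmarshaler,
	info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) {
	start := time.Now()
	resp, err := method(ctx, unmarshal)
	log.Printf("%s took %s", info.FullMethod, time.Since(start))
	return resp, err
}

func main() {
	// Only one interceptor may be set per server (see above).
	server, err := ttrpc.NewServer(ttrpc.WithUnaryServerInterceptor(timingInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer server.Close()
}
```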

vendor/github.com/containerd/ttrpc/interceptor.go (new generated vendored file, 50 lines)

@ -0,0 +1,50 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ttrpc
import "context"
// UnaryServerInfo provides information about the server request
type UnaryServerInfo struct {
FullMethod string
}
// UnaryClientInfo provides information about the client request
type UnaryClientInfo struct {
FullMethod string
}
// Unmarshaler contains the server request data and allows it to be unmarshaled
// into a concrete type
type Unmarshaler func(interface{}) error
// Invoker invokes the client's request and response from the ttrpc server
type Invoker func(context.Context, *Request, *Response) error
// UnaryServerInterceptor specifies the interceptor function for server request/response
type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error)
// UnaryClientInterceptor specifies the interceptor function for client request/response
type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error
func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) {
return method(ctx, unmarshal)
}
func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error {
return invoker(ctx, req, resp)
}
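Editor's note: because the server accepts only a single interceptor, two can be composed by hand. A hedged helper sketch; `chainServerInterceptors` is hypothetical and assumes `Method`'s underlying signature is `func(context.Context, func(interface{}) error) (interface{}, error)`, as in the package's services.go.

```go
// assumes: import ("context"; "github.com/containerd/ttrpc")

// chainServerInterceptors runs outer first; outer's method argument is a
// wrapper that invokes inner, which finally calls the real method.
func chainServerInterceptors(outer, inner ttrpc.UnaryServerInterceptor) ttrpc.UnaryServerInterceptor {
	return func(ctx context.Context, unmarshal ttrpc.Unmarshaler,
		info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) {
		return outer(ctx, unmarshal, info,
			func(ctx context.Context, u func(interface{}) error) (interface{}, error) {
				return inner(ctx, u, info, method)
			})
	}
}
```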

vendor/github.com/containerd/ttrpc/metadata.go (new generated vendored file, 107 lines)

@ -0,0 +1,107 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ttrpc
import (
"context"
"strings"
)
// MD is the user type for ttrpc metadata
type MD map[string][]string
// Get returns the metadata for a given key when they exist.
// If there is no metadata, a nil slice and false are returned.
func (m MD) Get(key string) ([]string, bool) {
key = strings.ToLower(key)
list, ok := m[key]
if !ok || len(list) == 0 {
return nil, false
}
return list, true
}
// Set sets the provided values for a given key.
// The values will overwrite any existing values.
// If no values provided, a key will be deleted.
func (m MD) Set(key string, values ...string) {
key = strings.ToLower(key)
if len(values) == 0 {
delete(m, key)
return
}
m[key] = values
}
// Append appends additional values to the given key.
func (m MD) Append(key string, values ...string) {
key = strings.ToLower(key)
if len(values) == 0 {
return
}
current, ok := m[key]
if ok {
m.Set(key, append(current, values...)...)
} else {
m.Set(key, values...)
}
}
func (m MD) setRequest(r *Request) {
for k, values := range m {
for _, v := range values {
r.Metadata = append(r.Metadata, &KeyValue{
Key: k,
Value: v,
})
}
}
}
func (m MD) fromRequest(r *Request) {
for _, kv := range r.Metadata {
m[kv.Key] = append(m[kv.Key], kv.Value)
}
}
type metadataKey struct{}
// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata)
func GetMetadata(ctx context.Context) (MD, bool) {
metadata, ok := ctx.Value(metadataKey{}).(MD)
return metadata, ok
}
// GetMetadataValue gets a specific metadata value by name from context.Context
func GetMetadataValue(ctx context.Context, name string) (string, bool) {
metadata, ok := GetMetadata(ctx)
if !ok {
return "", false
}
if list, ok := metadata.Get(name); ok {
return list[0], true
}
return "", false
}
// WithMetadata attaches metadata map to a context.Context
func WithMetadata(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, metadataKey{}, md)
}
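Editor's note: a hedged end-to-end sketch of the new metadata API; the key and value strings are hypothetical. The client half relies on `Call` serializing context metadata into `Request.Metadata` (see client.go above); the server half relies on `getRequestContext` re-attaching it (see server.go below).

```go
// assumes: import ("context"; "log"; "github.com/containerd/ttrpc")

// Client side: attach metadata before making a call.
func withRequestID(ctx context.Context) context.Context {
	md := ttrpc.MD{}
	md.Set("request-id", "abc-123")    // keys are lower-cased by Set
	md.Append("tags", "alpha", "beta") // Append accumulates values
	return ttrpc.WithMetadata(ctx, md)
}

// Server side: read metadata inside a handler.
func logRequestID(ctx context.Context) {
	if id, ok := ttrpc.GetMetadataValue(ctx, "request-id"); ok {
		log.Printf("request-id: %s", id) // first value for the key
	}
	if md, ok := ttrpc.GetMetadata(ctx); ok {
		if tags, ok := md.Get("tags"); ok {
			log.Printf("tags: %v", tags)
		}
	}
}
```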


@ -53,10 +53,13 @@ func NewServer(opts ...ServerOpt) (*Server, error) {
return nil, err
}
}
if config.interceptor == nil {
config.interceptor = defaultServerInterceptor
}
return &Server{
config: config,
services: newServiceSet(),
services: newServiceSet(config.interceptor),
done: make(chan struct{}),
listeners: make(map[net.Listener]struct{}),
connections: make(map[*serverConn]struct{}),
@ -341,7 +344,7 @@ func (c *serverConn) run(sctx context.Context) {
default: // proceed
}
mh, p, err := ch.recv(ctx)
mh, p, err := ch.recv()
if err != nil {
status, ok := status.FromError(err)
if !ok {
@ -438,7 +441,7 @@ func (c *serverConn) run(sctx context.Context) {
return
}
if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil {
if err := ch.send(response.id, messageTypeResponse, p); err != nil {
logrus.WithError(err).Error("failed sending message on channel")
return
}
@ -449,7 +452,12 @@ func (c *serverConn) run(sctx context.Context) {
// branch. Basically, it means that we are no longer receiving
// requests due to a terminal error.
recvErr = nil // connection is now "closing"
if err != nil && err != io.EOF {
if err == io.EOF || err == io.ErrUnexpectedEOF {
// The client went away and we should stop processing
// requests, so that the client connection is closed
return
}
if err != nil {
logrus.WithError(err).Error("error receiving message")
}
case <-shutdown:
@ -461,6 +469,12 @@ func (c *serverConn) run(sctx context.Context) {
var noopFunc = func() {}
func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) {
if len(req.Metadata) > 0 {
md := MD{}
md.fromRequest(req)
ctx = WithMetadata(ctx, md)
}
cancel = noopFunc
if req.TimeoutNano == 0 {
return ctx, cancel


@ -37,12 +37,14 @@ type ServiceDesc struct {
}
type serviceSet struct {
services map[string]ServiceDesc
services map[string]ServiceDesc
interceptor UnaryServerInterceptor
}
func newServiceSet() *serviceSet {
func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet {
return &serviceSet{
services: make(map[string]ServiceDesc),
services: make(map[string]ServiceDesc),
interceptor: interceptor,
}
}
@ -84,7 +86,11 @@ func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName strin
return nil
}
resp, err := method(ctx, unmarshal)
info := &UnaryServerInfo{
FullMethod: fullPath(serviceName, methodName),
}
resp, err := s.interceptor(ctx, unmarshal, info, method)
if err != nil {
return nil, err
}
@ -146,5 +152,5 @@ func convertCode(err error) codes.Code {
}
func fullPath(service, method string) string {
return "/" + path.Join("/", service, method)
return "/" + path.Join(service, method)
}
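Editor's note: the `fullPath` change removes a doubled leading slash. `path.Join("/", service, method)` already yields a rooted path, so prefixing another "/" produced `//service/method`. A quick stdlib check:

```go
// assumes: import ("fmt"; "path")
fmt.Println("/" + path.Join("/", "svc", "Method")) // old: "//svc/Method"
fmt.Println("/" + path.Join("svc", "Method"))      // new: "/svc/Method"
```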


@ -23,10 +23,11 @@ import (
)
type Request struct {
Service string `protobuf:"bytes,1,opt,name=service,proto3"`
Method string `protobuf:"bytes,2,opt,name=method,proto3"`
Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
Service string `protobuf:"bytes,1,opt,name=service,proto3"`
Method string `protobuf:"bytes,2,opt,name=method,proto3"`
Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"`
}
func (r *Request) Reset() { *r = Request{} }
@ -41,3 +42,22 @@ type Response struct {
func (r *Response) Reset() { *r = Response{} }
func (r *Response) String() string { return fmt.Sprintf("%+#v", r) }
func (r *Response) ProtoMessage() {}
type StringList struct {
List []string `protobuf:"bytes,1,rep,name=list,proto3"`
}
func (r *StringList) Reset() { *r = StringList{} }
func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) }
func (r *StringList) ProtoMessage() {}
func makeStringList(item ...string) StringList { return StringList{List: item} }
type KeyValue struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3"`
Value string `protobuf:"bytes,2,opt,name=value,proto3"`
}
func (m *KeyValue) Reset() { *m = KeyValue{} }
func (*KeyValue) ProtoMessage() {}
func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) }


@ -1,42 +1,96 @@
# gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
The Go implementation of [gRPC](https://grpc.io/): A high performance, open
source, general RPC framework that puts mobile and HTTP/2 first. For more
information see the [gRPC Quick Start:
Go](https://grpc.io/docs/quickstart/go.html) guide.
Installation
------------
To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
To install this package, you need to install Go and setup your Go workspace on
your computer. The simplest way to install the library is to run:
```
$ go get -u google.golang.org/grpc
```
With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
your source code and `go [build|run|test]` will automatically download the
necessary dependencies ([Go modules
ref](https://github.com/golang/go/wiki/Modules)).
If you are trying to access grpc-go from within China, please see the
[FAQ](#FAQ) below.
Prerequisites
-------------
gRPC-Go requires Go 1.9 or later.
Constraints
-----------
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
Documentation
-------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
descriptions.
- Documentation on specific topics can be found in the [Documentation
directory](Documentation/).
- Examples can be found in the [examples directory](examples/).
Performance
-----------
See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Performance benchmark data for grpc-go and other languages is maintained in
[this
dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Status
------
General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).
General Availability [Google Cloud Platform Launch
Stages](https://cloud.google.com/terms/launch-stages).
FAQ
---
#### I/O Timeout Errors
The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:
```
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
To build Go code, there are several options:
- Set up a VPN and access google.golang.org through that.
- Without Go module support: `git clone` the repo manually:
```
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
```
You will need to do the same for all of grpc's dependencies in `golang.org`,
e.g. `golang.org/x/net`.
- With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory:
```
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy
go mod vendor
go build -mod=vendor
```
Again, this will need to be done for all transitive dependencies hosted on
golang.org as well. Please refer to [this
issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
this concern.
#### Compiling error, undefined: grpc.SupportPackageIsVersion
Please update proto package, gRPC package and rebuild the proto files:


@ -43,7 +43,7 @@ type Address struct {
// BalancerConfig specifies the configurations for Balancer.
//
// Deprecated: please use package balancer.
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerConfig struct {
// DialCreds is the transport credential the Balancer implementation can
// use to dial to a remote load balancer server. The Balancer implementations
@ -57,7 +57,7 @@ type BalancerConfig struct {
// BalancerGetOptions configures a Get call.
//
// Deprecated: please use package balancer.
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerGetOptions struct {
// BlockingWait specifies whether Get should block when there is no
// connected address.
@ -66,7 +66,7 @@ type BalancerGetOptions struct {
// Balancer chooses network addresses for RPCs.
//
// Deprecated: please use package balancer.
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type Balancer interface {
// Start does the initialization work to bootstrap a Balancer. For example,
// this function may start the name resolution and watch the updates. It will
@ -120,7 +120,7 @@ type Balancer interface {
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly.
//
// Deprecated: please use package balancer/roundrobin.
// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
func RoundRobin(r naming.Resolver) Balancer {
return &roundRobin{r: r}
}


@ -22,6 +22,7 @@ package balancer
import (
"context"
"encoding/json"
"errors"
"net"
"strings"
@ -31,6 +32,7 @@ import (
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
var (
@ -39,7 +41,10 @@ var (
)
// Register registers the balancer builder to the balancer map. b.Name
// (lowercased) will be used as the name registered with this builder.
// (lowercased) will be used as the name registered with this builder. If the
// Builder implements ConfigParser, ParseConfig will be called when new service
// configs are received by the resolver, and the result will be provided to the
// Balancer in UpdateClientConnState.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Balancers are
@ -138,6 +143,8 @@ type ClientConn interface {
ResolveNow(resolver.ResolveNowOption)
// Target returns the dial target for this ClientConn.
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string
}
@ -155,6 +162,10 @@ type BuildOptions struct {
Dialer func(context.Context, string) (net.Conn, error)
// ChannelzParentID is the entity parent's channelz unique identification number.
ChannelzParentID int64
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
// passed to the resolver.
// See the documentation for the resolver.Target type for details about what it contains.
Target resolver.Target
}
// Builder creates a balancer.
@ -166,6 +177,14 @@ type Builder interface {
Name() string
}
// ConfigParser parses load balancer configs.
type ConfigParser interface {
// ParseConfig parses the JSON load balancer config provided into an
// internal form or returns an error if the config is invalid. For future
// compatibility reasons, unknown fields in the config should be ignored.
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
}
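Editor's note: a hedged sketch of a builder that also implements `ConfigParser`. The balancer name, the config shape, and the embedding of `serviceconfig.LoadBalancingConfig` are illustrative assumptions; the `Build`/`Name` methods of `balancer.Builder` are elided.

```go
// assumes: import ("encoding/json"; "google.golang.org/grpc/serviceconfig")

// myLBConfig is a hypothetical parsed load balancing config.
type myLBConfig struct {
	serviceconfig.LoadBalancingConfig
	Threshold int `json:"threshold"`
}

type myBuilder struct{} // would also implement balancer.Builder's Build/Name

func (myBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	cfg := &myLBConfig{}
	if err := json.Unmarshal(js, cfg); err != nil {
		return nil, err
	}
	// encoding/json ignores unknown fields, matching the
	// forward-compatibility note above.
	return cfg, nil
}
```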
// PickOptions contains addition information for the Pick operation.
type PickOptions struct {
// FullMethodName is the method name that NewClientStream() is called
@ -264,7 +283,7 @@ type Balancer interface {
// non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateResolverState will be called instead.
// UpdateClientConnState will be called instead.
HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
@ -277,14 +296,23 @@ type SubConnState struct {
// TODO: add last connection error
}
// ClientConnState describes the state of a ClientConn relevant to the
// balancer.
type ClientConnState struct {
ResolverState resolver.State
// The parsed load balancing configuration returned by the builder's
// ParseConfig method, if implemented.
BalancerConfig serviceconfig.LoadBalancingConfig
}
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateResolverState method will be called instead
// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
// HandleSubConnStateChange.
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateResolverState is called by gRPC when the state of the resolver
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes.
UpdateResolverState(resolver.State)
UpdateClientConnState(ClientConnState)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)


@ -70,13 +70,15 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error)
panic("not implemented")
}
func (b *baseBalancer) UpdateResolverState(s resolver.State) {
// TODO: handle s.Err (log if not nil) once implemented.
// TODO: handle s.ServiceConfig?
grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
if grpclog.V(2) {
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
}
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
for _, a := range s.Addresses {
for _, a := range s.ResolverState.Addresses {
addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns).
@ -127,10 +129,14 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
}
oldS, ok := b.scStates[sc]
if !ok {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
}
return
}
b.scStates[sc] = s


@ -88,7 +88,7 @@ type ccBalancerWrapper struct {
cc *ClientConn
balancer balancer.Balancer
stateChangeQueue *scStateUpdateBuffer
resolverUpdateCh chan *resolver.State
ccUpdateCh chan *balancer.ClientConnState
done chan struct{}
mu sync.Mutex
@ -99,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
ccb := &ccBalancerWrapper{
cc: cc,
stateChangeQueue: newSCStateUpdateBuffer(),
resolverUpdateCh: make(chan *resolver.State, 1),
ccUpdateCh: make(chan *balancer.ClientConnState, 1),
done: make(chan struct{}),
subConns: make(map[*acBalancerWrapper]struct{}),
}
@ -126,7 +126,7 @@ func (ccb *ccBalancerWrapper) watcher() {
} else {
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
}
case s := <-ccb.resolverUpdateCh:
case s := <-ccb.ccUpdateCh:
select {
case <-ccb.done:
ccb.balancer.Close()
@ -134,9 +134,9 @@ func (ccb *ccBalancerWrapper) watcher() {
default:
}
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateResolverState(*s)
ub.UpdateClientConnState(*s)
} else {
ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
}
case <-ccb.done:
}
@ -151,9 +151,11 @@ func (ccb *ccBalancerWrapper) watcher() {
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
ccb.UpdateBalancerState(connectivity.Connecting, nil)
return
default:
}
ccb.cc.firstResolveEvent.Fire()
}
}
@ -178,9 +180,10 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
})
}
func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
if ccb.cc.curBalancerName != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
s := &ccs.ResolverState
for i := 0; i < len(s.Addresses); {
if s.Addresses[i].Type == resolver.GRPCLB {
copy(s.Addresses[i:], s.Addresses[i+1:])
@ -191,10 +194,10 @@ func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
}
}
select {
case <-ccb.resolverUpdateCh:
case <-ccb.ccUpdateCh:
default:
}
ccb.resolverUpdateCh <- &s
ccb.ccUpdateCh <- ccs
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {


@ -20,7 +20,6 @@ package grpc
import (
"context"
"strings"
"sync"
"google.golang.org/grpc/balancer"
@ -34,13 +33,7 @@ type balancerWrapperBuilder struct {
}
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
targetAddr := cc.Target()
targetSplitted := strings.Split(targetAddr, ":///")
if len(targetSplitted) >= 2 {
targetAddr = targetSplitted[1]
}
bwb.b.Start(targetAddr, BalancerConfig{
bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
DialCreds: opts.DialCreds,
Dialer: opts.Dialer,
})
@ -49,7 +42,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
balancer: bwb.b,
pickfirst: pickfirst,
cc: cc,
targetAddr: targetAddr,
targetAddr: opts.Target.Endpoint,
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
@ -120,7 +113,7 @@ func (bw *balancerWrapper) lbWatcher() {
}
for addrs := range notifyCh {
grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
if bw.pickfirst {
var (
oldA resolver.Address


@ -38,13 +38,13 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/serviceconfig"
"google.golang.org/grpc/status"
)
@ -137,6 +137,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
opt.apply(&cc.dopts)
}
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
defer func() {
if err != nil {
cc.Close()
@ -290,6 +293,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
}
// Build the resolver.
@ -327,6 +331,68 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return cc, nil
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
func chainUnaryClientInterceptors(cc *ClientConn) {
interceptors := cc.dopts.chainUnaryInts
// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
// be executed before any other chained interceptors.
if cc.dopts.unaryInt != nil {
interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
}
var chainedInt UnaryClientInterceptor
if len(interceptors) == 0 {
chainedInt = nil
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
}
}
cc.dopts.unaryInt = chainedInt
}
// getChainUnaryInvoker recursively generate the chained unary invoker.
func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
if curr == len(interceptors)-1 {
return finalInvoker
}
return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
}
}
// chainStreamClientInterceptors chains all stream client interceptors into one.
func chainStreamClientInterceptors(cc *ClientConn) {
interceptors := cc.dopts.chainStreamInts
// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
// be executed before any other chained interceptors.
if cc.dopts.streamInt != nil {
interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
}
var chainedInt StreamClientInterceptor
if len(interceptors) == 0 {
chainedInt = nil
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
}
}
cc.dopts.streamInt = chainedInt
}
// getChainStreamer recursively generate the chained client stream constructor.
func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
if curr == len(interceptors)-1 {
return finalStreamer
}
return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
}
}
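Editor's note: a hedged sketch of the new chaining from the caller's perspective; the target and interceptor bodies are hypothetical. With `WithChainUnaryInterceptor(outer, inner)`, `outer` wraps `inner`, and any interceptor set via `WithUnaryInterceptor` is prepended ahead of both, per the comments above.

```go
// assumes: import ("context"; "log"; "google.golang.org/grpc")

func outer(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("-> outer %s", method) // runs first
	return invoker(ctx, method, req, reply, cc, opts...)
}

func inner(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("   -> inner %s", method) // runs second, closest to the real call
	return invoker(ctx, method, req, reply, cc, opts...)
}

func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // assumption: plaintext, for the sketch only
		grpc.WithChainUnaryInterceptor(outer, inner),
	)
}
```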
// connectivityStateManager keeps the connectivity.State of ClientConn.
// This struct will eventually be exported so the balancers can access it.
type connectivityStateManager struct {
@ -466,24 +532,6 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
}
}
// gRPC should resort to default service config when:
// * resolver service config is disabled
// * or, resolver does not return a service config or returns an invalid one.
func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool {
if cc.dopts.disableServiceConfig {
return true
}
// The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type.
// Right now, we assume that empty service config string means resolver does not return a config.
if sc == "" {
return true
}
// TODO: the logic below is temporary. Once we finish the logic to validate service config
// in resolver, we will replace the logic below.
_, err := parseServiceConfig(sc)
return err != nil
}
func (cc *ClientConn) updateResolverState(s resolver.State) error {
cc.mu.Lock()
defer cc.mu.Unlock()
@ -494,54 +542,47 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error {
return nil
}
if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) {
if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
}
} else {
// TODO: the parsing logic below will be moved inside resolver.
sc, err := parseServiceConfig(s.ServiceConfig)
if err != nil {
return err
}
if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig {
cc.applyServiceConfig(sc)
}
}
// update the service config that will be sent to balancer.
if cc.sc != nil {
s.ServiceConfig = cc.sc.rawJSONString
} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
cc.applyServiceConfig(sc)
}
var balCfg serviceconfig.LoadBalancingConfig
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var isGRPCLB bool
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
var newBalancerName string
// TODO: use new loadBalancerConfig field with appropriate priority.
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
if cc.sc != nil && cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
balCfg = cc.sc.lbConfig.cfg
} else {
newBalancerName = PickFirstBalancerName
var isGRPCLB bool
for _, a := range s.Addresses {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName
}
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
// Balancer dial option was set, and this is the first time handling
// resolved addresses. Build a balancer with dopts.balancerBuilder.
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
cc.balancerWrapper.updateResolverState(s)
cc.firstResolveEvent.Fire()
cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
return nil
}
@ -554,7 +595,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error {
//
// Caller must hold cc.mu.
func (cc *ClientConn) switchBalancer(name string) {
if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
if strings.EqualFold(cc.curBalancerName, name) {
return
}
@ -693,6 +734,8 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
// Update connectivity state within the lock to prevent subsequent or
// concurrent calls from resetting the transport more than once.
ac.updateConnectivityState(connectivity.Connecting)
ac.mu.Unlock()
@ -703,7 +746,16 @@ func (ac *addrConn) connect() error {
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
// It checks whether current connected address of ac is in the new addrs list.
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
@ -711,17 +763,18 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if ac.state == connectivity.Shutdown {
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
ac.addrs = addrs
return true
}
// Unless we're busy reconnecting already, let's reconnect from the top of
// the list.
if ac.state != connectivity.Ready {
if ac.state == connectivity.Connecting {
return false
}
// ac.state is Ready, try to find the connected address.
var curAddrFound bool
for _, a := range addrs {
if reflect.DeepEqual(ac.curAddr, a) {
@ -970,6 +1023,9 @@ func (ac *addrConn) resetTransport() {
// The spec doesn't mention what should be done for multiple addresses.
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
connectDeadline := time.Now().Add(dialDuration)
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.mu.Unlock()
newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
@ -1004,55 +1060,32 @@ func (ac *addrConn) resetTransport() {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
newTr.Close()
ac.mu.Unlock()
newTr.Close()
return
}
ac.curAddr = addr
ac.transport = newTr
ac.backoffIdx = 0
healthCheckConfig := ac.cc.healthCheckConfig()
// LB channel health checking is only enabled when all the four requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
// 3. a service config with non-empty healthCheckConfig field is provided,
// 4. the current load balancer allows it.
hctx, hcancel := context.WithCancel(ac.ctx)
healthcheckManagingState := false
if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
if ac.cc.dopts.healthCheckFunc == nil {
// TODO: add a link to the health check doc in the error message.
grpclog.Error("the client side LB channel health check function has not been set.")
} else {
// TODO(deklerk) refactor to just return transport
go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
healthcheckManagingState = true
}
}
if !healthcheckManagingState {
ac.updateConnectivityState(connectivity.Ready)
}
ac.startHealthCheck(hctx)
ac.mu.Unlock()
// Block until the created transport is down. And when this happens,
// we restart from the top of the addr list.
<-reconnect.Done()
hcancel()
// Need to reconnect after a READY, the addrConn enters
// TRANSIENT_FAILURE.
// restart connecting - the top of the loop will set state to
// CONNECTING. This is against the current connectivity semantics doc,
// however it allows for graceful behavior for RPCs not yet dispatched
// - unfortunate timing would otherwise lead to the RPC failing even
// though the TRANSIENT_FAILURE state (called for by the doc) would be
// instantaneous.
//
// This will set addrConn to TRANSIENT_FAILURE for a very short period
// of time, and turns CONNECTING. It seems reasonable to skip this, but
// READY-CONNECTING is not a valid transition.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
ac.updateConnectivityState(connectivity.TransientFailure)
ac.mu.Unlock()
// Ideally we should transition to Idle here and block until there is
// RPC activity that leads to the balancer requesting a reconnect of
// the associated SubConn.
}
}
@ -1066,8 +1099,6 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
ac.mu.Unlock()
return nil, resolver.Address{}, nil, errConnClosing
}
ac.updateConnectivityState(connectivity.Connecting)
ac.transport = nil
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@ -1111,14 +1142,35 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
Authority: ac.cc.authority,
}
once := sync.Once{}
onGoAway := func(r transport.GoAwayReason) {
ac.mu.Lock()
ac.adjustParams(r)
once.Do(func() {
if ac.state == connectivity.Ready {
// Prevent this SubConn from being used for new RPCs by setting its
// state to Connecting.
//
// TODO: this should be Idle when grpc-go properly supports it.
ac.updateConnectivityState(connectivity.Connecting)
}
})
ac.mu.Unlock()
reconnect.Fire()
}
onClose := func() {
ac.mu.Lock()
once.Do(func() {
if ac.state == connectivity.Ready {
// Prevent this SubConn from being used for new RPCs by setting its
// state to Connecting.
//
// TODO: this should be Idle when grpc-go properly supports it.
ac.updateConnectivityState(connectivity.Connecting)
}
})
ac.mu.Unlock()
close(onCloseCalled)
reconnect.Fire()
}
@ -1140,60 +1192,99 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
return nil, nil, err
}
if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
select {
case <-time.After(connectDeadline.Sub(time.Now())):
// We didn't get the preface in time.
newTr.Close()
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
return nil, nil, errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
case <-onCloseCalled:
// The transport has already closed - noop.
return nil, nil, errors.New("connection closed")
// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
}
select {
case <-time.After(connectDeadline.Sub(time.Now())):
// We didn't get the preface in time.
newTr.Close()
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
return nil, nil, errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
case <-onCloseCalled:
// The transport has already closed - noop.
return nil, nil, errors.New("connection closed")
// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
}
return newTr, reconnect, nil
}
func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
// Set up the health check helper functions
newStream := func() (interface{}, error) {
return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr)
// startHealthCheck starts the health checking stream (RPC) to watch the health
// stats of this connection if health checking is requested and configured.
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
// It sets addrConn to READY if the health checking stream is not started.
//
// Caller must hold ac.mu.
func (ac *addrConn) startHealthCheck(ctx context.Context) {
var healthcheckManagingState bool
defer func() {
if !healthcheckManagingState {
ac.updateConnectivityState(connectivity.Ready)
}
}()
if ac.cc.dopts.disableHealthCheck {
return
}
firstReady := true
reportHealth := func(ok bool) {
healthCheckConfig := ac.cc.healthCheckConfig()
if healthCheckConfig == nil {
return
}
if !ac.scopts.HealthCheckEnabled {
return
}
healthCheckFunc := ac.cc.dopts.healthCheckFunc
if healthCheckFunc == nil {
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
grpclog.Error("Health check is requested but health check function is not set.")
return
}
healthcheckManagingState = true
// Set up the health check helper functions.
currentTr := ac.transport
newStream := func(method string) (interface{}, error) {
ac.mu.Lock()
if ac.transport != currentTr {
ac.mu.Unlock()
return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
}
ac.mu.Unlock()
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
}
setConnectivityState := func(s connectivity.State) {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.transport != newTr {
if ac.transport != currentTr {
return
}
if ok {
if firstReady {
firstReady = false
ac.curAddr = addr
}
ac.updateConnectivityState(connectivity.Ready)
} else {
ac.updateConnectivityState(connectivity.TransientFailure)
}
ac.updateConnectivityState(s)
}
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
Severity: channelz.CtError,
})
// Start the health checking stream.
go func() {
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
if channelz.IsOn() {
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
Severity: channelz.CtError,
})
}
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
}
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
}
}
}()
}
func (ac *addrConn) resetConnectBackoff() {


@ -132,7 +132,8 @@ const (
// Unavailable indicates the service is currently unavailable.
// This is a most likely a transient condition and may be corrected
// by retrying with a backoff.
// by retrying with a backoff. Note that it is not always safe to retry
// non-idempotent operations.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.


@ -278,24 +278,22 @@ type ChannelzSecurityValue interface {
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
LocalCertificate []byte
RemoteCertificate []byte
}
func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should better be avoided.
type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string
Value proto.Message
}
func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",


@ -39,8 +39,12 @@ import (
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
cp Compressor
dc Decompressor
bs backoff.Strategy
@ -56,7 +60,6 @@ type dialOptions struct {
balancerBuilder balancer.Builder
// This is to support grpclb.
resolverBuilder resolver.Builder
reqHandshake envconfig.RequireHandshakeSetting
channelzParentID int64
disableServiceConfig bool
disableRetry bool
@ -96,17 +99,6 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
}
}
// WithWaitForHandshake blocks until the initial settings frame is received from
// the server before assigning RPCs to the connection.
//
// Deprecated: this is the default behavior, and this option will be removed
// after the 1.18 release.
func WithWaitForHandshake() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.reqHandshake = envconfig.RequireHandshakeOn
})
}
// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
@ -152,7 +144,8 @@ func WithInitialConnWindowSize(s int32) DialOption {
// WithMaxMsgSize returns a DialOption which sets the maximum message size the
// client can receive.
//
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
// be supported throughout 1.x.
func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}
@ -168,7 +161,8 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption {
// WithCodec returns a DialOption which sets a codec for message marshaling and
// unmarshaling.
//
// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.
// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
// supported throughout 1.x.
func WithCodec(c Codec) DialOption {
return WithDefaultCallOptions(CallCustomCodec(c))
}
@ -177,7 +171,7 @@ func WithCodec(c Codec) DialOption {
// message compression. It has lower priority than the compressor set by the
// UseCompressor CallOption.
//
// Deprecated: use UseCompressor instead.
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
func WithCompressor(cp Compressor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.cp = cp
@ -192,7 +186,8 @@ func WithCompressor(cp Compressor) DialOption {
// message. If no compressor is registered for the encoding, an Unimplemented
// status error will be returned.
//
// Deprecated: use encoding.RegisterCompressor instead.
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
func WithDecompressor(dc Decompressor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.dc = dc
@ -203,7 +198,7 @@ func WithDecompressor(dc Decompressor) DialOption {
// Name resolver will be ignored if this DialOption is specified.
//
// Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName.
// WithBalancerName. Will be removed in a future 1.x release.
func WithBalancer(b Balancer) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.balancerBuilder = &balancerWrapperBuilder{
@ -219,7 +214,8 @@ func WithBalancer(b Balancer) DialOption {
// The balancer cannot be overridden by balancer option specified by service
// config.
//
// This is an EXPERIMENTAL API.
// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
// instead. Will be removed in a future 1.x release.
func WithBalancerName(balancerName string) DialOption {
builder := balancer.Get(balancerName)
if builder == nil {
@ -240,9 +236,10 @@ func withResolverBuilder(b resolver.Builder) DialOption {
// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
// Deprecated: service config should be received through name resolver, as
// specified here.
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
// Deprecated: service config should be received through name resolver or via
// WithDefaultServiceConfig, as specified at
// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
// removed in a future 1.x release.
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.scChan = c
@ -325,7 +322,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
// Deprecated: use DialContext and context.WithTimeout instead.
// Deprecated: use DialContext and context.WithTimeout instead. Will be
// supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.timeout = d
@ -352,7 +350,8 @@ func init() {
// is returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
//
// Deprecated: use WithContextDialer instead
// Deprecated: use WithContextDialer instead. Will be supported throughout
// 1.x.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
return WithContextDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
@ -414,6 +413,17 @@ func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
})
}
// WithChainUnaryInterceptor returns a DialOption that specifies the chained
// interceptor for unary RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithUnaryInterceptor will always be prepended to the chain.
func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
})
}
// WithStreamInterceptor returns a DialOption that specifies the interceptor for
// streaming RPCs.
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
@ -422,6 +432,17 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
})
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for streaming RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainStreamInts = append(o.chainStreamInts, interceptors...)
})
}
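
Both chaining options compose with the single-interceptor options as described above. A minimal client-side sketch, not part of this diff, with a hypothetical address and illustrative logging interceptors:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging returns an interceptor that logs before and after each call.
func logging(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("%s: before %s", name, method)
		err := invoker(ctx, method, req, reply, cc, opts...)
		log.Printf("%s: after %s", name, method)
		return err
	}
}

func main() {
	conn, err := grpc.Dial("localhost:50051", // hypothetical address
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(logging("outer"), logging("inner")),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}

With this chain, each RPC logs outer-before, inner-before, then inner-after, outer-after, matching the outermost/innermost ordering the doc comments describe.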
// WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header. This value only works with WithInsecure and has no
// effect if TransportCredentials are present.
@ -440,12 +461,12 @@ func WithChannelzParentID(id int64) DialOption {
})
}
// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
// service config provided by the resolver and provides a hint to the resolver
// to not fetch service configs.
//
// Note that, this dial option only disables service config from resolver. If
// default service config is provided, grpc will use the default service config.
// Note that this dial option only disables service config from resolver. If
// default service config is provided, gRPC will use the default service config.
func WithDisableServiceConfig() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableServiceConfig = true
@ -454,8 +475,10 @@ func WithDisableServiceConfig() DialOption {
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
// 1. WithDisableServiceConfig is called.
// 2. Resolver does not return service config or if the resolver gets and invalid config.
//
// 1. WithDisableServiceConfig is also used.
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
//
// This API is EXPERIMENTAL.
func WithDefaultServiceConfig(s string) DialOption {
@ -511,7 +534,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
func defaultDialOptions() dialOptions {
return dialOptions{
disableRetry: !envconfig.Retry,
reqHandshake: envconfig.RequireHandshake,
healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
WriteBufferSize: defaultWriteBufSize,
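
For reference, a minimal sketch, not part of this diff, combining WithDisableServiceConfig with the new WithDefaultServiceConfig documented above (the address is hypothetical; the JSON selects a standard load-balancing policy):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Resolver-provided service configs are ignored, so the default config
	// below is the one that applies.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDisableServiceConfig(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}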

View file

@ -7,13 +7,13 @@ require (
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.2.0
github.com/google/go-cmp v0.2.0
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
golang.org/x/tools v0.0.0-20190311212946-11955173bddd
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
)

View file

@ -26,6 +26,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff"
@ -51,7 +52,11 @@ func init() {
internal.HealthCheckFunc = clientHealthCheck
}
func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
const healthCheckMethod = "/grpc.health.v1.Health/Watch"
// This function implements the protocol defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
tryCnt := 0
retryConnection:
@ -65,7 +70,8 @@ retryConnection:
if ctx.Err() != nil {
return nil
}
rawS, err := newStream()
setConnectivityState(connectivity.Connecting)
rawS, err := newStream(healthCheckMethod)
if err != nil {
continue retryConnection
}
@ -73,7 +79,7 @@ retryConnection:
s, ok := rawS.(grpc.ClientStream)
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LB purposes.
if !ok {
reportHealth(true)
setConnectivityState(connectivity.Ready)
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
}
@ -89,19 +95,23 @@ retryConnection:
// Reports healthy for LB purposes if health checking is not implemented in the server.
if status.Code(err) == codes.Unimplemented {
reportHealth(true)
setConnectivityState(connectivity.Ready)
return err
}
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
if err != nil {
reportHealth(false)
setConnectivityState(connectivity.TransientFailure)
continue retryConnection
}
// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
tryCnt = 0
reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
if resp.Status == healthpb.HealthCheckResponse_SERVING {
setConnectivityState(connectivity.Ready)
} else {
setConnectivityState(connectivity.TransientFailure)
}
}
}
}
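
The server-side counterpart of this client health check is the stock health service. A hedged sketch, not part of this diff, using the real google.golang.org/grpc/health package (the service name and port are placeholders):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	hs := health.NewServer()
	// "my.pkg.MyService" is a placeholder; the empty string "" covers the
	// server as a whole.
	hs.SetServingStatus("my.pkg.MyService", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(s, hs)
	log.Fatal(s.Serve(lis))
}

The clientHealthCheck above then drives this service's Watch stream and maps SERVING/NOT_SERVING onto connectivity states.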

View file

@ -24,6 +24,7 @@
package channelz
import (
"fmt"
"sort"
"sync"
"sync/atomic"
@ -95,9 +96,14 @@ func (d *dbWrapper) get() *channelMap {
// NewChannelzStorage initializes channelz data storage and id generator.
//
// This function returns a cleanup function to wait for all channelz state to be reset by the
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
// don't interfere with each other, i.e. a lingering goroutine from a previous test doing entity removal
// does not happen to remove an entity just registered by the new test, since the id space is the same.
//
// Note: This function is exported for testing purposes only. Users should not call
// it in most cases.
func NewChannelzStorage() {
func NewChannelzStorage() (cleanup func() error) {
db.set(&channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
@ -107,6 +113,28 @@ func NewChannelzStorage() {
subChannels: make(map[int64]*subChannel),
})
idGen.reset()
return func() error {
var err error
cm := db.get()
if cm == nil {
return nil
}
for i := 0; i < 1000; i++ {
cm.mu.Lock()
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
cm.mu.Unlock()
// all things stored in the channelz map have been cleared.
return nil
}
cm.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
cm.mu.Lock()
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
cm.mu.Unlock()
return err
}
}
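
A sketch of how the returned cleanup function is meant to be consumed in tests; channelz is an internal package, so only gRPC's own in-tree tests can import it:

package channelz_test

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestWithChannelz(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer func() {
		// Fails the test if entities registered here linger past the
		// cleanup function's 10s polling window.
		if err := czCleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... exercise code that registers and unregisters channelz entities ...
}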
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a

View file

@ -25,40 +25,11 @@ import (
)
const (
prefix = "GRPC_GO_"
retryStr = prefix + "RETRY"
requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
)
// RequireHandshakeSetting describes the settings for handshaking.
type RequireHandshakeSetting int
const (
// RequireHandshakeOn indicates to wait for handshake before considering a
// connection ready/successful.
RequireHandshakeOn RequireHandshakeSetting = iota
// RequireHandshakeOff indicates to not wait for handshake before
// considering a connection ready/successful.
RequireHandshakeOff
prefix = "GRPC_GO_"
retryStr = prefix + "RETRY"
)
var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
// environment variable.
//
// Will be removed after the 1.18 release.
RequireHandshake = RequireHandshakeOn
)
func init() {
switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
case "on":
fallthrough
default:
RequireHandshake = RequireHandshakeOn
case "off":
RequireHandshake = RequireHandshakeOff
}
}

View file

@ -23,6 +23,8 @@ package internal
import (
"context"
"time"
"google.golang.org/grpc/connectivity"
)
var (
@ -37,10 +39,25 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfig is a function to parse JSON service configs into
// opaque data structures.
ParseServiceConfig func(sc string) (interface{}, error)
// StatusRawProto is exported by status/status.go. This func returns a
// pointer to the wrapped Status proto for a given status.Status without a
// call to proto.Clone(). The returned Status proto should not be mutated by
// the caller.
StatusRawProto interface{} // func (*status.Status) *spb.Status
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.

View file

@ -23,6 +23,7 @@ import (
"fmt"
"runtime"
"sync"
"sync/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
@ -84,12 +85,24 @@ func (il *itemList) isEmpty() bool {
// the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc.
// maxQueuedTransportResponseFrames is the most queued "transport response"
// frames we will buffer before preventing new reads from occurring on the
// transport. These are control frames sent in response to client requests,
// such as RST_STREAM due to bad headers or settings acks.
const maxQueuedTransportResponseFrames = 50
type cbItem interface {
isTransportResponseFrame() bool
}
// registerStream is used to register an incoming stream with loopy writer.
type registerStream struct {
streamID uint32
wq *writeQuota
}
func (*registerStream) isTransportResponseFrame() bool { return false }
// headerFrame is also used to register stream on the client-side.
type headerFrame struct {
streamID uint32
@ -102,6 +115,10 @@ type headerFrame struct {
onOrphaned func(error) // Valid on client-side
}
func (h *headerFrame) isTransportResponseFrame() bool {
return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
}
type cleanupStream struct {
streamID uint32
rst bool
@ -109,6 +126,8 @@ type cleanupStream struct {
onWrite func()
}
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
type dataFrame struct {
streamID uint32
endStream bool
@ -119,27 +138,41 @@ type dataFrame struct {
onEachWrite func()
}
func (*dataFrame) isTransportResponseFrame() bool { return false }
type incomingWindowUpdate struct {
streamID uint32
increment uint32
}
func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
type outgoingWindowUpdate struct {
streamID uint32
increment uint32
}
func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
return false // window updates are throttled by thresholds
}
type incomingSettings struct {
ss []http2.Setting
}
func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
type outgoingSettings struct {
ss []http2.Setting
}
func (*outgoingSettings) isTransportResponseFrame() bool { return false }
type incomingGoAway struct {
}
func (*incomingGoAway) isTransportResponseFrame() bool { return false }
type goAway struct {
code http2.ErrCode
debugData []byte
@ -147,15 +180,21 @@ type goAway struct {
closeConn bool
}
func (*goAway) isTransportResponseFrame() bool { return false }
type ping struct {
ack bool
data [8]byte
}
func (*ping) isTransportResponseFrame() bool { return true }
type outFlowControlSizeRequest struct {
resp chan uint32
}
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
type outStreamState int
const (
@ -238,6 +277,14 @@ type controlBuffer struct {
consumerWaiting bool
list *itemList
err error
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
trfChan atomic.Value // *chan struct{}
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
@ -248,12 +295,24 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
}
}
func (c *controlBuffer) put(it interface{}) error {
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
// controlbuf.
func (c *controlBuffer) throttle() {
ch, _ := c.trfChan.Load().(*chan struct{})
if ch != nil {
select {
case <-*ch:
case <-c.done:
}
}
}
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
var wakeUp bool
c.mu.Lock()
if c.err != nil {
@ -271,6 +330,15 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
c.consumerWaiting = false
}
c.list.enqueue(it)
if it.isTransportResponseFrame() {
c.transportResponseFrames++
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
ch := make(chan struct{})
c.trfChan.Store(&ch)
}
}
c.mu.Unlock()
if wakeUp {
select {
@ -304,7 +372,17 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
return nil, c.err
}
if !c.list.isEmpty() {
h := c.list.dequeue()
h := c.list.dequeue().(cbItem)
if h.isTransportResponseFrame() {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
ch := c.trfChan.Load().(*chan struct{})
close(*ch)
c.trfChan.Store((*chan struct{})(nil))
}
c.transportResponseFrames--
}
c.mu.Unlock()
return h, nil
}
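
To make the throttling flow easier to follow, here is a standalone sketch, with assumed names, of the same pattern: count queued response frames, publish a channel through an atomic.Value once the threshold is hit, and block readers on it until the count drops back below the threshold.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const threshold = 50 // mirrors maxQueuedTransportResponseFrames

type throttler struct {
	mu      sync.Mutex
	queued  int          // queued transport response frames
	trfChan atomic.Value // *chan struct{}, non-nil while throttling
	done    chan struct{}
}

// add records one queued response frame; crossing the threshold publishes
// a throttling channel.
func (t *throttler) add() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.queued++
	if t.queued == threshold {
		ch := make(chan struct{})
		t.trfChan.Store(&ch)
	}
}

// remove records one dequeued response frame; dropping below the threshold
// closes and clears the throttling channel, releasing blocked readers.
func (t *throttler) remove() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.queued == threshold {
		ch := t.trfChan.Load().(*chan struct{})
		close(*ch)
		t.trfChan.Store((*chan struct{})(nil))
	}
	t.queued--
}

// throttle blocks the reader while too many response frames are queued.
func (t *throttler) throttle() {
	if ch, _ := t.trfChan.Load().(*chan struct{}); ch != nil {
		select {
		case <-*ch:
		case <-t.done:
		}
	}
}

func main() {
	th := &throttler{done: make(chan struct{})}
	for i := 0; i < threshold; i++ {
		th.add() // the 50th add arms the throttle
	}
	go th.remove() // dropping below the threshold releases it
	th.throttle()  // blocks until remove closes the published channel
	fmt.Println("released")
}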

View file

@ -149,6 +149,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
n = uint32(math.MaxInt32)
}
f.mu.Lock()
defer f.mu.Unlock()
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
// can send without a window update.
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@ -169,10 +170,8 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
f.delta = n
}
f.mu.Unlock()
return f.delta
}
f.mu.Unlock()
return 0
}

View file

@ -24,6 +24,7 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -347,7 +348,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ht.stats.HandleRPC(s.ctx, inHeader)
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
windowHandler: func(int) {},
}
@ -361,7 +362,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
for buf := make([]byte, readSize); ; {
n, err := req.Body.Read(buf)
if n > 0 {
s.buf.put(recvMsg{data: buf[:n:n]})
s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
buf = buf[n:]
}
if err != nil {

View file

@ -117,6 +117,8 @@ type http2Client struct {
onGoAway func(GoAwayReason)
onClose func()
bufferPool *bufferPool
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@ -249,6 +251,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
onGoAway: onGoAway,
onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
@ -367,6 +370,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -437,6 +441,15 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
for _, vv := range added {
for i, v := range vv {
if i%2 == 0 {
@ -450,15 +463,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
}
}
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
for k, vv := range *md {
@ -489,6 +493,9 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string {
}
func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
if len(t.perRPCCreds) == 0 {
return nil, nil
}
authData := map[string]string{}
for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience)
@ -509,7 +516,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
}
func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
callAuthData := map[string]string{}
var callAuthData map[string]string
// Check if credentials.PerRPCCredentials were provided via call options.
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
@ -521,6 +528,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
if err != nil {
return nil, status.Errorf(codes.Internal, "transport: %v", err)
}
callAuthData = make(map[string]string, len(data))
for k, v := range data {
// Capital header names are illegal in HTTP/2
k = strings.ToLower(k)
@ -549,10 +557,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
s.write(recvMsg{err: err})
close(s.done)
// If headerChan isn't closed, then close it.
if atomic.SwapUint32(&s.headerDone, 1) == 0 {
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
close(s.headerChan)
}
}
hdr := &headerFrame{
hf: headerFields,
@ -713,7 +720,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
s.write(recvMsg{err: err})
}
// If headerChan isn't closed, then close it.
if atomic.SwapUint32(&s.headerDone, 1) == 0 {
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
s.noHeaders = true
close(s.headerChan)
}
@ -765,6 +772,9 @@ func (t *http2Client) Close() error {
t.mu.Unlock()
return nil
}
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
t.onClose()
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
@ -785,7 +795,6 @@ func (t *http2Client) Close() error {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
t.onClose()
return err
}
@ -794,21 +803,21 @@ func (t *http2Client) Close() error {
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() error {
func (t *http2Client) GracefulClose() {
t.mu.Lock()
// Make sure we move to draining only from active.
if t.state == draining || t.state == closing {
t.mu.Unlock()
return nil
return
}
t.state = draining
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
return t.Close()
t.Close()
return
}
t.controlBuf.put(&incomingGoAway{})
return nil
}
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
@ -946,9 +955,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
data := make([]byte, len(f.Data()))
copy(data, f.Data())
s.write(recvMsg{data: data})
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
}
}
// The server has closed the stream without sending trailers. Record that
@ -973,9 +983,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
statusCode = codes.Unknown
}
if statusCode == codes.Canceled {
// Our deadline was already exceeded, and that was likely the cause of
// this cancelation. Alter the status code accordingly.
if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
// of this cancelation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@ -1080,11 +1090,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
default:
t.setGoAwayReason(f)
close(t.goAway)
t.state = draining
t.controlBuf.put(&incomingGoAway{})
// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
// Notify the clientconn about the GOAWAY before we set the state to
// draining, to allow the client to stop attempting to create streams
// before disallowing new streams on this connection.
t.onGoAway(t.goAwayReason)
t.state = draining
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@ -1142,26 +1153,24 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
endStream := frame.StreamEnded()
atomic.StoreUint32(&s.bytesReceived, 1)
initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0
initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
if !initialHeader && !endStream {
// As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear
// at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
// As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
return
}
state := &decodeState{}
// Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received
// which indicates peer speaking gRPC, we are in gRPC mode.
// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
state.data.isGRPC = !initialHeader
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
return
}
var isHeader bool
isHeader := false
defer func() {
if t.statsHandler != nil {
if isHeader {
@ -1180,10 +1189,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}()
// If headers haven't been received yet.
if initialHeader {
// If headerChan hasn't been closed yet
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
if !endStream {
// Headers frame is ResponseHeader.
// HEADERS frame block carries a Response-Headers.
isHeader = true
// These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed
@ -1192,14 +1201,17 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if len(state.data.mdata) > 0 {
s.header = state.data.mdata
}
close(s.headerChan)
return
} else {
// HEADERS frame block carries a Trailers-Only.
s.noHeaders = true
}
// Headers frame is Trailers-only.
s.noHeaders = true
close(s.headerChan)
}
if !endStream {
return
}
// if client received END_STREAM from server while stream was still active, send RST_STREAM
rst := s.getState() == streamActive
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
@ -1233,6 +1245,7 @@ func (t *http2Client) reader() {
// loop to keep reading incoming messages on this transport.
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
if t.keepaliveEnabled {
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
@ -1320,6 +1333,7 @@ func (t *http2Client) keepalive() {
timer.Reset(t.kp.Time)
continue
}
infof("transport: closing client transport due to idleness.")
t.Close()
return
case <-t.ctx.Done():

View file

@ -35,9 +35,11 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
@ -55,6 +57,9 @@ var (
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
// statusRawProto is a function to get to the raw status proto wrapped in a
// status.Status without a proto.Clone().
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
)
// http2Server implements the ServerTransport interface with HTTP2.
@ -119,6 +124,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czData *channelzData
bufferPool *bufferPool
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -220,6 +226,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
kep: kep,
initialWindowSize: iwz,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow {
@ -405,9 +412,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -428,6 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
defer close(t.readerDone)
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
@ -591,9 +600,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
data := make([]byte, len(f.Data()))
copy(data, f.Data())
s.write(recvMsg{data: data})
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
}
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
@ -757,6 +767,10 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
return nil
}
func (t *http2Server) setResetPingStrikes() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
func (t *http2Server) writeHeaderLocked(s *Stream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
@ -771,9 +785,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
streamID: s.id,
hf: headerFields,
endStream: false,
onWrite: func() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
onWrite: t.setResetPingStrikes,
})
if !success {
if err != nil {
@ -817,7 +829,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
@ -833,9 +845,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
streamID: s.id,
hf: headerFields,
endStream: true,
onWrite: func() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
onWrite: t.setResetPingStrikes,
}
s.hdrMu.Unlock()
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
@ -887,12 +897,10 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
df := &dataFrame{
streamID: s.id,
h: hdr,
d: data,
onEachWrite: func() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
streamID: s.id,
h: hdr,
d: data,
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select {
@ -958,6 +966,7 @@ func (t *http2Server) keepalive() {
select {
case <-maxAge.C:
// Close the connection after grace period.
infof("transport: closing server transport due to maximum connection age.")
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
@ -971,6 +980,7 @@ func (t *http2Server) keepalive() {
continue
}
if pingSent {
infof("transport: closing server transport due to idleness.")
t.Close()
// Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity)
@ -1019,13 +1029,7 @@ func (t *http2Server) Close() error {
}
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
oldState = s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return oldState
}
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
@ -1047,15 +1051,13 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState stream
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
return oldState
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
oldState := t.deleteStream(s, eosReceived)
// If the stream is already closed, then don't put trailing header to controlbuf.
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return
}
@ -1063,14 +1065,18 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {},
onWrite: func() {
t.deleteStream(s, eosReceived)
},
}
t.controlBuf.put(hdr)
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: rst,

View file

@ -22,6 +22,7 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -39,10 +40,32 @@ import (
"google.golang.org/grpc/tap"
)
type bufferPool struct {
pool sync.Pool
}
func newBufferPool() *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
}
}
func (p *bufferPool) get() *bytes.Buffer {
return p.pool.Get().(*bytes.Buffer)
}
func (p *bufferPool) put(b *bytes.Buffer) {
p.pool.Put(b)
}
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
data []byte
buffer *bytes.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@ -117,8 +140,9 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last []byte // Stores the remaining data in the previous calls.
last *bytes.Buffer // Stores the remaining data in the previous calls.
err error
freeBuffer func(*bytes.Buffer)
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
@ -128,10 +152,13 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil && len(r.last) > 0 {
if r.last != nil {
// Read remaining data left in last call.
copied := copy(p, r.last)
r.last = r.last[copied:]
copied, _ := r.last.Read(p)
if r.last.Len() == 0 {
r.freeBuffer(r.last)
r.last = nil
}
return copied, nil
}
if r.closeStream != nil {
@ -157,6 +184,19 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// r.readAdditional acts on that message and returns the necessary error.
select {
case <-r.ctxDone:
// Note that this adds the ctx error to the end of recv buffer, and
// reads from the head. This will delay the error until recv buffer is
// empty, thus will delay ctx cancellation in Recv().
//
// It's done this way to fix a race between ctx cancel and trailer. The
// race was, stream.Recv() may return ctx error if ctxDone wins the
// race, but stream.Trailer() may return a non-nil md because the stream
// was not marked as done when the trailer was received. This closeStream
// call will mark the stream as done, thus fixing the race.
//
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, p)
@ -170,8 +210,13 @@ func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error
if m.err != nil {
return 0, m.err
}
copied := copy(p, m.data)
r.last = m.data[copied:]
copied, _ := m.buffer.Read(p)
if m.buffer.Len() == 0 {
r.freeBuffer(m.buffer)
r.last = nil
} else {
r.last = m.buffer
}
return copied, nil
}
@ -204,8 +249,8 @@ type Stream struct {
// is used to adjust flow control, if needed.
requestRead func(int)
headerChan chan struct{} // closed to indicate the end of header metadata.
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
headerChan chan struct{} // closed to indicate the end of header metadata.
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
@ -266,6 +311,14 @@ func (s *Stream) waitOnHeader() error {
}
select {
case <-s.ctx.Done():
// We prefer success over failure when reading messages because we delay
// context error in stream.Read(). To keep behavior consistent, we also
// prefer success here.
select {
case <-s.headerChan:
return nil
default:
}
return ContextErr(s.ctx.Err())
case <-s.headerChan:
return nil
@ -578,9 +631,12 @@ type ClientTransport interface {
// is called only once.
Close() error
// GracefulClose starts to tear down the transport. It stops accepting
// new RPCs and wait the completion of the pending RPCs.
GracefulClose() error
// GracefulClose starts to tear down the transport: the transport will stop
// accepting new RPCs and NewStream will return error. Once all streams are
// finished, the transport will close.
//
// It does not block.
GracefulClose()
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.

View file

@ -17,9 +17,8 @@
*/
// Package naming defines the naming API and related data structures for gRPC.
// The interface is EXPERIMENTAL and may be subject to change.
//
// Deprecated: please use package resolver.
// This package is deprecated: please use package resolver instead.
package naming
// Operation defines the corresponding operations for a name resolution change.

View file

@ -120,6 +120,14 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
bp.mu.Unlock()
select {
case <-ctx.Done():
if connectionErr := bp.connectionError(); connectionErr != nil {
switch ctx.Err() {
case context.DeadlineExceeded:
return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
case context.Canceled:
return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
}
}
return nil, nil, ctx.Err()
case <-ch:
}

View file

@ -51,14 +51,18 @@ type pickfirstBalancer struct {
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
}
return
}
if b.sc == nil {
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
if err != nil {
//TODO(yuxuanli): why not change the cc state to Idle?
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
if grpclog.V(2) {
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
return
}
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
@ -70,9 +74,13 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
}
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
}
if b.sc != sc {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
}
return
}
if s == connectivity.Shutdown {

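grpclog.V gates both the log noise and the formatting cost behind the process verbosity. A minimal sketch, not part of this diff:

package main

import "google.golang.org/grpc/grpclog"

func main() {
	// V(2) is true only when verbosity is at least 2, e.g. when the binary
	// runs with GRPC_GO_LOG_VERBOSITY_LEVEL=2; otherwise Infof is skipped.
	if grpclog.V(2) {
		grpclog.Infof("balancer: verbose state change details: %v", "…")
	}
}
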
64 vendor/google.golang.org/grpc/preloader.go generated vendored Normal file
View file

@ -0,0 +1,64 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// PreparedMsg is responsible for creating a marshalled and compressed object.
//
// This API is EXPERIMENTAL.
type PreparedMsg struct {
// Struct for preparing messages before sending them
encodedData []byte
hdr []byte
payload []byte
}
// Encode marshals and compresses the message using the codec and compressor for the stream.
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
ctx := s.Context()
rpcInfo, ok := rpcInfoFromContext(ctx)
if !ok {
return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
}
// check if the context has the relevant information to prepareMsg
if rpcInfo.preloaderInfo == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
}
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
// prepare the msg
data, err := encode(rpcInfo.preloaderInfo.codec, msg)
if err != nil {
return err
}
p.encodedData = data
compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
if err != nil {
return err
}
p.hdr, p.payload = msgHeader(data, compData)
return nil
}
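
A hedged usage sketch: encode once with PreparedMsg, then hand the prepared bytes to SendMsg, which in this release recognizes *PreparedMsg and skips re-encoding. The stream and message values come from a hypothetical streaming handler:

package main

import "google.golang.org/grpc"

// broadcast encodes each message once and sends the prepared bytes; in a
// real server, stream would be a generated stream type (e.g. a hypothetical
// MyService_WatchServer) and msgs would be concrete proto messages.
func broadcast(stream grpc.ServerStream, msgs []interface{}) error {
	for _, m := range msgs {
		var pm grpc.PreparedMsg
		if err := pm.Encode(stream, m); err != nil {
			return err
		}
		// SendMsg detects *PreparedMsg and reuses the encoded payload.
		if err := stream.SendMsg(&pm); err != nil {
			return err
		}
	}
	return nil
}

func main() {} // handler wiring omitted

This avoids re-marshalling and re-compressing the same message when it must be written more than once.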

View file

@ -66,6 +66,9 @@ var (
var (
defaultResolver netResolver = net.DefaultResolver
// To prevent excessive re-resolution, we enforce a rate limit on DNS
// resolution requests.
minDNSResRate = 30 * time.Second
)
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
@ -241,7 +244,13 @@ func (d *dnsResolver) watcher() {
return
case <-d.t.C:
case <-d.rn:
if !d.t.Stop() {
// Before resetting a timer, it should be stopped to prevent racing with
// reads on its channel.
<-d.t.C
}
}
result, sc := d.lookup()
// Next lookup should happen within an interval defined by d.freq. It may be
// more often due to exponential retry on empty address list.
@ -254,6 +263,16 @@ func (d *dnsResolver) watcher() {
}
d.cc.NewServiceConfig(sc)
d.cc.NewAddress(result)
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
// will be queued in d.rn.
t := time.NewTimer(minDNSResRate)
select {
case <-t.C:
case <-d.ctx.Done():
t.Stop()
return
}
}
}
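
The Stop-then-drain dance above is the standard timer-reuse pattern. A self-contained sketch with an assumed manual-trigger channel standing in for d.rn:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(30 * time.Second)
	defer t.Stop()

	manual := make(chan struct{}, 1)
	manual <- struct{}{} // simulate an on-demand re-resolution request

	select {
	case <-t.C: // periodic tick consumed normally
	case <-manual:
		// The timer may have fired concurrently; Stop reports false in
		// that case and the pending tick must be drained before Reset,
		// otherwise a stale tick races the next read of t.C.
		if !t.Stop() {
			<-t.C
		}
	}
	t.Reset(30 * time.Second)
	fmt.Println("timer safely reset")
}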

View file

@ -20,6 +20,10 @@
// All APIs in this package are experimental.
package resolver
import (
"google.golang.org/grpc/serviceconfig"
)
var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
@ -100,11 +104,12 @@ type BuildOption struct {
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
Addresses []Address // Resolved addresses for the target
ServiceConfig string // JSON representation of the service config
Addresses []Address // Resolved addresses for the target
// ServiceConfig is the parsed service config; obtained from
// serviceconfig.Parse.
ServiceConfig serviceconfig.Config
// TODO: add Err error
// TODO: add ParsedServiceConfig interface{}
}
// ClientConn contains the callbacks for resolver to notify any updates
@ -132,6 +137,21 @@ type ClientConn interface {
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext by the user, and
// gRPC passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
//
// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
// be the full target string. e.g. "foo.bar" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
//
// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
type Target struct {
Scheme string
Authority string

View file

@ -138,19 +138,22 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
return
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
c, err := parseServiceConfig(sc)
if err != nil {
return
}
ccr.curState.ServiceConfig = sc
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
}
ccr.curState.ServiceConfig = c
ccr.cc.updateResolverState(ccr.curState)
}
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
return
}
var updates []string
if s.ServiceConfig != ccr.curState.ServiceConfig {
oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
newSC, newOK := s.ServiceConfig.(*ServiceConfig)
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
updates = append(updates, "service config updated")
}
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {

View file

@ -694,14 +694,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return nil
}
// Information about RPC
type rpcInfo struct {
failfast bool
failfast bool
preloaderInfo *compressorInfo
}
// Information about the preloader.
// Responsible for storing the codec and compressors.
// If a stream's context stores an rpcInfo with non-nil pointers to the codec
// and compressors, preparedMsg can be used for async message preparation and
// marshalled bytes can be reused.
type compressorInfo struct {
codec baseCodec
cp Compressor
comp encoding.Compressor
}
type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
preloaderInfo: &compressorInfo{
codec: codec,
cp: cp,
comp: comp,
},
})
}
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {

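rpcInfoContextKey follows the usual unexported-context-key pattern. A generic, self-contained sketch of the same idea (simplified rpcInfo without the preloader fields):

package main

import (
	"context"
	"fmt"
)

type rpcInfo struct {
	failfast bool
}

// An unexported zero-size key type guarantees no collisions with values
// stored by other packages.
type rpcInfoContextKey struct{}

func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
}

func rpcInfoFromContext(ctx context.Context) (*rpcInfo, bool) {
	s, ok := ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
	return s, ok
}

func main() {
	ctx := newContextWithRPCInfo(context.Background(), true)
	if info, ok := rpcInfoFromContext(ctx); ok {
		fmt.Println("failfast:", info.failfast)
	}
}
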
View file

@ -42,6 +42,7 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@ -56,6 +57,8 @@ const (
defaultServerMaxSendMessageSize = math.MaxInt32
)
var statusOK = status.New(codes.OK, "")
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification.
@ -86,21 +89,19 @@ type service struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts options
opts serverOptions
mu sync.Mutex // guards following
lis map[net.Listener]bool
conns map[io.Closer]bool
conns map[transport.ServerTransport]bool
serve bool
drain bool
cv *sync.Cond // signaled when connections close for GracefulStop
m map[string]*service // service name -> service info
events trace.EventLog
quit chan struct{}
done chan struct{}
quitOnce sync.Once
doneOnce sync.Once
quit *grpcsync.Event
done *grpcsync.Event
channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
@ -108,7 +109,7 @@ type Server struct {
czData *channelzData
}
type options struct {
type serverOptions struct {
creds credentials.TransportCredentials
codec baseCodec
cp Compressor
@ -131,7 +132,7 @@ type options struct {
maxHeaderListSize *uint32
}
var defaultServerOptions = options{
var defaultServerOptions = serverOptions{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
@ -140,7 +141,33 @@ var defaultServerOptions = options{
}
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)
type ServerOption interface {
apply(*serverOptions)
}
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// This API is EXPERIMENTAL.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
f func(*serverOptions)
}
func (fdo *funcServerOption) apply(do *serverOptions) {
fdo.f(do)
}
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
return &funcServerOption{
f: f,
}
}
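
Turning ServerOption into an interface lets external packages define options that gRPC itself ignores, by embedding EmptyServerOption (its no-op apply satisfies the interface). A hedged sketch with a hypothetical traceOption and wrapper constructor:

package main

import "google.golang.org/grpc"

// traceOption is a hypothetical custom option a wrapper library could accept
// alongside real grpc.ServerOptions and extract before calling grpc.NewServer.
type traceOption struct {
	grpc.EmptyServerOption
	name string
}

func newServerWithTracing(opts ...grpc.ServerOption) *grpc.Server {
	for _, o := range opts {
		if t, ok := o.(traceOption); ok {
			_ = t.name // configure the wrapper's tracing here
		}
	}
	// traceOption's promoted apply is a no-op, so passing it through is safe.
	return grpc.NewServer(opts...)
}

func main() {
	s := newServerWithTracing(traceOption{name: "demo"})
	_ = s
}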
// WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
@ -148,9 +175,9 @@ type ServerOption func(*options)
// Zero will disable the write buffer such that each write will be on underlying connection.
// Note: A Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.writeBufferSize = s
}
})
}
// ReadBufferSize lets you set the size of the read buffer; this determines how much data can be read at most
@ -159,25 +186,25 @@ func WriteBufferSize(s int) ServerOption {
// Zero will disable read buffer for a connection so data framer can access the underlying
// conn directly.
func ReadBufferSize(s int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s
}
})
}
// InitialWindowSize returns a ServerOption that sets the window size for a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.initialWindowSize = s
}
})
}
// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.initialConnWindowSize = s
}
})
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
@ -187,25 +214,25 @@ func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
kp.Time = time.Second
}
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.keepaliveParams = kp
}
})
}
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.keepalivePolicy = kep
}
})
}
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
}
})
}
// RPCCompressor returns a ServerOption that sets a compressor for outbound
@ -216,9 +243,9 @@ func CustomCodec(codec Codec) ServerOption {
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.cp = cp
}
})
}
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
@ -227,9 +254,9 @@ func RPCCompressor(cp Compressor) ServerOption {
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.dc = dc
}
})
}
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
@ -243,73 +270,73 @@ func MaxMsgSize(m int) ServerOption {
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxReceiveMessageSize = m
}
})
}
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default `math.MaxInt32`.
func MaxSendMsgSize(m int) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxSendMessageSize = m
}
})
}
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n
}
})
}
// Creds returns a ServerOption that sets credentials for server connections.
func Creds(c credentials.TransportCredentials) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.creds = c
}
})
}
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
// server. Only one unary interceptor can be installed. The construction of multiple
// interceptors (e.g., chaining) can be implemented at the caller.
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.unaryInt != nil {
panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
})
}
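Since only one unary interceptor may be installed, chaining is left to the caller, as the comment notes; a hedged caller-side sketch (chainUnary and logging are illustrative helpers, not grpc API):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// chainUnary composes several interceptors into the single slot that
// UnaryInterceptor allows, invoking them left to right.
func chainUnary(ints ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		chained := handler
		for i := len(ints) - 1; i >= 0; i-- {
			ic, next := ints[i], chained
			chained = func(ctx context.Context, req interface{}) (interface{}, error) {
				return ic(ctx, req, info, next)
			}
		}
		return chained(ctx, req)
	}
}

// logging is a trivial example interceptor.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (interface{}, error) {
	log.Println("calling", info.FullMethod)
	return h(ctx, req)
}

func main() {
	s := grpc.NewServer(grpc.UnaryInterceptor(chainUnary(logging)))
	defer s.Stop()
}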
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
// server. Only one stream interceptor can be installed.
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.streamInt != nil {
panic("The stream server interceptor was already set and may not be reset.")
}
o.streamInt = i
}
})
}
// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed.
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
if o.inTapHandle != nil {
panic("The tap handle was already set and may not be reset.")
}
o.inTapHandle = h
}
})
}
// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.statsHandler = h
}
})
}
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
@ -319,7 +346,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// The handling function has full access to the Context of the request and the
// stream, and the invocation bypasses interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.unknownStreamDesc = &StreamDesc{
StreamName: "unknown_service_handler",
Handler: streamHandler,
@ -327,7 +354,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
ClientStreams: true,
ServerStreams: true,
}
}
})
}
// ConnectionTimeout returns a ServerOption that sets the timeout for
@ -337,17 +364,17 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
//
// This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d
}
})
}
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of the header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
return func(o *options) {
return newFuncServerOption(func(o *serverOptions) {
o.maxHeaderListSize = &s
}
})
}
// NewServer creates a gRPC server which has no service registered and has not
@ -355,15 +382,15 @@ func MaxHeaderListSize(s uint32) ServerOption {
func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions
for _, o := range opt {
o(&opts)
o.apply(&opts)
}
s := &Server{
lis: make(map[net.Listener]bool),
opts: opts,
conns: make(map[io.Closer]bool),
conns: make(map[transport.ServerTransport]bool),
m: make(map[string]*service),
quit: make(chan struct{}),
done: make(chan struct{}),
quit: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
czData: new(channelzData),
}
s.cv = sync.NewCond(&s.mu)
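A usage sketch showing that call sites are unchanged by the ServerOption migration (address and sizes are arbitrary):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// Each option below now returns the interface-based ServerOption,
	// but the calling code is identical to before.
	s := grpc.NewServer(
		grpc.WriteBufferSize(64*1024),
		grpc.MaxRecvMsgSize(8*1024*1024),
		grpc.ConnectionTimeout(30*time.Second),
	)
	defer s.Stop()
	log.Println("serving on", lis.Addr())
	_ = s.Serve(lis)
}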
@ -530,11 +557,9 @@ func (s *Server) Serve(lis net.Listener) error {
s.serveWG.Add(1)
defer func() {
s.serveWG.Done()
select {
// Stop or GracefulStop called; block until done and return nil.
case <-s.quit:
<-s.done
default:
if s.quit.HasFired() {
// Stop or GracefulStop called; block until done and return nil.
<-s.done.Done()
}
}()
@ -577,7 +602,7 @@ func (s *Server) Serve(lis net.Listener) error {
timer := time.NewTimer(tempDelay)
select {
case <-timer.C:
case <-s.quit:
case <-s.quit.Done():
timer.Stop()
return nil
}
@ -587,10 +612,8 @@ func (s *Server) Serve(lis net.Listener) error {
s.printf("done serving; Accept = %v", err)
s.mu.Unlock()
select {
case <-s.quit:
if s.quit.HasFired() {
return nil
default:
}
return err
}
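The quit/done channel-plus-sync.Once pairs become a fire-once event exposing Fire, Done, and HasFired; an illustrative re-implementation of such a primitive (not the vendored grpcsync code):

package main

import (
	"fmt"
	"sync"
)

// event is a fire-once signal sketching the shape of the internal
// grpcsync.Event used in the hunks above.
type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }

// Fire closes the channel exactly once; extra calls are no-ops.
func (e *event) Fire() { e.once.Do(func() { close(e.c) }) }

// Done returns a channel that is closed once Fire has been called.
func (e *event) Done() <-chan struct{} { return e.c }

// HasFired reports whether Fire has been called, without blocking.
func (e *event) HasFired() bool {
	select {
	case <-e.c:
		return true
	default:
		return false
	}
}

func main() {
	quit := newEvent()
	fmt.Println(quit.HasFired()) // false
	quit.Fire()
	quit.Fire() // safe: fire-once
	<-quit.Done()
	fmt.Println(quit.HasFired()) // true
}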
@ -611,6 +634,10 @@ func (s *Server) Serve(lis net.Listener) error {
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
if s.quit.HasFired() {
rawConn.Close()
return
}
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
if err != nil {
@ -627,14 +654,6 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
return
}
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
conn.Close()
return
}
s.mu.Unlock()
// Finish handshaking (HTTP2)
st := s.newHTTP2Transport(conn, authInfo)
if st == nil {
@ -742,6 +761,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
if !EnableTracing {
return nil
}
tr, ok := trace.FromContext(stream.Context())
if !ok {
return nil
@ -760,27 +782,27 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
return trInfo
}
func (s *Server) addConn(c io.Closer) bool {
func (s *Server) addConn(st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
c.Close()
st.Close()
return false
}
if s.drain {
// Transport added after we drained our existing conns: drain it
// immediately.
c.(transport.ServerTransport).Drain()
st.Drain()
}
s.conns[c] = true
s.conns[st] = true
return true
}
func (s *Server) removeConn(c io.Closer) {
func (s *Server) removeConn(st transport.ServerTransport) {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns != nil {
delete(s.conns, c)
delete(s.conns, st)
s.cv.Broadcast()
}
}
@ -952,10 +974,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
if sh != nil {
sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
Data: d,
Length: len(d),
RecvTime: time.Now(),
Payload: v,
WireLength: payInfo.wireLength,
Data: d,
Length: len(d),
})
}
if binlog != nil {
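WireLength on stats.InPayload is now populated on this path; a sketch of a stats.Handler that observes it, assuming the standard grpc.StatsHandler wiring:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// payloadLogger implements stats.Handler and logs the wire length of
// inbound payloads, which this hunk starts populating on the unary path.
type payloadLogger struct{}

func (payloadLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (payloadLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InPayload); ok {
		log.Printf("payload: %d bytes decoded, %d bytes on the wire", in.Length, in.WireLength)
	}
}
func (payloadLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (payloadLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	// Attach the handler to a server; it fires for every RPC.
	s := grpc.NewServer(grpc.StatsHandler(payloadLogger{}))
	defer s.Stop()
}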
@ -1051,7 +1074,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
// TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
err = t.WriteStatus(stream, status.New(codes.OK, ""))
err = t.WriteStatus(stream, statusOK)
if binlog != nil {
binlog.Log(&binarylog.ServerTrailer{
Trailer: stream.Trailer(),
@ -1209,7 +1232,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
err = t.WriteStatus(ss.s, statusOK)
if ss.binlog != nil {
ss.binlog.Log(&binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
@ -1326,15 +1349,11 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
s.quitOnce.Do(func() {
close(s.quit)
})
s.quit.Fire()
defer func() {
s.serveWG.Wait()
s.doneOnce.Do(func() {
close(s.done)
})
s.done.Fire()
}()
s.channelzRemoveOnce.Do(func() {
@ -1371,15 +1390,8 @@ func (s *Server) Stop() {
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
s.quitOnce.Do(func() {
close(s.quit)
})
defer func() {
s.doneOnce.Do(func() {
close(s.done)
})
}()
s.quit.Fire()
defer s.done.Fire()
s.channelzRemoveOnce.Do(func() {
if channelz.IsOn() {
@ -1397,8 +1409,8 @@ func (s *Server) GracefulStop() {
}
s.lis = nil
if !s.drain {
for c := range s.conns {
c.(transport.ServerTransport).Drain()
for st := range s.conns {
st.Drain()
}
s.drain = true
}

View file

@ -25,8 +25,11 @@ import (
"strings"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
)
const maxInt = int(^uint(0) >> 1)
@ -61,6 +64,11 @@ type MethodConfig struct {
retryPolicy *retryPolicy
}
type lbConfig struct {
name string
cfg serviceconfig.LoadBalancingConfig
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
//
@ -68,10 +76,18 @@ type MethodConfig struct {
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type ServiceConfig struct {
// LB is the load balancer the service providers recommends. The balancer specified
// via grpc.WithBalancer will override this.
serviceconfig.Config
// LB is the load balancer the service provider recommends. The balancer
// specified via grpc.WithBalancer will override this. This is deprecated;
// lbConfig is preferred. If lbConfig and LB are both present, lbConfig
// will be used.
LB *string
// lbConfig is the service config's load balancing configuration. If
// lbConfig and LB are both present, lbConfig will be used.
lbConfig *lbConfig
// Methods contains a map for the methods in this service. If there is an
// exact match for a method (i.e. /service/method) in the map, use the
// corresponding MethodConfig. If there's no exact match, look for the
@ -233,15 +249,27 @@ type jsonMC struct {
RetryPolicy *jsonRetryPolicy
}
type loadBalancingConfig map[string]json.RawMessage
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
LoadBalancingConfig *[]loadBalancingConfig
MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig
}
func init() {
internal.ParseServiceConfig = func(sc string) (interface{}, error) {
return parseServiceConfig(sc)
}
}
func parseServiceConfig(js string) (*ServiceConfig, error) {
if len(js) == 0 {
return nil, fmt.Errorf("no JSON service config provided")
}
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
@ -255,10 +283,38 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
if rsc.LoadBalancingConfig != nil {
for i, lbcfg := range *rsc.LoadBalancingConfig {
if len(lbcfg) != 1 {
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
grpclog.Warningf(err.Error())
return nil, err
}
var name string
var jsonCfg json.RawMessage
for name, jsonCfg = range lbcfg {
}
builder := balancer.Get(name)
if builder == nil {
continue
}
sc.lbConfig = &lbConfig{name: name}
if parser, ok := builder.(balancer.ConfigParser); ok {
var err error
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
if err != nil {
return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
}
} else if string(jsonCfg) != "{}" {
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
break
}
}
if rsc.MethodConfig == nil {
return &sc, nil
}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
@ -299,11 +355,11 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
}
if sc.retryThrottling != nil {
if sc.retryThrottling.MaxTokens <= 0 ||
sc.retryThrottling.MaxTokens > 1000 ||
sc.retryThrottling.TokenRatio <= 0 {
// Illegal throttling config; disable throttling.
sc.retryThrottling = nil
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
}
if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
}
}
return &sc, nil

View file

@ -0,0 +1,48 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// This package is EXPERIMENTAL.
package serviceconfig
import (
"google.golang.org/grpc/internal"
)
// Config represents an opaque data structure holding a service config.
type Config interface {
isConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
// balancer config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
// Parse parses the JSON service config provided into an internal form or
// returns an error if the config is invalid.
func Parse(ServiceConfigJSON string) (Config, error) {
c, err := internal.ParseServiceConfig(ServiceConfigJSON)
if err != nil {
return nil, err
}
return c.(Config), err
}
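A usage sketch for the new exported entry point; it assumes the grpc package is linked in so the init shown earlier registers the internal parser:

package main

import (
	"fmt"
	"log"

	_ "google.golang.org/grpc" // side effect: registers internal.ParseServiceConfig
	"google.golang.org/grpc/serviceconfig"
)

func main() {
	cfg, err := serviceconfig.Parse(`{"loadBalancingPolicy": "round_robin"}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed service config: %T\n", cfg)
}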

View file

@ -36,8 +36,15 @@ import (
"github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal"
)
func init() {
internal.StatusRawProto = statusRawProto
}
func statusRawProto(s *Status) *spb.Status { return s.s }
// statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package.
type statusError spb.Status
@ -51,6 +58,17 @@ func (se *statusError) GRPCStatus() *Status {
return &Status{s: (*spb.Status)(se)}
}
// Is implements future errors.Is functionality.
// A statusError is equivalent if the code and message are identical.
func (se *statusError) Is(target error) bool {
tse, ok := target.(*statusError)
if !ok {
return false
}
return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
}
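Combined with Go 1.13's errors.Is, two independently constructed status errors with equal code and message now compare as equivalent; a brief sketch (assumes Go 1.13 or later):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	a := status.Error(codes.NotFound, "user not found")
	b := status.Error(codes.NotFound, "user not found")

	// Distinct allocations, so plain equality fails...
	fmt.Println(a == b) // false
	// ...but errors.Is consults the Is method above, which proto-compares
	// the two underlying status protos.
	fmt.Println(errors.Is(a, b)) // true
}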
// Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto.
type Status struct {
@ -125,7 +143,7 @@ func FromProto(s *spb.Status) *Status {
// Status is returned with codes.Unknown and the original error message.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
return nil, true
}
if se, ok := err.(interface {
GRPCStatus() *Status
@ -199,7 +217,7 @@ func Code(err error) codes.Code {
func FromContextError(err error) *Status {
switch err {
case nil:
return New(codes.OK, "")
return nil
case context.DeadlineExceeded:
return New(codes.DeadlineExceeded, err.Error())
case context.Canceled:
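Both nil cases above now return a nil *Status instead of a synthesized OK status; a sketch of the resulting caller-visible behavior:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// nil error: ok is true, but s is now nil (it used to be an OK status).
	s, ok := status.FromError(nil)
	fmt.Println(s, ok) // <nil> true

	// status.Code and the *Status methods are nil-safe and still report OK.
	fmt.Println(status.Code(nil) == codes.OK) // true

	// Context errors still map to real statuses.
	fmt.Println(status.FromContextError(context.DeadlineExceeded).Code()) // DeadlineExceeded
}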

View file

@ -30,7 +30,6 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancerload"
@ -245,7 +244,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr)
}
ctx = newContextWithRPCInfo(ctx, c.failFast)
ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
sh := cc.dopts.copts.StatsHandler
var beginTime time.Time
if sh != nil {
@ -328,13 +327,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cs, nil
}
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
cs.attempt = &csAttempt{
// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
newAttempt := &csAttempt{
cs: cs,
dc: cs.cc.dopts.dc,
statsHandler: sh,
trInfo: trInfo,
}
defer func() {
if retErr != nil {
// This attempt is not set in the clientStream, so its finish won't
// be called. Call it here for stats and trace in case they are not
// nil.
newAttempt.finish(retErr)
}
}()
if err := cs.ctx.Err(); err != nil {
return toRPCErr(err)
@ -346,8 +355,9 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) er
if trInfo != nil {
trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
}
cs.attempt.t = t
cs.attempt.done = done
newAttempt.t = t
newAttempt.done = done
cs.attempt = newAttempt
return nil
}
@ -396,11 +406,18 @@ type clientStream struct {
serverHeaderBinlogged bool
mu sync.Mutex
firstAttempt bool // if true, transparent retry is valid
numRetries int // exclusive of transparent retry attempt(s)
numRetriesSincePushback int // retries since pushback; to reset backoff
finished bool // TODO: replace with atomic cmpxchg or sync.Once?
attempt *csAttempt // the active client stream attempt
firstAttempt bool // if true, transparent retry is valid
numRetries int // exclusive of transparent retry attempt(s)
numRetriesSincePushback int // retries since pushback; to reset backoff
finished bool // TODO: replace with atomic cmpxchg or sync.Once?
// attempt is the active client stream attempt.
// The only place it is written is the newAttemptLocked method, and that method never writes nil.
// So attempt can be nil only inside newClientStream, when the clientStream is first created.
// One of the first things done after the clientStream's creation is to call newAttemptLocked, which
// either assigns a non-nil value to attempt or returns an error. If newAttemptLocked returns an error,
// newClientStream calls finish on the clientStream and returns. So the finish method is the only
// place where we need to check whether attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
committed bool // active attempt committed for retry?
buffer []func(a *csAttempt) error // operations to replay on retry
@ -458,8 +475,8 @@ func (cs *clientStream) shouldRetry(err error) error {
if cs.attempt.s != nil {
<-cs.attempt.s.Done()
}
if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
// First attempt, wait-for-ready, stream unprocessed: transparently retry.
if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
// First attempt, stream unprocessed: transparently retry.
cs.firstAttempt = false
return nil
}
@ -677,15 +694,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if !cs.desc.ClientStreams {
cs.sentLast = true
}
data, err := encode(cs.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
if err != nil {
return err
}
compData, err := compress(data, cs.cp, cs.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
@ -808,11 +823,11 @@ func (cs *clientStream) finish(err error) {
}
if cs.attempt != nil {
cs.attempt.finish(err)
}
// after functions all rely upon having a stream.
if cs.attempt.s != nil {
for _, o := range cs.opts {
o.after(cs.callInfo)
// after functions all rely upon having a stream.
if cs.attempt.s != nil {
for _, o := range cs.opts {
o.after(cs.callInfo)
}
}
}
cs.cancel()
@ -967,19 +982,18 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
ac.mu.Lock()
if ac.transport != t {
ac.mu.Unlock()
return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
}
// transition to CONNECTING state when an attempt starts
if ac.state != connectivity.Connecting {
ac.updateConnectivityState(connectivity.Connecting)
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
ac.mu.Unlock()
// newClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid a race, the transport is specified separately, instead
// of using ac.transport.
//
// The main differences between this and ClientConn.NewStream:
// - no retry
// - no service config (or wait for service config)
// - no tracing or stats
func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
if t == nil {
// TODO: return RPC error here?
return nil, errors.New("transport provided is nil")
@ -987,14 +1001,6 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
// defaultCallInfo contains unnecessary info (e.g. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
c := &callInfo{}
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
// Possible context leak:
// The cancel function for the child context we create will only be called
// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
@ -1007,6 +1013,13 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
}
}()
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
if err := setCallInfoCodec(c); err != nil {
return nil, err
}
@ -1039,6 +1052,7 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
callHdr.Creds = c.creds
}
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{
callHdr: callHdr,
ac: ac,
@ -1150,15 +1164,13 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
if !as.desc.ClientStreams {
as.sentLast = true
}
data, err := encode(as.codec, m)
// load hdr, payload, data
hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
if err != nil {
return err
}
compData, err := compress(data, as.cp, as.comp)
if err != nil {
return err
}
hdr, payld := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payld) > *as.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
@ -1395,15 +1407,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
ss.t.IncrMsgSent()
}
}()
data, err := encode(ss.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil {
return err
}
compData, err := compress(data, ss.cp, ss.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > ss.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
@ -1496,3 +1506,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
// prepareMsg returns the hdr, payload and data, taken either from the
// passed PreparedMsg or by marshaling and compressing m with the given
// codec and compressors.
func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
}
// The input interface is not a PreparedMsg.
// Marshal and compress the data at this point.
data, err = encode(codec, m)
if err != nil {
return nil, nil, nil, err
}
compData, err := compress(data, cp, comp)
if err != nil {
return nil, nil, nil, err
}
hdr, payload = msgHeader(data, compData)
return hdr, payload, data, nil
}
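prepareMsg is the seam that lets a PreparedMsg bypass per-send marshal and compress work; a hedged fan-out sketch using the exported API (stream setup is elided, and it assumes every stream was opened with the same codec and compressor):

package main

import (
	"log"

	"google.golang.org/grpc"
)

// broadcast pre-encodes one message and fans it out to many already-open
// client streams for the same method, paying the marshal/compress cost once.
func broadcast(msg interface{}, streams []grpc.ClientStream) error {
	if len(streams) == 0 {
		return nil
	}
	var p grpc.PreparedMsg
	// Encode marshals (and compresses) msg using the codec and compressor
	// negotiated for the first stream; SendMsg then takes the PreparedMsg
	// fast path in prepareMsg above.
	if err := p.Encode(streams[0], msg); err != nil {
		return err
	}
	for _, s := range streams {
		if err := s.SendMsg(&p); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	log.Println("broadcast amortizes marshal/compress across sends")
	_ = broadcast // wiring real streams requires a live connection
}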

View file

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.20.1"
const Version = "1.23.0"