Update containerd to cf554d59dd96e459544748290eb91
This bumps containerd to cf554d59dd96e459544748290eb9167f4bcde509, which includes various fixes, and updates the vendored grpc package and the generated containerd API types that go with it.

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
parent ce96ce9e05
commit d17b9f3da0

67 changed files with 5898 additions and 2010 deletions
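Editor's note: the regenerated containerd gRPC bindings in this diff (package types) add json= names to the protobuf struct tags and thread a grpc.UnaryServerInterceptor through every unary handler. As a hedged illustration of what that new handler signature enables (this code is not part of the commit; the package name and logging behaviour are invented for the example), a server that calls RegisterAPIServer could wire in a unary interceptor like so:

package apiserver // hypothetical package, for illustration only

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// unaryLogger satisfies grpc.UnaryServerInterceptor; the regenerated
// _API_*_Handler functions invoke it around each unary RPC when one is set.
func unaryLogger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // calls into the generated handler shown in the diff below
	log.Printf("%s took %s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

// newGRPCServer shows where the interceptor is installed; grpc.UnaryInterceptor
// is a standard grpc-go ServerOption.
func newGRPCServer() *grpc.Server {
	return grpc.NewServer(grpc.UnaryInterceptor(unaryLogger))
}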
@@ -244,7 +244,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -191,7 +191,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -200,7 +200,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -85,7 +85,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -215,7 +215,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -208,7 +208,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

@@ -68,7 +68,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 57b7c3da915ebe943bd304c00890959b191e5264
+ENV CONTAINERD_COMMIT cf554d59dd96e459544748290eb9167f4bcde509
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
@@ -56,7 +56,7 @@ clone git github.com/mattn/go-sqlite3 v1.1.0
 clone git github.com/tchap/go-patricia v2.1.0
 clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
 # forked golang.org/x/net package includes a patch for lazy loading trace templates
-clone git golang.org/x/net 78cb2c067747f08b343f20614155233ab4ea2ad3 https://github.com/tonistiigi/net.git
+clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git
 clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
 clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
 clone git github.com/docker/go-connections v0.2.0

@@ -97,7 +97,7 @@ clone git github.com/pborman/uuid v1.0
 # get desired notary commit, might also need to be updated in Dockerfile
 clone git github.com/docker/notary v0.3.0
 
-clone git google.golang.org/grpc a22b6611561e9f0a3e0919690dd2caf48f14c517 https://github.com/grpc/grpc-go.git
+clone git google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git
 clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
 clone git github.com/docker/go v1.5.1-1-1-gbaf439e
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c

@@ -109,7 +109,7 @@ clone git github.com/seccomp/libseccomp-golang 60c9953736798c4a04e90d0f3da2f933d
 clone git github.com/coreos/go-systemd v4
 clone git github.com/godbus/dbus v4.0.0
 clone git github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-clone git github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
+clone git github.com/golang/protobuf 3c84672111d91bb5ac31719e112f9f7126a0e26e
 
 # gelf logging driver deps
 clone git github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883

@@ -136,7 +136,7 @@ clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https
 clone git github.com/docker/docker-credential-helpers v0.3.0
 
 # containerd
-clone git github.com/docker/containerd 57b7c3da915ebe943bd304c00890959b191e5264
+clone git github.com/docker/containerd cf554d59dd96e459544748290eb9167f4bcde509
 
 # cli
 clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git
@@ -115,14 +115,17 @@ func (*UpdateProcessResponse) ProtoMessage() {}
 func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 
 type CreateContainerRequest struct {
-	Id          string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	BundlePath  string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Checkpoint  string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
-	Stdin       string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout      string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr      string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
-	Labels      []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
-	NoPivotRoot bool     `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"`
+	Id            string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	BundlePath    string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
+	Checkpoint    string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Stdin         string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
+	Stdout        string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
+	Stderr        string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
+	Labels        []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
+	NoPivotRoot   bool     `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"`
+	Runtime       string   `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"`
+	RuntimeArgs   []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"`
+	CheckpointDir string   `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
 }
 
 func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} }
@@ -233,8 +236,9 @@ func (*AddProcessResponse) ProtoMessage() {}
 func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
 
 type CreateCheckpointRequest struct {
-	Id         string      `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Id            string      `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	Checkpoint    *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	CheckpointDir string      `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
 }
 
 func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} }
@@ -258,8 +262,9 @@ func (*CreateCheckpointResponse) ProtoMessage() {}
 func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
 
 type DeleteCheckpointRequest struct {
-	Id   string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+	Id            string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	Name          string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+	CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
 }
 
 func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} }
@@ -276,7 +281,8 @@ func (*DeleteCheckpointResponse) ProtoMessage() {}
 func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
 
 type ListCheckpointRequest struct {
-	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	Id            string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
 }
 
 func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} }
@@ -446,16 +452,17 @@ func (m *UpdateContainerRequest) GetResources() *UpdateResource {
 }
 
 type UpdateResource struct {
-	BlkioWeight       uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"`
-	CpuShares         uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"`
-	CpuPeriod         uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"`
-	CpuQuota          uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"`
-	CpusetCpus        string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"`
-	CpusetMems        string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"`
-	MemoryLimit       uint32 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"`
-	MemorySwap        uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"`
-	MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"`
-	KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"`
+	BlkioWeight          uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"`
+	CpuShares            uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"`
+	CpuPeriod            uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"`
+	CpuQuota             uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"`
+	CpusetCpus           string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"`
+	CpusetMems           string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"`
+	MemoryLimit          uint32 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"`
+	MemorySwap           uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"`
+	MemoryReservation    uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"`
+	KernelMemoryLimit    uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"`
+	KernelTCPMemoryLimit uint32 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"`
 }
 
 func (m *UpdateResource) Reset() { *m = UpdateResource{} }
@@ -495,14 +502,14 @@ func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
 
 type NetworkStats struct {
 	Name       string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	RxBytes    uint64 `protobuf:"varint,2,opt,name=rx_bytes" json:"rx_bytes,omitempty"`
-	Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets" json:"rx_Packets,omitempty"`
-	RxErrors   uint64 `protobuf:"varint,4,opt,name=Rx_errors" json:"Rx_errors,omitempty"`
-	RxDropped  uint64 `protobuf:"varint,5,opt,name=Rx_dropped" json:"Rx_dropped,omitempty"`
-	TxBytes    uint64 `protobuf:"varint,6,opt,name=Tx_bytes" json:"Tx_bytes,omitempty"`
-	TxPackets  uint64 `protobuf:"varint,7,opt,name=Tx_packets" json:"Tx_packets,omitempty"`
-	TxErrors   uint64 `protobuf:"varint,8,opt,name=Tx_errors" json:"Tx_errors,omitempty"`
-	TxDropped  uint64 `protobuf:"varint,9,opt,name=Tx_dropped" json:"Tx_dropped,omitempty"`
+	RxBytes    uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"`
+	Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"`
+	RxErrors   uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=rxErrors" json:"Rx_errors,omitempty"`
+	RxDropped  uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=rxDropped" json:"Rx_dropped,omitempty"`
+	TxBytes    uint64 `protobuf:"varint,6,opt,name=Tx_bytes,json=txBytes" json:"Tx_bytes,omitempty"`
+	TxPackets  uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=txPackets" json:"Tx_packets,omitempty"`
+	TxErrors   uint64 `protobuf:"varint,8,opt,name=Tx_errors,json=txErrors" json:"Tx_errors,omitempty"`
+	TxDropped  uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=txDropped" json:"Tx_dropped,omitempty"`
 }
 
 func (m *NetworkStats) Reset() { *m = NetworkStats{} }
@@ -511,10 +518,10 @@ func (*NetworkStats) ProtoMessage() {}
 func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
 
 type CpuUsage struct {
-	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"`
-	PercpuUsage       []uint64 `protobuf:"varint,2,rep,name=percpu_usage" json:"percpu_usage,omitempty"`
-	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode" json:"usage_in_kernelmode,omitempty"`
-	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode" json:"usage_in_usermode,omitempty"`
+	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"`
+	PercpuUsage       []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
+	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"`
+	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"`
 }
 
 func (m *CpuUsage) Reset() { *m = CpuUsage{} }
@@ -524,8 +531,8 @@ func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31
 
 type ThrottlingData struct {
 	Periods          uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"`
-	ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods" json:"throttled_periods,omitempty"`
-	ThrottledTime    uint64 `protobuf:"varint,3,opt,name=throttled_time" json:"throttled_time,omitempty"`
+	ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"`
+	ThrottledTime    uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
 }
 
 func (m *ThrottlingData) Reset() { *m = ThrottlingData{} }
@@ -534,9 +541,9 @@ func (*ThrottlingData) ProtoMessage() {}
 func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
 
 type CpuStats struct {
-	CpuUsage       *CpuUsage       `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"`
-	ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data" json:"throttling_data,omitempty"`
-	SystemUsage    uint64          `protobuf:"varint,3,opt,name=system_usage" json:"system_usage,omitempty"`
+	CpuUsage       *CpuUsage       `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"`
+	ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"`
+	SystemUsage    uint64          `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"`
 }
 
 func (m *CpuStats) Reset() { *m = CpuStats{} }
@@ -570,7 +577,7 @@ func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3
 
 type MemoryData struct {
 	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
-	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"`
+	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
 	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
 	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
 }
@@ -583,8 +590,8 @@ func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{
 type MemoryStats struct {
 	Cache       uint64            `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"`
 	Usage       *MemoryData       `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"`
-	SwapUsage   *MemoryData       `protobuf:"bytes,3,opt,name=swap_usage" json:"swap_usage,omitempty"`
-	KernelUsage *MemoryData       `protobuf:"bytes,4,opt,name=kernel_usage" json:"kernel_usage,omitempty"`
+	SwapUsage   *MemoryData       `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"`
+	KernelUsage *MemoryData       `protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"`
 	Stats       map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
 }
 
@@ -634,14 +641,14 @@ func (*BlkioStatsEntry) ProtoMessage() {}
 func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
 
 type BlkioStats struct {
-	IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"`
-	IoServicedRecursive     []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive" json:"io_serviced_recursive,omitempty"`
-	IoQueuedRecursive       []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive" json:"io_queued_recursive,omitempty"`
-	IoServiceTimeRecursive  []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive" json:"io_service_time_recursive,omitempty"`
-	IoWaitTimeRecursive     []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive" json:"io_wait_time_recursive,omitempty"`
-	IoMergedRecursive       []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive" json:"io_merged_recursive,omitempty"`
-	IoTimeRecursive         []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive" json:"io_time_recursive,omitempty"`
-	SectorsRecursive        []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive" json:"sectors_recursive,omitempty"`
+	IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"`
+	IoServicedRecursive     []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"`
+	IoQueuedRecursive       []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"`
+	IoServiceTimeRecursive  []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"`
+	IoWaitTimeRecursive     []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"`
+	IoMergedRecursive       []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"`
+	IoTimeRecursive         []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"`
+	SectorsRecursive        []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"`
 }
 
 func (m *BlkioStats) Reset() { *m = BlkioStats{} }
@@ -707,7 +714,7 @@ func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry {
 
 type HugetlbStats struct {
 	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
-	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"`
+	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
 	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
 	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
 }
@@ -718,11 +725,11 @@ func (*HugetlbStats) ProtoMessage() {}
 func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
 
 type CgroupStats struct {
-	CpuStats     *CpuStats                `protobuf:"bytes,1,opt,name=cpu_stats" json:"cpu_stats,omitempty"`
-	MemoryStats  *MemoryStats             `protobuf:"bytes,2,opt,name=memory_stats" json:"memory_stats,omitempty"`
-	BlkioStats   *BlkioStats              `protobuf:"bytes,3,opt,name=blkio_stats" json:"blkio_stats,omitempty"`
-	HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-	PidsStats    *PidsStats               `protobuf:"bytes,5,opt,name=pids_stats" json:"pids_stats,omitempty"`
+	CpuStats     *CpuStats                `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"`
+	MemoryStats  *MemoryStats             `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"`
+	BlkioStats   *BlkioStats              `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"`
+	HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	PidsStats    *PidsStats               `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"`
 }
 
 func (m *CgroupStats) Reset() { *m = CgroupStats{} }
@@ -766,8 +773,8 @@ func (m *CgroupStats) GetPidsStats() *PidsStats {
 }
 
 type StatsResponse struct {
-	NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"`
-	CgroupStats  *CgroupStats    `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"`
+	NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"`
+	CgroupStats  *CgroupStats    `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"`
 	Timestamp    uint64          `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"`
 }
 
@@ -849,6 +856,10 @@ func init() {
 var _ context.Context
 var _ grpc.ClientConn
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion2
+
 // Client API for API service
 
 type APIClient interface {
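Editor's note: the compile-time assertion added in the hunk above is the usual guard emitted by the gRPC code generator; referencing a constant that only sufficiently new grpc-go versions define makes a stale vendored grpc fail at build time rather than misbehave at runtime. A minimal sketch of the same pattern (illustrative only, not from this commit):

package grpccheck // hypothetical package name

import "google.golang.org/grpc"

// If the vendored grpc-go predates SupportPackageIsVersion2, this constant
// reference fails to compile, surfacing the version mismatch immediately.
const _ = grpc.SupportPackageIsVersion2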
@ -1026,124 +1037,184 @@ func RegisterAPIServer(s *grpc.Server, srv APIServer) {
|
|||
s.RegisterService(&_API_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetServerVersionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).GetServerVersion(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).GetServerVersion(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/GetServerVersion",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateContainerRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).CreateContainer(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).CreateContainer(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/CreateContainer",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateContainerRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).UpdateContainer(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).UpdateContainer(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/UpdateContainer",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignalRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).Signal(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).Signal(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/Signal",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).Signal(ctx, req.(*SignalRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateProcessRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).UpdateProcess(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).UpdateProcess(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/UpdateProcess",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AddProcessRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).AddProcess(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).AddProcess(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/AddProcess",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateCheckpointRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).CreateCheckpoint(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).CreateCheckpoint(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/CreateCheckpoint",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteCheckpointRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).DeleteCheckpoint(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).DeleteCheckpoint(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/DeleteCheckpoint",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListCheckpointRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).ListCheckpoint(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).ListCheckpoint(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/ListCheckpoint",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).State(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).State(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/State",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).State(ctx, req.(*StateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
|
@ -1167,16 +1238,22 @@ func (x *aPIEventsServer) Send(m *Event) error {
|
|||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StatsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(APIServer).Stats(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).Stats(ctx, in)
|
||||
}
|
||||
return out, nil
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/types.API/Stats",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).Stats(ctx, req.(*StatsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _API_serviceDesc = grpc.ServiceDesc{
|
||||
|
@ -1238,148 +1315,152 @@ var _API_serviceDesc = grpc.ServiceDesc{
|
|||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 2285 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x19, 0xcb, 0x72, 0x1c, 0x49,
|
||||
0xd1, 0xf3, 0x94, 0x26, 0xe7, 0x21, 0xa9, 0xfd, 0xd0, 0x78, 0x76, 0xed, 0x35, 0x1d, 0xc0, 0x1a,
|
||||
0x58, 0x84, 0x91, 0x77, 0x03, 0x07, 0x04, 0x44, 0xac, 0x65, 0xb3, 0x98, 0xb5, 0x16, 0xb9, 0x25,
|
||||
0xb1, 0x17, 0x22, 0x26, 0x5a, 0x33, 0xe5, 0x99, 0x46, 0x33, 0xdd, 0xbd, 0xdd, 0x35, 0xd2, 0xe8,
|
||||
0xc2, 0x11, 0x6e, 0xfc, 0x00, 0x11, 0x5c, 0xb8, 0x71, 0xe7, 0xc0, 0x17, 0xf0, 0x27, 0xc4, 0x5e,
|
||||
0xb8, 0x73, 0x24, 0xab, 0x32, 0xbb, 0xba, 0x7a, 0x1e, 0xd2, 0x72, 0x20, 0xb8, 0xec, 0x65, 0xa2,
|
||||
0x32, 0x2b, 0x2b, 0x33, 0x2b, 0xdf, 0x5d, 0x03, 0x0d, 0x3f, 0x0e, 0xf6, 0xe2, 0x24, 0x92, 0x91,
|
||||
0x53, 0x93, 0x57, 0xb1, 0x48, 0xdd, 0xfb, 0xb0, 0xfb, 0x89, 0x90, 0xc7, 0x22, 0xb9, 0x10, 0xc9,
|
||||
0xaf, 0x45, 0x92, 0x06, 0x51, 0xe8, 0x89, 0x2f, 0x66, 0x22, 0x95, 0xee, 0x1c, 0xba, 0xcb, 0x5b,
|
||||
0x69, 0x1c, 0x85, 0xa9, 0x70, 0xee, 0x40, 0x6d, 0xea, 0xff, 0x36, 0x4a, 0xba, 0xa5, 0x47, 0xa5,
|
||||
0xc7, 0x6d, 0x8f, 0x00, 0x8d, 0x0d, 0x42, 0xc4, 0x96, 0x19, 0xab, 0x00, 0x85, 0x8d, 0x7d, 0x39,
|
||||
0x18, 0x77, 0x2b, 0x84, 0xd5, 0x80, 0xd3, 0x83, 0xcd, 0x44, 0x5c, 0x04, 0x8a, 0x6b, 0xb7, 0x8a,
|
||||
0x1b, 0x0d, 0xcf, 0xc0, 0xee, 0xef, 0x4b, 0x70, 0xe7, 0x34, 0x1e, 0xfa, 0x52, 0x1c, 0x25, 0xd1,
|
||||
0x40, 0xa4, 0x29, 0xab, 0xe4, 0x74, 0xa0, 0x1c, 0x0c, 0xb5, 0xcc, 0x86, 0x87, 0x2b, 0x67, 0x1b,
|
||||
0x2a, 0x31, 0x22, 0xca, 0x1a, 0xa1, 0x96, 0xce, 0x43, 0x80, 0xc1, 0x24, 0x4a, 0xc5, 0xb1, 0x1c,
|
||||
0x06, 0xa1, 0x96, 0xb8, 0xe9, 0x59, 0x18, 0xa5, 0xcc, 0x65, 0x30, 0x94, 0x63, 0x2d, 0x13, 0x95,
|
||||
0xd1, 0x80, 0x73, 0x0f, 0xea, 0x63, 0x11, 0x8c, 0xc6, 0xb2, 0x5b, 0xd3, 0x68, 0x86, 0xdc, 0x5d,
|
||||
0xb8, 0xbb, 0xa0, 0x07, 0xdd, 0xdf, 0xfd, 0xb2, 0x04, 0xf7, 0x0e, 0x12, 0x81, 0x3b, 0x07, 0x51,
|
||||
0x28, 0xfd, 0x20, 0x14, 0xc9, 0x3a, 0x1d, 0x51, 0xa3, 0xb3, 0x59, 0x38, 0x9c, 0x88, 0x23, 0x1f,
|
||||
0xc5, 0x92, 0xaa, 0x16, 0x46, 0x6b, 0x3c, 0x16, 0x83, 0xf3, 0x38, 0x0a, 0x42, 0xa9, 0x35, 0xc6,
|
||||
0xfd, 0x1c, 0xa3, 0x34, 0x4e, 0xf5, 0x65, 0xc8, 0x4a, 0x04, 0x28, 0x8d, 0x71, 0x11, 0xcd, 0x48,
|
||||
0xe3, 0x86, 0xc7, 0x10, 0xe3, 0x45, 0x92, 0x74, 0xeb, 0x06, 0x8f, 0x90, 0xc2, 0x4f, 0xfc, 0x33,
|
||||
0x31, 0x49, 0xbb, 0x1b, 0x8f, 0x2a, 0x0a, 0x4f, 0x90, 0xf3, 0x08, 0x9a, 0x61, 0x74, 0x14, 0x5c,
|
||||
0x44, 0xd2, 0x8b, 0x22, 0xd9, 0xdd, 0xd4, 0x06, 0xb3, 0x51, 0xee, 0x2b, 0xd8, 0x5d, 0xba, 0x29,
|
||||
0x47, 0xc1, 0x1e, 0x34, 0x06, 0x19, 0x52, 0xdf, 0xb8, 0xb9, 0xbf, 0xbd, 0xa7, 0xe3, 0x6a, 0x2f,
|
||||
0x27, 0xce, 0x49, 0x90, 0x55, 0xfb, 0x38, 0x18, 0x85, 0xfe, 0xe4, 0xab, 0xfb, 0x53, 0xdd, 0x47,
|
||||
0x1f, 0xe1, 0xe8, 0x61, 0xc8, 0xdd, 0x86, 0x4e, 0xc6, 0x8a, 0x5d, 0xf2, 0xb7, 0x0a, 0xec, 0x7c,
|
||||
0x3c, 0x1c, 0xde, 0x10, 0x31, 0x18, 0x76, 0x52, 0x24, 0x18, 0x98, 0xc8, 0xb1, 0xac, 0x2f, 0x6b,
|
||||
0x60, 0xe7, 0x3d, 0xa8, 0xce, 0x52, 0xbc, 0x49, 0x45, 0xdf, 0xa4, 0xc9, 0x37, 0x39, 0x45, 0x94,
|
||||
0xa7, 0x37, 0x1c, 0x07, 0xaa, 0x7e, 0x32, 0x4a, 0xd1, 0x13, 0xca, 0x84, 0x7a, 0xad, 0x54, 0x16,
|
||||
0xe1, 0x05, 0x7a, 0x41, 0xa1, 0xd4, 0x52, 0x61, 0x06, 0x97, 0x43, 0xb6, 0xbf, 0x5a, 0x66, 0xd7,
|
||||
0xda, 0xc8, 0xaf, 0x65, 0x9c, 0xba, 0xb9, 0xda, 0xa9, 0x8d, 0x35, 0x4e, 0x85, 0x82, 0x53, 0x5d,
|
||||
0x68, 0x0d, 0xfc, 0xd8, 0x3f, 0x0b, 0x26, 0x81, 0x0c, 0x44, 0xda, 0x6d, 0x6a, 0x25, 0x0a, 0x38,
|
||||
0xe7, 0x31, 0x6c, 0xf9, 0x71, 0xec, 0x27, 0xd3, 0x28, 0x41, 0xd3, 0xbc, 0x0d, 0x26, 0xa2, 0xdb,
|
||||
0xd2, 0x4c, 0x16, 0xd1, 0x8a, 0x5b, 0x2a, 0x26, 0x41, 0x38, 0x9b, 0xbf, 0x56, 0xb1, 0xd1, 0x6d,
|
||||
0x6b, 0xb2, 0x02, 0x4e, 0x71, 0x0b, 0xa3, 0xcf, 0xc4, 0xe5, 0x51, 0x12, 0x5c, 0xe0, 0x99, 0x11,
|
||||
0x0a, 0xed, 0x68, 0x2b, 0x2e, 0xa2, 0x9d, 0xf7, 0x61, 0x23, 0x99, 0x04, 0xd3, 0x40, 0xa6, 0xdd,
|
||||
0x2d, 0x54, 0xab, 0xb9, 0xdf, 0x66, 0x7b, 0x7a, 0x1a, 0xeb, 0x65, 0xbb, 0xee, 0x0b, 0xa8, 0x13,
|
||||
0x4a, 0x99, 0x57, 0x91, 0xb0, 0xb7, 0xf4, 0x5a, 0xe1, 0xd2, 0xe8, 0xad, 0xd4, 0xbe, 0xaa, 0x7a,
|
||||
0x7a, 0xad, 0x70, 0x63, 0x3f, 0x19, 0x6a, 0x3f, 0x21, 0x4e, 0xad, 0x5d, 0x0f, 0xaa, 0xca, 0x51,
|
||||
0xca, 0xd4, 0x33, 0x76, 0x78, 0xdb, 0x53, 0x4b, 0x85, 0x19, 0x71, 0x4c, 0x21, 0x06, 0x97, 0xce,
|
||||
0xb7, 0xa1, 0xe3, 0x0f, 0x87, 0x68, 0x9e, 0x08, 0xbd, 0xfe, 0x49, 0x30, 0x4c, 0x91, 0x53, 0x05,
|
||||
0x37, 0x17, 0xb0, 0xee, 0x1d, 0x70, 0xec, 0x80, 0xe2, 0x38, 0xfb, 0x8d, 0xc9, 0x07, 0x93, 0xa3,
|
||||
0xeb, 0x82, 0xed, 0x87, 0x85, 0xd4, 0x2e, 0xeb, 0xb0, 0xda, 0xc9, 0x12, 0x24, 0x3f, 0x6d, 0x11,
|
||||
0xb9, 0x3d, 0xe8, 0x2e, 0x73, 0x67, 0xc9, 0x3f, 0x85, 0xdd, 0x17, 0x62, 0x22, 0xbe, 0x8a, 0x64,
|
||||
0x34, 0x51, 0xe8, 0x4f, 0x05, 0x67, 0x92, 0x5e, 0x2b, 0xd6, 0xcb, 0xc7, 0x99, 0xf5, 0xfb, 0x70,
|
||||
0xf7, 0x75, 0x90, 0xca, 0x1b, 0x19, 0xbb, 0xbf, 0x03, 0xc8, 0x89, 0x8c, 0x98, 0x52, 0x2e, 0x46,
|
||||
0xe1, 0xc4, 0x3c, 0x90, 0x9c, 0x5d, 0x7a, 0xad, 0x7c, 0x20, 0x07, 0x31, 0x97, 0x63, 0xb5, 0x54,
|
||||
0x75, 0x67, 0x16, 0x06, 0xf3, 0xe3, 0x68, 0x70, 0x2e, 0x64, 0xaa, 0x6b, 0x1b, 0xd6, 0x1d, 0x0b,
|
||||
0xa5, 0x53, 0x64, 0x2c, 0x26, 0x13, 0x5d, 0xe0, 0x36, 0x3d, 0x02, 0xdc, 0x43, 0xb8, 0xb7, 0xa8,
|
||||
0x28, 0x17, 0xa3, 0xa7, 0xd0, 0xcc, 0xed, 0x98, 0xa2, 0x4a, 0x95, 0xd5, 0xd6, 0xb6, 0xa9, 0xdc,
|
||||
0x87, 0xd0, 0x3a, 0x96, 0x68, 0xed, 0x75, 0xd7, 0x7d, 0x0c, 0x1d, 0x53, 0xc9, 0x34, 0x21, 0xe5,
|
||||
0xa2, 0x2f, 0x67, 0x29, 0x53, 0x31, 0xe4, 0xfe, 0xbd, 0x02, 0x1b, 0x1c, 0x2a, 0x59, 0xbe, 0x97,
|
||||
0xf2, 0x7c, 0xff, 0xbf, 0x94, 0x9d, 0x77, 0xa1, 0x91, 0x5e, 0xa5, 0x52, 0x4c, 0x8f, 0xb8, 0xf8,
|
||||
0xb4, 0xbd, 0x1c, 0xf1, 0x75, 0x09, 0xca, 0x4b, 0xd0, 0x3f, 0x4a, 0xd0, 0x30, 0x6e, 0xfe, 0xaf,
|
||||
0x1b, 0xf8, 0x07, 0xd0, 0x88, 0xc9, 0xf1, 0x82, 0x2a, 0x49, 0x73, 0xbf, 0xc3, 0x82, 0xb2, 0xda,
|
||||
0x91, 0x13, 0x58, 0xf1, 0x53, 0xb5, 0xe3, 0xc7, 0x6a, 0xd0, 0xb5, 0x42, 0x83, 0x46, 0xe7, 0xc7,
|
||||
0xaa, 0x44, 0xd5, 0x75, 0x89, 0xd2, 0x6b, 0xa7, 0x8b, 0x17, 0x9b, 0x85, 0x32, 0xc0, 0xcc, 0xa3,
|
||||
0x9e, 0x92, 0x81, 0xee, 0x47, 0xb0, 0x71, 0xe8, 0x0f, 0xc6, 0x78, 0x0f, 0x75, 0x70, 0x10, 0x73,
|
||||
0x98, 0xe2, 0x41, 0xb5, 0x56, 0x42, 0xa6, 0x02, 0xed, 0x7d, 0xc5, 0xf5, 0x94, 0x21, 0xf7, 0x1c,
|
||||
0x1b, 0x33, 0xa5, 0x01, 0x27, 0xd3, 0x13, 0xac, 0x5c, 0x99, 0x41, 0xb2, 0x5c, 0x5a, 0x6e, 0xed,
|
||||
0x16, 0x0d, 0xba, 0x65, 0x63, 0x4a, 0x92, 0xb9, 0xd0, 0x65, 0x36, 0x60, 0x7d, 0xbc, 0x6c, 0xdb,
|
||||
0xfd, 0x03, 0xce, 0x4e, 0x34, 0x55, 0xdd, 0x38, 0x3b, 0xad, 0x9e, 0x07, 0xc8, 0x7c, 0x95, 0x82,
|
||||
0xf9, 0x9e, 0x42, 0x23, 0x11, 0x69, 0x34, 0x4b, 0xd0, 0xcc, 0xda, 0xb2, 0xcd, 0xfd, 0xbb, 0x59,
|
||||
0x26, 0x69, 0x59, 0x1e, 0xef, 0x7a, 0x39, 0x9d, 0xfb, 0x65, 0x19, 0x3a, 0xc5, 0x5d, 0x55, 0x97,
|
||||
0xce, 0x26, 0xe7, 0x41, 0xf4, 0x39, 0x8d, 0x83, 0x64, 0x3c, 0x1b, 0xa5, 0xb2, 0x0a, 0x6d, 0x79,
|
||||
0x8c, 0x5d, 0x07, 0x25, 0x51, 0x57, 0xc9, 0x11, 0xbc, 0x7b, 0x24, 0x92, 0x20, 0x1a, 0xf2, 0xc8,
|
||||
0x92, 0x23, 0x54, 0x19, 0x40, 0xe0, 0xcd, 0x2c, 0x92, 0x3e, 0x0f, 0xa0, 0x06, 0xd6, 0x73, 0x20,
|
||||
0xfa, 0x48, 0xc8, 0x03, 0xe5, 0xb5, 0x1a, 0xcf, 0x81, 0x06, 0x93, 0xef, 0x1f, 0x8a, 0x69, 0xca,
|
||||
0x69, 0x6e, 0x61, 0x94, 0xe6, 0xe4, 0xcd, 0xd7, 0x2a, 0xa8, 0x39, 0xdf, 0x6d, 0x94, 0xe2, 0x40,
|
||||
0xe0, 0xf1, 0xa5, 0x1f, 0xeb, 0xb4, 0x6f, 0x7b, 0x16, 0x06, 0x03, 0x79, 0x87, 0x20, 0xb4, 0x06,
|
||||
0x4e, 0xfd, 0xbe, 0x6a, 0x85, 0xba, 0x0c, 0xb4, 0xbd, 0xe5, 0x0d, 0x45, 0x7d, 0x2e, 0x92, 0x50,
|
||||
0x4c, 0x0e, 0x2d, 0xa9, 0x40, 0xd4, 0x4b, 0x1b, 0xea, 0x3b, 0x63, 0xc9, 0xe7, 0xdc, 0x7b, 0xbe,
|
||||
0x0f, 0xed, 0x97, 0x17, 0x02, 0xab, 0x71, 0x16, 0x05, 0x68, 0x43, 0x15, 0xcc, 0xe8, 0xd9, 0x69,
|
||||
0xac, 0x3d, 0x50, 0xf5, 0x72, 0x84, 0x9b, 0x42, 0x4d, 0x93, 0xaf, 0x1c, 0x17, 0x28, 0x80, 0xca,
|
||||
0x26, 0x80, 0x8a, 0xe1, 0xd2, 0x36, 0xe1, 0xc2, 0x81, 0x55, 0xcd, 0x03, 0xab, 0x20, 0xb4, 0xb6,
|
||||
0x28, 0xf4, 0x8f, 0x65, 0x68, 0x7d, 0x26, 0xe4, 0x65, 0x94, 0x9c, 0xab, 0x44, 0x49, 0x57, 0x76,
|
||||
0xbe, 0xfb, 0xf8, 0x49, 0x33, 0xef, 0x9f, 0x5d, 0x49, 0x0e, 0x8c, 0x2a, 0xe6, 0xe5, 0xfc, 0xb9,
|
||||
0x02, 0x9d, 0x07, 0x00, 0xb8, 0x75, 0xe4, 0x53, 0xb7, 0xa3, 0xc1, 0xa5, 0x91, 0xcc, 0x19, 0xe1,
|
||||
0xbc, 0x03, 0x0d, 0x6f, 0xde, 0xc7, 0x7a, 0x1a, 0x25, 0x14, 0xbd, 0x55, 0xfc, 0x1a, 0x9a, 0xbf,
|
||||
0xd4, 0xb0, 0x3a, 0x8b, 0x9b, 0xc3, 0x24, 0x8a, 0x63, 0x31, 0xcc, 0x54, 0x4b, 0xe6, 0x2f, 0x08,
|
||||
0xa1, 0xa4, 0x9e, 0x64, 0x52, 0xeb, 0x24, 0x55, 0xe6, 0x52, 0x71, 0x2b, 0x66, 0xa9, 0x1b, 0x7c,
|
||||
0x29, 0x5b, 0xea, 0x89, 0x91, 0xba, 0x49, 0x52, 0xa5, 0x25, 0xf5, 0x24, 0x97, 0xda, 0xc8, 0xce,
|
||||
0xb2, 0x54, 0xf7, 0xaf, 0x25, 0xd8, 0xc4, 0xb0, 0x3c, 0x4d, 0xfd, 0x91, 0xc0, 0x0e, 0xd6, 0x94,
|
||||
0x18, 0xc2, 0x93, 0xfe, 0x4c, 0x81, 0xec, 0x32, 0xd0, 0x28, 0x22, 0xf8, 0x06, 0xb4, 0x62, 0x91,
|
||||
0x60, 0xb0, 0x32, 0x45, 0x19, 0x0b, 0x4a, 0xd5, 0x6b, 0x12, 0x8e, 0x48, 0xf6, 0xe0, 0xb6, 0xde,
|
||||
0xeb, 0x07, 0x61, 0x9f, 0xc2, 0x67, 0x1a, 0x0d, 0x05, 0x9b, 0x6a, 0x47, 0x6f, 0xbd, 0x0a, 0x3f,
|
||||
0x35, 0x1b, 0xce, 0x77, 0x61, 0xc7, 0xd0, 0xab, 0x2e, 0xa9, 0xa9, 0xc9, 0x74, 0x5b, 0x4c, 0x7d,
|
||||
0xca, 0x68, 0x1c, 0x5a, 0x3a, 0x27, 0x63, 0xfc, 0xea, 0x95, 0xd8, 0x46, 0x46, 0x2f, 0x7c, 0x4c,
|
||||
0x36, 0xac, 0xa0, 0xb1, 0x4e, 0xc9, 0x94, 0xb5, 0xcd, 0x40, 0xe7, 0x7b, 0xb0, 0x23, 0x89, 0x56,
|
||||
0x0c, 0xfb, 0x19, 0x0d, 0x79, 0x73, 0xdb, 0x6c, 0x1c, 0x31, 0xf1, 0xb7, 0xa0, 0x93, 0x13, 0xeb,
|
||||
0x7a, 0x4c, 0xfa, 0xb6, 0x0d, 0xf6, 0x44, 0x55, 0xe5, 0x3f, 0x91, 0xb1, 0x28, 0x72, 0x3e, 0xd0,
|
||||
0x15, 0xc2, 0x32, 0x55, 0x73, 0x7f, 0x2b, 0xab, 0xac, 0x6c, 0x0c, 0x5d, 0x15, 0xc8, 0x2c, 0x3f,
|
||||
0x83, 0x2d, 0x69, 0x54, 0xef, 0x63, 0x02, 0xf9, 0x5c, 0x5e, 0xb3, 0xea, 0x56, 0xbc, 0x98, 0xd7,
|
||||
0x91, 0xc5, 0x8b, 0xa2, 0xe5, 0xa9, 0xe5, 0xb3, 0x40, 0xd2, 0xaf, 0x49, 0x38, 0x2d, 0xc2, 0xfd,
|
||||
0x09, 0x34, 0x70, 0x1e, 0x48, 0x49, 0x3b, 0x34, 0xcc, 0x60, 0x96, 0x24, 0x98, 0x5f, 0x99, 0x61,
|
||||
0x18, 0x54, 0xf3, 0x82, 0x6e, 0x97, 0x6c, 0x0c, 0x02, 0xdc, 0x08, 0x80, 0xd2, 0x5c, 0x4b, 0x43,
|
||||
0x1a, 0x3b, 0x04, 0x08, 0x50, 0x71, 0x36, 0xf5, 0xe7, 0xc6, 0xf5, 0x3a, 0xce, 0x10, 0x41, 0x17,
|
||||
0x44, 0x81, 0x6f, 0xfd, 0x60, 0x32, 0xe0, 0x6f, 0x5f, 0x14, 0xc8, 0x60, 0x2e, 0xb0, 0x6a, 0x0b,
|
||||
0xfc, 0x4b, 0x19, 0x9a, 0x24, 0x91, 0x14, 0x46, 0xaa, 0x01, 0x36, 0x16, 0x23, 0x52, 0x03, 0xd8,
|
||||
0xfa, 0x6b, 0xb9, 0xb8, 0x7c, 0x0c, 0xcc, 0x55, 0xcd, 0x74, 0xc3, 0x46, 0x97, 0x62, 0xed, 0xb3,
|
||||
0xac, 0xb3, 0x92, 0xba, 0xa1, 0x88, 0x48, 0xe1, 0x0f, 0xa1, 0x45, 0xf1, 0xc9, 0x67, 0xaa, 0xeb,
|
||||
0xce, 0x34, 0x89, 0x8c, 0x4e, 0x3d, 0x55, 0xd3, 0x16, 0xea, 0xab, 0xbb, 0x7b, 0x73, 0xff, 0x41,
|
||||
0x81, 0x5c, 0xdf, 0x64, 0x4f, 0xff, 0xbe, 0x0c, 0x25, 0x96, 0x59, 0xa2, 0xed, 0x3d, 0x03, 0xc8,
|
||||
0x91, 0xaa, 0x66, 0x9d, 0x8b, 0xab, 0x6c, 0xaa, 0xc4, 0xa5, 0xba, 0xfb, 0x85, 0x3f, 0x99, 0x65,
|
||||
0x46, 0x25, 0xe0, 0xc7, 0xe5, 0x67, 0x25, 0x77, 0x00, 0x5b, 0xcf, 0x55, 0xcf, 0xb2, 0x8e, 0x17,
|
||||
0x9e, 0x6c, 0xaa, 0x2b, 0x9f, 0x6c, 0xaa, 0xd9, 0x93, 0x0d, 0x96, 0xd1, 0x28, 0xe6, 0x0e, 0x8b,
|
||||
0xab, 0x5c, 0x50, 0xd5, 0x12, 0xe4, 0xfe, 0xb3, 0x0a, 0x90, 0x4b, 0x71, 0x8e, 0xa1, 0x17, 0x44,
|
||||
0x7d, 0xd5, 0x20, 0x82, 0x81, 0xa0, 0x82, 0xd4, 0x4f, 0x04, 0x86, 0x4f, 0x1a, 0x5c, 0x08, 0x9e,
|
||||
0x21, 0xee, 0xf1, 0xbd, 0x17, 0x94, 0xf3, 0x76, 0x11, 0xa2, 0x83, 0xba, 0x72, 0x79, 0xd9, 0x31,
|
||||
0xe7, 0x97, 0x70, 0x37, 0x67, 0x3a, 0xb4, 0xf8, 0x95, 0xaf, 0xe5, 0x77, 0xdb, 0xf0, 0x1b, 0xe6,
|
||||
0xbc, 0x7e, 0x0e, 0x88, 0xee, 0x63, 0x8f, 0x99, 0x15, 0x38, 0x55, 0xae, 0xe5, 0xb4, 0x13, 0x44,
|
||||
0x6f, 0xf4, 0x89, 0x9c, 0xcf, 0x1b, 0xb8, 0x6f, 0x5d, 0x54, 0xa5, 0xbd, 0xc5, 0xad, 0x7a, 0x2d,
|
||||
0xb7, 0x7b, 0x46, 0x2f, 0x55, 0x18, 0x72, 0x96, 0x9f, 0x02, 0xee, 0xf4, 0x2f, 0xfd, 0x40, 0x2e,
|
||||
0xf2, 0xab, 0xdd, 0x74, 0xcf, 0xcf, 0xf1, 0x50, 0x91, 0x19, 0xdd, 0x73, 0x2a, 0x92, 0x51, 0xe1,
|
||||
0x9e, 0xf5, 0x9b, 0xee, 0x79, 0xa8, 0x4f, 0xe4, 0x7c, 0x9e, 0x03, 0x22, 0x17, 0xf5, 0xd9, 0xb8,
|
||||
0x96, 0xcb, 0x56, 0x10, 0x15, 0x75, 0x39, 0x80, 0x9d, 0x54, 0x0c, 0x24, 0x76, 0x14, 0x8b, 0xc7,
|
||||
0xe6, 0xb5, 0x3c, 0xb6, 0xf9, 0x80, 0x61, 0xe2, 0x7e, 0x01, 0xad, 0x5f, 0xcc, 0x46, 0x42, 0x4e,
|
||||
0xce, 0x4c, 0xce, 0xff, 0xaf, 0xcb, 0xcc, 0xbf, 0xb1, 0xcc, 0x1c, 0x8c, 0x92, 0x68, 0x16, 0x17,
|
||||
0xaa, 0x36, 0xe5, 0xf0, 0x52, 0xd5, 0xd6, 0x34, 0xba, 0x6a, 0x13, 0xf5, 0x47, 0xd0, 0xa2, 0x81,
|
||||
0x89, 0x0f, 0x50, 0x15, 0x72, 0x96, 0x93, 0x3e, 0x1b, 0xd0, 0xe8, 0xd8, 0x3e, 0x0f, 0x9f, 0x7c,
|
||||
0xaa, 0x58, 0x8d, 0x72, 0x33, 0xe1, 0xd7, 0x47, 0x9e, 0x75, 0xaf, 0xa0, 0x3d, 0x26, 0xdb, 0xf0,
|
||||
0x29, 0x0a, 0xc0, 0x6f, 0x66, 0xca, 0xe5, 0x77, 0xd8, 0xb3, 0x6d, 0x48, 0xa6, 0x6e, 0x8d, 0x6d,
|
||||
0xb3, 0xfe, 0x00, 0x40, 0x7d, 0x5e, 0xf4, 0xb3, 0x42, 0x65, 0xbf, 0xe7, 0x99, 0x0e, 0x81, 0xdf,
|
||||
0x32, 0xd9, 0xb2, 0x77, 0x02, 0x3b, 0x4b, 0x3c, 0x57, 0x94, 0xa9, 0xef, 0xd8, 0x65, 0xaa, 0xb9,
|
||||
0x7f, 0x9b, 0x59, 0xda, 0x47, 0xed, 0xda, 0xf5, 0xe7, 0x12, 0x7d, 0x8d, 0x98, 0x27, 0x17, 0xe7,
|
||||
0x19, 0xb4, 0x43, 0x1a, 0xbe, 0x8c, 0x03, 0x2a, 0x16, 0x23, 0x7b, 0x30, 0xf3, 0x5a, 0xa1, 0x3d,
|
||||
0xa6, 0xa1, 0x23, 0x06, 0xda, 0x02, 0x2b, 0x1d, 0x61, 0x19, 0xc7, 0x6b, 0x0e, 0x2c, 0x6f, 0x17,
|
||||
0x86, 0xc1, 0xca, 0xe2, 0x30, 0xc8, 0x8f, 0x06, 0xeb, 0xde, 0x18, 0xf7, 0xff, 0x55, 0x87, 0xca,
|
||||
0xc7, 0x47, 0xaf, 0x9c, 0x53, 0xd8, 0x5e, 0x7c, 0x40, 0x77, 0x1e, 0xb2, 0xe8, 0x35, 0x8f, 0xee,
|
||||
0xbd, 0xf7, 0xd6, 0xee, 0xf3, 0xb4, 0x7c, 0xcb, 0xf1, 0x60, 0x6b, 0xe1, 0x41, 0xd6, 0xc9, 0xda,
|
||||
0xc9, 0xea, 0x27, 0xe9, 0xde, 0xc3, 0x75, 0xdb, 0x36, 0xcf, 0x85, 0xf1, 0xdc, 0xf0, 0x5c, 0xfd,
|
||||
0xa9, 0x66, 0x78, 0xae, 0x9b, 0xea, 0x6f, 0x39, 0x3f, 0x82, 0x3a, 0x3d, 0xd1, 0x3a, 0x77, 0x98,
|
||||
0xb6, 0xf0, 0xf8, 0xdb, 0xbb, 0xbb, 0x80, 0x35, 0x07, 0x5f, 0x43, 0xbb, 0xf0, 0xea, 0xee, 0xbc,
|
||||
0x53, 0x90, 0x55, 0x7c, 0xe1, 0xed, 0xbd, 0xbb, 0x7a, 0xd3, 0x70, 0x3b, 0x00, 0xc8, 0x5f, 0xf1,
|
||||
0x9c, 0x2e, 0x53, 0x2f, 0xbd, 0x14, 0xf7, 0xee, 0xaf, 0xd8, 0x31, 0x4c, 0xd0, 0x95, 0x8b, 0xcf,
|
||||
0x72, 0xce, 0x82, 0x55, 0x17, 0x9f, 0xce, 0x8c, 0x2b, 0xd7, 0xbe, 0xe7, 0x69, 0xb6, 0x8b, 0x4f,
|
||||
0x72, 0x86, 0xed, 0x9a, 0xa7, 0x3e, 0xc3, 0x76, 0xed, 0x5b, 0xde, 0x2d, 0xe7, 0x57, 0xd0, 0x29,
|
||||
0x3e, 0x92, 0x39, 0x99, 0x91, 0x56, 0x3e, 0xf2, 0xf5, 0x1e, 0xac, 0xd9, 0x35, 0x0c, 0x3f, 0x84,
|
||||
0x1a, 0xbd, 0x7e, 0x65, 0x29, 0x67, 0x3f, 0x9a, 0xf5, 0xee, 0x14, 0x91, 0xe6, 0xd4, 0x13, 0xa8,
|
||||
0xd3, 0x87, 0x9d, 0x09, 0x80, 0xc2, 0x77, 0x5e, 0xaf, 0x65, 0x63, 0xdd, 0x5b, 0x4f, 0x4a, 0x99,
|
||||
0x9c, 0xb4, 0x20, 0x27, 0x5d, 0x25, 0xc7, 0x72, 0xce, 0x59, 0x5d, 0xff, 0xa3, 0xf5, 0xf4, 0x3f,
|
||||
0x01, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xa3, 0xf6, 0xb8, 0xde, 0x1a, 0x00, 0x00,
|
||||
// 2348 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x73, 0x1c, 0x49,
|
||||
0x11, 0xf6, 0x3c, 0xa5, 0xc9, 0x79, 0x48, 0xea, 0xd5, 0x63, 0x3c, 0xbb, 0xf6, 0x9a, 0x8e, 0x05,
|
||||
0x0c, 0x2c, 0xc2, 0x8c, 0x77, 0x03, 0x07, 0x44, 0x10, 0x61, 0x4b, 0x66, 0x31, 0x6b, 0x2d, 0xe3,
|
||||
0x96, 0xc4, 0x1e, 0x27, 0x5a, 0xdd, 0xe5, 0x99, 0x46, 0x33, 0xdd, 0xbd, 0xdd, 0x35, 0xd2, 0xe8,
|
||||
0xc2, 0x81, 0x03, 0xdc, 0xf8, 0x03, 0x10, 0x5c, 0xb8, 0x71, 0xe7, 0xc0, 0x2f, 0x20, 0x82, 0x1f,
|
||||
0xc2, 0x8d, 0x3b, 0x47, 0xb2, 0xaa, 0xb2, 0xab, 0xab, 0xe7, 0x21, 0x99, 0x03, 0xc1, 0x85, 0xcb,
|
||||
0x44, 0xd5, 0x57, 0x59, 0x99, 0x59, 0xf9, 0xaa, 0xec, 0x1a, 0x68, 0xb8, 0x71, 0x70, 0x18, 0x27,
|
||||
0x11, 0x8f, 0xac, 0x1a, 0xbf, 0x89, 0x59, 0x6a, 0xdf, 0x87, 0x83, 0xcf, 0x18, 0x3f, 0x65, 0xc9,
|
||||
0x15, 0x4b, 0x7e, 0xc1, 0x92, 0x34, 0x88, 0x42, 0x87, 0x7d, 0x35, 0x63, 0x29, 0xb7, 0xe7, 0xd0,
|
||||
0x5d, 0x5e, 0x4a, 0xe3, 0x28, 0x4c, 0x99, 0xb5, 0x0b, 0xb5, 0xa9, 0xfb, 0xcb, 0x28, 0xe9, 0x96,
|
||||
0x1e, 0x95, 0x1e, 0xb7, 0x1d, 0x35, 0x91, 0x68, 0x10, 0x22, 0x5a, 0x26, 0x54, 0x4c, 0x04, 0x1a,
|
||||
0xbb, 0xdc, 0x1b, 0x77, 0x2b, 0x0a, 0x95, 0x13, 0xab, 0x07, 0x9b, 0x09, 0xbb, 0x0a, 0x04, 0xd7,
|
||||
0x6e, 0x15, 0x17, 0x1a, 0x8e, 0x9e, 0xdb, 0xbf, 0x29, 0xc1, 0xee, 0x79, 0xec, 0xbb, 0x9c, 0x0d,
|
||||
0x92, 0xc8, 0x63, 0x69, 0x4a, 0x2a, 0x59, 0x1d, 0x28, 0x07, 0xbe, 0x94, 0xd9, 0x70, 0x70, 0x64,
|
||||
0x6d, 0x43, 0x25, 0x46, 0xa0, 0x2c, 0x01, 0x31, 0xb4, 0x1e, 0x02, 0x78, 0x93, 0x28, 0x65, 0xa7,
|
||||
0xdc, 0x0f, 0x42, 0x29, 0x71, 0xd3, 0x31, 0x10, 0xa1, 0xcc, 0x75, 0xe0, 0xf3, 0xb1, 0x94, 0x89,
|
||||
0xca, 0xc8, 0x89, 0xb5, 0x0f, 0xf5, 0x31, 0x0b, 0x46, 0x63, 0xde, 0xad, 0x49, 0x98, 0x66, 0xf6,
|
||||
0x01, 0xec, 0x2d, 0xe8, 0xa1, 0xce, 0x6f, 0xff, 0xbd, 0x0c, 0xfb, 0x47, 0x09, 0xc3, 0x95, 0xa3,
|
||||
0x28, 0xe4, 0x6e, 0x10, 0xb2, 0x64, 0x9d, 0x8e, 0xa8, 0xd1, 0xc5, 0x2c, 0xf4, 0x27, 0x6c, 0xe0,
|
||||
0xa2, 0x58, 0xa5, 0xaa, 0x81, 0x48, 0x8d, 0xc7, 0xcc, 0xbb, 0x8c, 0xa3, 0x20, 0xe4, 0x52, 0x63,
|
||||
0x5c, 0xcf, 0x11, 0xa1, 0x71, 0x2a, 0x0f, 0xa3, 0xac, 0xa4, 0x26, 0x42, 0x63, 0x1c, 0x44, 0x33,
|
||||
0xa5, 0x71, 0xc3, 0xa1, 0x19, 0xe1, 0x2c, 0x49, 0xba, 0x75, 0x8d, 0xe3, 0x4c, 0xe0, 0x13, 0xf7,
|
||||
0x82, 0x4d, 0xd2, 0xee, 0xc6, 0xa3, 0x8a, 0xc0, 0xd5, 0xcc, 0x7a, 0x04, 0xcd, 0x30, 0x1a, 0x04,
|
||||
0x57, 0x11, 0x77, 0xa2, 0x88, 0x77, 0x37, 0xa5, 0xc1, 0x4c, 0xc8, 0xea, 0xc2, 0x46, 0x32, 0x0b,
|
||||
0x79, 0x30, 0x65, 0xdd, 0x86, 0x64, 0x99, 0x4d, 0xc5, 0x5e, 0x1a, 0x3e, 0x4f, 0x46, 0x69, 0x17,
|
||||
0x24, 0x63, 0x13, 0xb2, 0x3e, 0x82, 0x76, 0x7e, 0x92, 0xe3, 0x20, 0xe9, 0x36, 0x25, 0x87, 0x22,
|
||||
0x68, 0xbf, 0x82, 0x83, 0x25, 0x5b, 0x52, 0x9c, 0x1d, 0x42, 0xc3, 0xcb, 0x40, 0x69, 0xd3, 0x66,
|
||||
0x7f, 0xfb, 0x50, 0x46, 0xee, 0x61, 0x4e, 0x9c, 0x93, 0x20, 0xab, 0xf6, 0x69, 0x30, 0x0a, 0xdd,
|
||||
// … remaining fileDescriptor bytes (machine-generated protobuf descriptor data) …
0x00, 0x00, 0xff, 0xff, 0x58, 0xa9, 0x0a, 0x41, 0xe7, 0x1b, 0x00, 0x00,
}

@ -47,6 +47,9 @@ message CreateContainerRequest {
string stderr = 6; // path to file where stderr will be written (optional)
repeated string labels = 7;
bool noPivotRoot = 8;
string runtime = 9;
repeated string runtimeArgs = 10;
string checkpointDir = 11; // Directory where checkpoints are stored
}

message CreateContainerResponse {

@ -98,6 +101,7 @@ message AddProcessResponse {
message CreateCheckpointRequest {
string id = 1; // ID of container
Checkpoint checkpoint = 2; // Checkpoint configuration
string checkpointDir = 3; // Directory where checkpoints are stored
}

message CreateCheckpointResponse {

@ -106,6 +110,7 @@ message CreateCheckpointResponse {
message DeleteCheckpointRequest {
string id = 1; // ID of container
string name = 2; // Name of checkpoint
string checkpointDir = 3; // Directory where checkpoints are stored
}

message DeleteCheckpointResponse {

@ -113,6 +118,7 @@ message DeleteCheckpointResponse {

message ListCheckpointRequest {
string id = 1; // ID of container
string checkpointDir = 2; // Directory where checkpoints are stored
}

message Checkpoint {

@ -193,6 +199,7 @@ message UpdateResource {
uint32 memorySwap = 8;
uint32 memoryReservation = 9;
uint32 kernelMemoryLimit = 10;
uint32 kernelTCPMemoryLimit = 11;
}

message UpdateContainerResponse {

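For orientation, here is a minimal sketch of what filling in the regenerated CreateContainerRequest looks like from Go. The import path, package alias, and capitalized field names are assumptions inferred from the proto above (protoc-gen-go exports proto fields as Go struct fields); which of the listed proto lines are additions versus context is not visible in this rendering.

package main

import (
	"fmt"

	"github.com/docker/containerd/api/grpc/types" // assumed import path for the regenerated package
)

func main() {
	req := &types.CreateContainerRequest{
		Stderr:        "/run/redis/stderr.log",            // field 6: path where stderr is written
		Labels:        []string{"env=test"},               // field 7
		NoPivotRoot:   false,                              // field 8
		Runtime:       "runc",                             // field 9
		RuntimeArgs:   []string{"--systemd-cgroup"},       // field 10
		CheckpointDir: "/var/lib/containerd/checkpoints",  // field 11: directory where checkpoints are stored
	}
	fmt.Printf("%+v\n", req)
}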
@ -39,5 +39,5 @@ test: install generate-test-pbs
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make

@ -768,11 +768,10 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}

v.SetMapIndex(keyelem, valelem)

@ -64,10 +64,6 @@ var (
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")

// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")

// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
)

@ -1226,9 +1222,7 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
// Do oneof fields.
if prop.oneofMarshaler != nil {
m := structPointer_Interface(base, prop.stype).(Message)
if err := prop.oneofMarshaler(m, o); err == ErrNil {
return errOneofHasNil
} else if err != nil {
if err := prop.oneofMarshaler(m, o); err != nil {
return err
}
}

vendor/src/github.com/golang/protobuf/proto/text.go (133 changed lines, vendored)
@ -175,93 +175,7 @@ type raw interface {
|
|||
Bytes() []byte
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
|
@ -313,7 +227,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -355,7 +269,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -372,7 +286,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -444,7 +358,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
if err := writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -456,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -486,7 +400,7 @@ func writeRaw(w *textWriter, b []byte) error {
|
|||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
|
@ -535,15 +449,15 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
|
|||
}
|
||||
}
|
||||
w.indent()
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := tm.writeStruct(w, v); err != nil {
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
|
@ -687,7 +601,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
|
||||
|
@ -722,13 +636,13 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
|
|||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -737,7 +651,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -746,7 +660,7 @@ func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -773,13 +687,12 @@ func (w *textWriter) writeIndent() {
|
|||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
Compact bool // use compact text format (one line).
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
|
@ -794,11 +707,11 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
compact: m.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -812,7 +725,7 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
|
@ -822,9 +735,9 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
func (m *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
m.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
|
|
|
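The text.go hunks above switch between package-level writeStruct/writeAny helpers and methods on TextMarshaler; either way the exported surface used by callers is the same. A minimal usage sketch, independent of which side of this diff ends up vendored (the message value is a placeholder):

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
)

func printMessage(m proto.Message) {
	// Compact text format on one line.
	tm := proto.TextMarshaler{Compact: true}
	fmt.Println(tm.Text(m))

	// Default multi-line text format, written to a stream.
	if err := (&proto.TextMarshaler{}).Marshal(os.Stdout, m); err != nil {
		fmt.Fprintln(os.Stderr, "marshal:", err)
	}
}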
@ -163,7 +163,7 @@ func (p *textParser) advance() {
|
|||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
|
@ -451,10 +451,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
// "[extension]".
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
|
@ -464,66 +461,33 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
// Looks like an extension.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
if d.Name == tok.value {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
|
@ -679,35 +643,6 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
|
|
vendor/src/golang.org/x/net/context/context.go (14 changed lines, vendored)
@ -189,7 +189,7 @@ func Background() Context {
|
|||
}
|
||||
|
||||
// TODO returns a non-nil, empty Context. Code should use context.TODO when
|
||||
// it's unclear which Context to use or it's is not yet available (because the
|
||||
// it's unclear which Context to use or it is not yet available (because the
|
||||
// surrounding function has not yet been extended to accept a Context
|
||||
// parameter). TODO is recognized by static analysis tools that determine
|
||||
// whether Contexts are propagated correctly in a program.
|
||||
|
@ -210,13 +210,13 @@ type CancelFunc func()
|
|||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, &c)
|
||||
return &c, func() { c.cancel(true, Canceled) }
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) cancelCtx {
|
||||
return cancelCtx{
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
@ -259,7 +259,7 @@ func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
|||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return &c.cancelCtx, true
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
|
@ -377,7 +377,7 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
|||
// implement Done and Err. It implements cancel by stopping its timer then
|
||||
// delegating to cancelCtx.cancel.
|
||||
type timerCtx struct {
|
||||
cancelCtx
|
||||
*cancelCtx
|
||||
timer *time.Timer // Under cancelCtx.mu.
|
||||
|
||||
deadline time.Time
|
||||
|
|
|
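The context.go hunk above changes how WithCancel constructs and propagates its cancelCtx, but the public contract is unchanged. A small sketch of that contract using the vendored golang.org/x/net/context package:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel() // releases resources and unblocks ctx.Done()
	}()

	<-ctx.Done()
	fmt.Println("done:", ctx.Err()) // context.Canceled
}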
@ -9,6 +9,7 @@ package ctxhttp
|
|||
import "net/http"
|
||||
|
||||
func canceler(client *http.Client, req *http.Request) func() {
|
||||
// TODO(djd): Respect any existing value of req.Cancel.
|
||||
ch := make(chan struct{})
|
||||
req.Cancel = ch
|
||||
|
||||
|
|
|
@ -14,6 +14,14 @@ import (
|
|||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func nop() {}
|
||||
|
||||
var (
|
||||
testHookContextDoneBeforeHeaders = nop
|
||||
testHookDoReturned = nop
|
||||
testHookDidBodyClose = nop
|
||||
)
|
||||
|
||||
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
|
||||
// If the client is nil, http.DefaultClient is used.
|
||||
// If the context is canceled or times out, ctx.Err() will be returned.
|
||||
|
@ -31,18 +39,51 @@ func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
|
|||
}
|
||||
result := make(chan responseAndError, 1)
|
||||
|
||||
// Make local copies of test hooks closed over by goroutines below.
|
||||
// Prevents data races in tests.
|
||||
testHookDoReturned := testHookDoReturned
|
||||
testHookDidBodyClose := testHookDidBodyClose
|
||||
|
||||
go func() {
|
||||
resp, err := client.Do(req)
|
||||
testHookDoReturned()
|
||||
result <- responseAndError{resp, err}
|
||||
}()
|
||||
|
||||
var resp *http.Response
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
testHookContextDoneBeforeHeaders()
|
||||
cancel()
|
||||
// Clean up after the goroutine calling client.Do:
|
||||
go func() {
|
||||
if r := <-result; r.resp != nil {
|
||||
testHookDidBodyClose()
|
||||
r.resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
return nil, ctx.Err()
|
||||
case r := <-result:
|
||||
return r.resp, r.err
|
||||
var err error
|
||||
resp, err = r.resp, r.err
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cancel()
|
||||
case <-c:
|
||||
// The response's Body is closed.
|
||||
}
|
||||
}()
|
||||
resp.Body = ¬ifyingReader{resp.Body, c}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Get issues a GET request via the Do function.
|
||||
|
@ -77,3 +118,28 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string,
|
|||
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
||||
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
||||
}
|
||||
|
||||
// notifyingReader is an io.ReadCloser that closes the notify channel after
|
||||
// Close is called or a Read fails on the underlying ReadCloser.
|
||||
type notifyingReader struct {
|
||||
io.ReadCloser
|
||||
notify chan<- struct{}
|
||||
}
|
||||
|
||||
func (r *notifyingReader) Read(p []byte) (int, error) {
|
||||
n, err := r.ReadCloser.Read(p)
|
||||
if err != nil && r.notify != nil {
|
||||
close(r.notify)
|
||||
r.notify = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *notifyingReader) Close() error {
|
||||
err := r.ReadCloser.Close()
|
||||
if r.notify != nil {
|
||||
close(r.notify)
|
||||
r.notify = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
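The ctxhttp hunks above rework Do so the response body is tied to the context through notifyingReader, and add test hooks around the request. A short sketch of the package's intended use (the URL is a placeholder):

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Passing a nil client uses http.DefaultClient.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.org/") // placeholder URL
	if err != nil {
		fmt.Println("request failed or context expired:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(len(body), "bytes")
}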
vendor/src/golang.org/x/net/http2/Dockerfile (15 changed lines, vendored)
@ -17,8 +17,15 @@ RUN apt-get install -y --no-install-recommends \
|
|||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
|
||||
automake autoconf
|
||||
|
||||
# The list of packages nghttp2 recommends for h2load:
|
||||
RUN apt-get install -y --no-install-recommends make binutils \
|
||||
autoconf automake autotools-dev \
|
||||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
|
||||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
|
||||
cython python3.4-dev python-setuptools
|
||||
|
||||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
|
||||
ENV NGHTTP2_VER af24f8394e43f4
|
||||
ENV NGHTTP2_VER 895da9a
|
||||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
|
||||
|
||||
WORKDIR /root/nghttp2
|
||||
|
@ -31,9 +38,9 @@ RUN make
|
|||
RUN make install
|
||||
|
||||
WORKDIR /root
|
||||
RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz
|
||||
RUN tar -zxvf curl-7.40.0.tar.gz
|
||||
WORKDIR /root/curl-7.40.0
|
||||
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
|
||||
RUN tar -zxvf curl-7.45.0.tar.gz
|
||||
WORKDIR /root/curl-7.45.0
|
||||
RUN ./configure --with-ssl --with-nghttp2=/usr/local
|
||||
RUN make
|
||||
RUN make install
|
||||
|
|
vendor/src/golang.org/x/net/http2/buffer.go (deleted file, 76 lines, vendored)
@ -1,76 +0,0 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// buffer is an io.ReadWriteCloser backed by a fixed size buffer.
|
||||
// It never allocates, but moves old data as new data is written.
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
r, w int
|
||||
closed bool
|
||||
err error // err to return to reader
|
||||
}
|
||||
|
||||
var (
|
||||
errReadEmpty = errors.New("read from empty buffer")
|
||||
errWriteClosed = errors.New("write on closed buffer")
|
||||
errWriteFull = errors.New("write on full buffer")
|
||||
)
|
||||
|
||||
// Read copies bytes from the buffer into p.
|
||||
// It is an error to read when no data is available.
|
||||
func (b *buffer) Read(p []byte) (n int, err error) {
|
||||
n = copy(p, b.buf[b.r:b.w])
|
||||
b.r += n
|
||||
if b.closed && b.r == b.w {
|
||||
err = b.err
|
||||
} else if b.r == b.w && n == 0 {
|
||||
err = errReadEmpty
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Len returns the number of bytes of the unread portion of the buffer.
|
||||
func (b *buffer) Len() int {
|
||||
return b.w - b.r
|
||||
}
|
||||
|
||||
// Write copies bytes from p into the buffer.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (b *buffer) Write(p []byte) (n int, err error) {
|
||||
if b.closed {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
|
||||
// Slide existing data to beginning.
|
||||
if b.r > 0 && len(p) > len(b.buf)-b.w {
|
||||
copy(b.buf, b.buf[b.r:b.w])
|
||||
b.w -= b.r
|
||||
b.r = 0
|
||||
}
|
||||
|
||||
// Write new data.
|
||||
n = copy(b.buf[b.w:], p)
|
||||
b.w += n
|
||||
if n < len(p) {
|
||||
err = errWriteFull
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close marks the buffer as closed. Future calls to Write will
|
||||
// return an error. Future calls to Read, once the buffer is
|
||||
// empty, will return err.
|
||||
func (b *buffer) Close(err error) {
|
||||
if !b.closed {
|
||||
b.closed = true
|
||||
b.err = err
|
||||
}
|
||||
}
|
vendor/src/golang.org/x/net/http2/client_conn_pool.go (new file, 225 lines, vendored)
@ -0,0 +1,225 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Transport code's client connection pooling.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ClientConnPool manages a pool of HTTP/2 client connections.
|
||||
type ClientConnPool interface {
|
||||
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
|
||||
MarkDead(*ClientConn)
|
||||
}
|
||||
|
||||
// TODO: use singleflight for dialing and addConnCalls?
|
||||
type clientConnPool struct {
|
||||
t *Transport
|
||||
|
||||
mu sync.Mutex // TODO: maybe switch to RWMutex
|
||||
// TODO: add support for sharing conns based on cert names
|
||||
// (e.g. share conn for googleapis.com and appspot.com)
|
||||
conns map[string][]*ClientConn // key is host:port
|
||||
dialing map[string]*dialCall // currently in-flight dials
|
||||
keys map[*ClientConn][]string
|
||||
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
|
||||
}
|
||||
|
||||
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, dialOnMiss)
|
||||
}
|
||||
|
||||
const (
|
||||
dialOnMiss = true
|
||||
noDialOnMiss = false
|
||||
)
|
||||
|
||||
func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[addr] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
if !dialOnMiss {
|
||||
p.mu.Unlock()
|
||||
return nil, ErrNoCachedConn
|
||||
}
|
||||
call := p.getStartDialLocked(addr)
|
||||
p.mu.Unlock()
|
||||
<-call.done
|
||||
return call.res, call.err
|
||||
}
|
||||
|
||||
// dialCall is an in-flight Transport dial call to a host.
|
||||
type dialCall struct {
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
res *ClientConn // valid after done is closed
|
||||
err error // valid after done is closed
|
||||
}
|
||||
|
||||
// requires p.mu is held.
|
||||
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
|
||||
if call, ok := p.dialing[addr]; ok {
|
||||
// A dial is already in-flight. Don't start another.
|
||||
return call
|
||||
}
|
||||
call := &dialCall{p: p, done: make(chan struct{})}
|
||||
if p.dialing == nil {
|
||||
p.dialing = make(map[string]*dialCall)
|
||||
}
|
||||
p.dialing[addr] = call
|
||||
go call.dial(addr)
|
||||
return call
|
||||
}
|
||||
|
||||
// run in its own goroutine.
|
||||
func (c *dialCall) dial(addr string) {
|
||||
c.res, c.err = c.p.t.dialClientConn(addr)
|
||||
close(c.done)
|
||||
|
||||
c.p.mu.Lock()
|
||||
delete(c.p.dialing, addr)
|
||||
if c.err == nil {
|
||||
c.p.addConnLocked(addr, c.res)
|
||||
}
|
||||
c.p.mu.Unlock()
|
||||
}
|
||||
|
||||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
||||
// already exist. It coalesces concurrent calls with the same key.
|
||||
// This is used by the http1 Transport code when it creates a new connection. Because
|
||||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
||||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
||||
// This code decides which ones live or die.
|
||||
// The return value used is whether c was used.
|
||||
// c is never closed.
|
||||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[key] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
call, dup := p.addConnCalls[key]
|
||||
if !dup {
|
||||
if p.addConnCalls == nil {
|
||||
p.addConnCalls = make(map[string]*addConnCall)
|
||||
}
|
||||
call = &addConnCall{
|
||||
p: p,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
p.addConnCalls[key] = call
|
||||
go call.run(t, key, c)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
<-call.done
|
||||
if call.err != nil {
|
||||
return false, call.err
|
||||
}
|
||||
return !dup, nil
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
|
||||
cc, err := t.NewClientConn(tc)
|
||||
|
||||
p := c.p
|
||||
p.mu.Lock()
|
||||
if err != nil {
|
||||
c.err = err
|
||||
} else {
|
||||
p.addConnLocked(key, cc)
|
||||
}
|
||||
delete(p.addConnCalls, key)
|
||||
p.mu.Unlock()
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
p.addConnLocked(key, cc)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
if v == cc {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.conns == nil {
|
||||
p.conns = make(map[string][]*ClientConn)
|
||||
}
|
||||
if p.keys == nil {
|
||||
p.keys = make(map[*ClientConn][]string)
|
||||
}
|
||||
p.conns[key] = append(p.conns[key], cc)
|
||||
p.keys[cc] = append(p.keys[cc], key)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
for _, key := range p.keys[cc] {
|
||||
vv, ok := p.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
p.conns[key] = newList
|
||||
} else {
|
||||
delete(p.conns, key)
|
||||
}
|
||||
}
|
||||
delete(p.keys, cc)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) closeIdleConnections() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
// TODO: don't close a cc if it was just added to the pool
|
||||
// milliseconds ago and has never been used. There's currently
|
||||
// a small race window with the HTTP/1 Transport's integration
|
||||
// where it can add an idle conn just before using it, and
|
||||
// somebody else can concurrently call CloseIdleConns and
|
||||
// break some caller's RoundTrip.
|
||||
for _, vv := range p.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
// If we filtered it out, zero out the last item to prevent
|
||||
// the GC from seeing it.
|
||||
if len(in) != len(out) {
|
||||
in[len(in)-1] = nil
|
||||
}
|
||||
return out
|
||||
}
|
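client_conn_pool.go (added above) gives the HTTP/2 Transport a shared pool of ClientConns keyed by host:port, with de-duplicated dials. Callers normally never touch the pool directly; a minimal sketch of the usual entry point (the host name is a placeholder):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// The zero-value http2.Transport manages its own connection pool internally.
	client := &http.Client{Transport: &http2.Transport{}}

	resp, err := client.Get("https://http2.example.org/") // placeholder host
	if err != nil {
		fmt.Println("round trip failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("protocol:", resp.Proto)
}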
vendor/src/golang.org/x/net/http2/configure_transport.go (new file, 89 lines, vendored)
@ -0,0 +1,89 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.6
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func configureTransport(t1 *http.Transport) (*Transport, error) {
|
||||
connPool := new(clientConnPool)
|
||||
t2 := &Transport{
|
||||
ConnPool: noDialClientConnPool{connPool},
|
||||
t1: t1,
|
||||
}
|
||||
connPool.t = t2
|
||||
if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t1.TLSClientConfig == nil {
|
||||
t1.TLSClientConfig = new(tls.Config)
|
||||
}
|
||||
if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
|
||||
t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
|
||||
}
|
||||
if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
|
||||
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
|
||||
}
|
||||
upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
|
||||
addr := authorityAddr(authority)
|
||||
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
|
||||
go c.Close()
|
||||
return erringRoundTripper{err}
|
||||
} else if !used {
|
||||
// Turns out we don't need this c.
|
||||
// For example, two goroutines made requests to the same host
|
||||
// at the same time, both kicking off TCP dials. (since protocol
|
||||
// was unknown)
|
||||
go c.Close()
|
||||
}
|
||||
return t2
|
||||
}
|
||||
if m := t1.TLSNextProto; len(m) == 0 {
|
||||
t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
|
||||
"h2": upgradeFn,
|
||||
}
|
||||
} else {
|
||||
m["h2"] = upgradeFn
|
||||
}
|
||||
return t2, nil
|
||||
}
|
||||
|
||||
// registerHTTPSProtocol calls Transport.RegisterProtocol but
|
||||
// converting panics into errors.
|
||||
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("%v", e)
|
||||
}
|
||||
}()
|
||||
t.RegisterProtocol("https", rt)
|
||||
return nil
|
||||
}
|
||||
|
||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
||||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
||||
// connection instead.
|
||||
type noDialClientConnPool struct{ *clientConnPool }
|
||||
|
||||
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, noDialOnMiss)
|
||||
}
|
||||
|
||||
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
|
||||
// if there's already a cached connection to the host.
|
||||
type noDialH2RoundTripper struct{ t *Transport }
|
||||
|
||||
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
res, err := rt.t.RoundTrip(req)
|
||||
if err == ErrNoCachedConn {
|
||||
return nil, http.ErrSkipAltProtocol
|
||||
}
|
||||
return res, err
|
||||
}
|
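configure_transport.go (added above, go1.6 only) wires an HTTP/2 Transport into an existing net/http Transport via the TLSNextProto map, reusing the HTTP/1.1 client's TLS connections. This path is presumably reached through the exported ConfigureTransport wrapper in transport.go, which is not part of this hunk; a sketch of how that wrapper is typically called, under that assumption:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http.Transport{}

	// Registers the "h2" TLSNextProto hook so HTTPS connections that
	// negotiate HTTP/2 are handed to the http2 Transport.
	if err := http2.ConfigureTransport(tr); err != nil {
		fmt.Println("http2 not configured:", err)
		return
	}

	client := &http.Client{Transport: tr}
	_ = client // use client.Get(...) as usual; HTTP/1.1 remains the fallback
}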
vendor/src/golang.org/x/net/http2/errors.go (54 changed lines, vendored)
@ -1,11 +1,13 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
|
||||
type ErrCode uint32
|
||||
|
@ -76,3 +78,45 @@ func (e StreamError) Error() string {
|
|||
type goAwayFlowError struct{}
|
||||
|
||||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
||||
|
||||
// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
|
||||
|
||||
// Errors of this type are only returned by the frame parser functions
|
||||
// and converted into ConnectionError(ErrCodeProtocol).
|
||||
type connError struct {
|
||||
Code ErrCode
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e connError) Error() string {
|
||||
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
|
||||
}
|
||||
|
||||
type pseudoHeaderError string
|
||||
|
||||
func (e pseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("invalid pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type duplicatePseudoHeaderError string
|
||||
|
||||
func (e duplicatePseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldNameError string
|
||||
|
||||
func (e headerFieldNameError) Error() string {
|
||||
return fmt.Sprintf("invalid header field name %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldValueError string
|
||||
|
||||
func (e headerFieldValueError) Error() string {
|
||||
return fmt.Sprintf("invalid header field value %q", string(e))
|
||||
}
|
||||
|
||||
var (
|
||||
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
|
||||
errPseudoAfterRegular = errors.New("pseudo header field after regular")
|
||||
)
|
||||
|
|
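The errors.go hunk above adds parser-oriented error types (connError, pseudoHeaderError, and friends) that stay unexported and are converted before reaching callers. A sketch of how calling code can tell the exported kinds apart:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// classify reports what kind of http2 error err is.
func classify(err error) string {
	switch e := err.(type) {
	case http2.StreamError:
		return fmt.Sprintf("stream %d failed: %v", e.StreamID, e.Code)
	case http2.ConnectionError:
		return fmt.Sprintf("connection failed: %v", http2.ErrCode(e))
	default:
		if err == http2.ErrFrameTooLarge {
			return "frame exceeded SetMaxReadFrameSize"
		}
		return fmt.Sprintf("other error: %v", err)
	}
}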
vendor/src/golang.org/x/net/http2/fixed_buffer.go (new file, 60 lines, vendored)
@ -0,0 +1,60 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
|
||||
// It never allocates, but moves old data as new data is written.
|
||||
type fixedBuffer struct {
|
||||
buf []byte
|
||||
r, w int
|
||||
}
|
||||
|
||||
var (
|
||||
errReadEmpty = errors.New("read from empty fixedBuffer")
|
||||
errWriteFull = errors.New("write on full fixedBuffer")
|
||||
)
|
||||
|
||||
// Read copies bytes from the buffer into p.
|
||||
// It is an error to read when no data is available.
|
||||
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
|
||||
if b.r == b.w {
|
||||
return 0, errReadEmpty
|
||||
}
|
||||
n = copy(p, b.buf[b.r:b.w])
|
||||
b.r += n
|
||||
if b.r == b.w {
|
||||
b.r = 0
|
||||
b.w = 0
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Len returns the number of bytes of the unread portion of the buffer.
|
||||
func (b *fixedBuffer) Len() int {
|
||||
return b.w - b.r
|
||||
}
|
||||
|
||||
// Write copies bytes from p into the buffer.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
|
||||
// Slide existing data to beginning.
|
||||
if b.r > 0 && len(p) > len(b.buf)-b.w {
|
||||
copy(b.buf, b.buf[b.r:b.w])
|
||||
b.w -= b.r
|
||||
b.r = 0
|
||||
}
|
||||
|
||||
// Write new data.
|
||||
n = copy(b.buf[b.w:], p)
|
||||
b.w += n
|
||||
if n < len(p) {
|
||||
err = errWriteFull
|
||||
}
|
||||
return n, err
|
||||
}
|
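fixed_buffer.go above replaces the old closable buffer with a simpler fixed-capacity ReadWriter that slides unread data to the front instead of allocating. Since the type is unexported, the sketch below would have to live inside the http2 package; the values are illustrative only.

package http2 // sketch only: fixedBuffer is unexported, so this cannot run standalone

import "fmt"

func fixedBufferDemo() {
	b := fixedBuffer{buf: make([]byte, 8)} // 8-byte capacity, never reallocated

	b.Write([]byte("abcdef")) // w=6
	p := make([]byte, 4)
	n, _ := b.Read(p) // n=4, consumes "abcd", r=4

	// This write needs 4 free bytes; only 2 remain at the tail, so the
	// unread "ef" is slid to the front before copying.
	b.Write([]byte("ghij"))

	rest := make([]byte, 8)
	m, _ := b.Read(rest)
	fmt.Println(string(p[:n]), string(rest[:m])) // prints "abcd efghij"
}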
vendor/src/golang.org/x/net/http2/flow.go (7 changed lines, vendored)
@ -1,7 +1,6 @@
// Copyright 2014 The Go Authors.
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
// Licensed under the same terms as Go itself:
// https://code.google.com/p/go/source/browse/LICENSE
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Flow control

vendor/src/golang.org/x/net/http2/frame.go (431 changed lines, vendored)
@ -1,7 +1,6 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
|
@ -11,7 +10,11 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
const frameHeaderLen = 9
|
||||
|
@ -172,6 +175,12 @@ func (h FrameHeader) Header() FrameHeader { return h }
|
|||
func (h FrameHeader) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("[FrameHeader ")
|
||||
h.writeDebug(&buf)
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
|
||||
buf.WriteString(h.Type.String())
|
||||
if h.Flags != 0 {
|
||||
buf.WriteString(" flags=")
|
||||
|
@ -188,15 +197,14 @@ func (h FrameHeader) String() string {
|
|||
if name != "" {
|
||||
buf.WriteString(name)
|
||||
} else {
|
||||
fmt.Fprintf(&buf, "0x%x", 1<<i)
|
||||
fmt.Fprintf(buf, "0x%x", 1<<i)
|
||||
}
|
||||
}
|
||||
}
|
||||
if h.StreamID != 0 {
|
||||
fmt.Fprintf(&buf, " stream=%d", h.StreamID)
|
||||
fmt.Fprintf(buf, " stream=%d", h.StreamID)
|
||||
}
|
||||
fmt.Fprintf(&buf, " len=%d]", h.Length)
|
||||
return buf.String()
|
||||
fmt.Fprintf(buf, " len=%d", h.Length)
|
||||
}
|
||||
|
||||
func (h *FrameHeader) checkValid() {
|
||||
|
@ -256,6 +264,11 @@ type Frame interface {
|
|||
type Framer struct {
|
||||
r io.Reader
|
||||
lastFrame Frame
|
||||
errDetail error
|
||||
|
||||
// lastHeaderStream is non-zero if the last frame was an
|
||||
// unfinished HEADERS/CONTINUATION.
|
||||
lastHeaderStream uint32
|
||||
|
||||
maxReadSize uint32
|
||||
headerBuf [frameHeaderLen]byte
|
||||
|
@ -272,18 +285,48 @@ type Framer struct {
|
|||
wbuf []byte
|
||||
|
||||
// AllowIllegalWrites permits the Framer's Write methods to
|
||||
// write frames that do not conform to the HTTP/2 spec. This
|
||||
// write frames that do not conform to the HTTP/2 spec. This
|
||||
// permits using the Framer to test other HTTP/2
|
||||
// implementations' conformance to the spec.
|
||||
// If false, the Write methods will prefer to return an error
|
||||
// rather than comply.
|
||||
AllowIllegalWrites bool
|
||||
|
||||
// AllowIllegalReads permits the Framer's ReadFrame method
|
||||
// to return non-compliant frames or frame orders.
|
||||
// This is for testing and permits using the Framer to test
|
||||
// other HTTP/2 implementations' conformance to the spec.
|
||||
// It is not compatible with ReadMetaHeaders.
|
||||
AllowIllegalReads bool
|
||||
|
||||
// ReadMetaHeaders if non-nil causes ReadFrame to merge
|
||||
// HEADERS and CONTINUATION frames together and return
|
||||
// MetaHeadersFrame instead.
|
||||
ReadMetaHeaders *hpack.Decoder
|
||||
|
||||
// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
|
||||
// It's used only if ReadMetaHeaders is set; 0 means a sane default
|
||||
// (currently 16MB)
|
||||
// If the limit is hit, MetaHeadersFrame.Truncated is set true.
|
||||
MaxHeaderListSize uint32
|
||||
|
||||
// TODO: track which type of frame & with which flags was sent
|
||||
// last. Then return an error (unless AllowIllegalWrites) if
|
||||
// we're in the middle of a header block and a
|
||||
// non-Continuation or Continuation on a different stream is
|
||||
// attempted to be written.
|
||||
|
||||
logReads bool
|
||||
|
||||
debugFramer *Framer // only use for logging written writes
|
||||
debugFramerBuf *bytes.Buffer
|
||||
}
|
||||
|
||||
func (fr *Framer) maxHeaderListSize() uint32 {
|
||||
if fr.MaxHeaderListSize == 0 {
|
||||
return 16 << 20 // sane default, per docs
|
||||
}
|
||||
return fr.MaxHeaderListSize
|
||||
}
|
||||
|
||||
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
|
||||
|
@ -311,6 +354,10 @@ func (f *Framer) endWrite() error {
|
|||
byte(length>>16),
|
||||
byte(length>>8),
|
||||
byte(length))
|
||||
if logFrameWrites {
|
||||
f.logWrite()
|
||||
}
|
||||
|
||||
n, err := f.w.Write(f.wbuf)
|
||||
if err == nil && n != len(f.wbuf) {
|
||||
err = io.ErrShortWrite
|
||||
|
@ -318,6 +365,24 @@ func (f *Framer) endWrite() error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (f *Framer) logWrite() {
|
||||
if f.debugFramer == nil {
|
||||
f.debugFramerBuf = new(bytes.Buffer)
|
||||
f.debugFramer = NewFramer(nil, f.debugFramerBuf)
|
||||
f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
|
||||
// Let us read anything, even if we accidentally wrote it
|
||||
// in the wrong order:
|
||||
f.debugFramer.AllowIllegalReads = true
|
||||
}
|
||||
f.debugFramerBuf.Write(f.wbuf)
|
||||
fr, err := f.debugFramer.ReadFrame()
|
||||
if err != nil {
|
||||
log.Printf("http2: Framer %p: failed to decode just-written frame", f)
|
||||
return
|
||||
}
|
||||
log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
|
||||
}
|
||||
|
||||
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
|
||||
func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
|
||||
func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
|
||||
|
@ -333,8 +398,9 @@ const (
|
|||
// NewFramer returns a Framer that writes frames to w and reads them from r.
|
||||
func NewFramer(w io.Writer, r io.Reader) *Framer {
|
||||
fr := &Framer{
|
||||
w: w,
|
||||
r: r,
|
||||
w: w,
|
||||
r: r,
|
||||
logReads: logFrameReads,
|
||||
}
|
||||
fr.getReadBuf = func(size uint32) []byte {
|
||||
if cap(fr.readBuf) >= int(size) {
|
||||
|
@ -358,15 +424,39 @@ func (fr *Framer) SetMaxReadFrameSize(v uint32) {
|
|||
fr.maxReadSize = v
|
||||
}
|
||||
|
||||
// ErrorDetail returns a more detailed error of the last error
|
||||
// returned by Framer.ReadFrame. For instance, if ReadFrame
|
||||
// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
|
||||
// will say exactly what was invalid. ErrorDetail is not guaranteed
|
||||
// to return a non-nil value and like the rest of the http2 package,
|
||||
// its return value is not protected by an API compatibility promise.
|
||||
// ErrorDetail is reset after the next call to ReadFrame.
|
||||
func (fr *Framer) ErrorDetail() error {
|
||||
return fr.errDetail
|
||||
}
|
||||
|
||||
// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
|
||||
// sends a frame that is larger than declared with SetMaxReadFrameSize.
|
||||
var ErrFrameTooLarge = errors.New("http2: frame too large")
|
||||
|
||||
// terminalReadFrameError reports whether err is an unrecoverable
|
||||
// error from ReadFrame and no other frames should be read.
|
||||
func terminalReadFrameError(err error) bool {
|
||||
if _, ok := err.(StreamError); ok {
|
||||
return false
|
||||
}
|
||||
return err != nil
|
||||
}
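ErrorDetail and terminalReadFrameError together suggest a read-loop shape: per-stream errors are recoverable and carry extra context, everything else ends the loop. A hedged sketch follows; readLoop is a hypothetical helper, not part of the package, and it assumes imports "log" and "golang.org/x/net/http2".

```go
// readLoop is a hypothetical helper showing one way to drive Framer.ReadFrame.
func readLoop(fr *http2.Framer) error {
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			if _, ok := err.(http2.StreamError); ok {
				// Stream-level problem: log the detail and keep the connection alive.
				// ErrorDetail is reset by the next ReadFrame call, so read it now.
				log.Printf("stream error: %v (detail: %v)", err, fr.ErrorDetail())
				continue
			}
			return err // ErrFrameTooLarge, ConnectionError, I/O errors, etc.
		}
		log.Printf("read %T", f)
	}
}
```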
|
||||
|
||||
// ReadFrame reads a single frame. The returned Frame is only valid
|
||||
// until the next call to ReadFrame.
|
||||
// If the frame is larger than previously set with SetMaxReadFrameSize,
|
||||
// the returned error is ErrFrameTooLarge.
|
||||
//
|
||||
// If the frame is larger than previously set with SetMaxReadFrameSize, the
|
||||
// returned error is ErrFrameTooLarge. Other errors may be of type
|
||||
// ConnectionError, StreamError, or anything else from the underlying
|
||||
// reader.
|
||||
func (fr *Framer) ReadFrame() (Frame, error) {
|
||||
fr.errDetail = nil
|
||||
if fr.lastFrame != nil {
|
||||
fr.lastFrame.invalidate()
|
||||
}
|
||||
|
@ -383,12 +473,71 @@ func (fr *Framer) ReadFrame() (Frame, error) {
|
|||
}
|
||||
f, err := typeFrameParser(fh.Type)(fh, payload)
|
||||
if err != nil {
|
||||
if ce, ok := err.(connError); ok {
|
||||
return nil, fr.connError(ce.Code, ce.Reason)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
fr.lastFrame = f
|
||||
if err := fr.checkFrameOrder(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fr.logReads {
|
||||
log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
|
||||
}
|
||||
if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
|
||||
return fr.readMetaFrame(f.(*HeadersFrame))
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// connError returns ConnectionError(code) but first
|
||||
// stashes away a public reason so the caller can optionally relay it
|
||||
// to the peer before hanging up on them. This might help others debug
|
||||
// their implementations.
|
||||
func (fr *Framer) connError(code ErrCode, reason string) error {
|
||||
fr.errDetail = errors.New(reason)
|
||||
return ConnectionError(code)
|
||||
}
|
||||
|
||||
// checkFrameOrder reports an error if f is an invalid frame to return
|
||||
// next from ReadFrame. Mostly it checks whether HEADERS and
|
||||
// CONTINUATION frames are contiguous.
|
||||
func (fr *Framer) checkFrameOrder(f Frame) error {
|
||||
last := fr.lastFrame
|
||||
fr.lastFrame = f
|
||||
if fr.AllowIllegalReads {
|
||||
return nil
|
||||
}
|
||||
|
||||
fh := f.Header()
|
||||
if fr.lastHeaderStream != 0 {
|
||||
if fh.Type != FrameContinuation {
|
||||
return fr.connError(ErrCodeProtocol,
|
||||
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
|
||||
fh.Type, fh.StreamID,
|
||||
last.Header().Type, fr.lastHeaderStream))
|
||||
}
|
||||
if fh.StreamID != fr.lastHeaderStream {
|
||||
return fr.connError(ErrCodeProtocol,
|
||||
fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
|
||||
fh.StreamID, fr.lastHeaderStream))
|
||||
}
|
||||
} else if fh.Type == FrameContinuation {
|
||||
return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
|
||||
}
|
||||
|
||||
switch fh.Type {
|
||||
case FrameHeaders, FrameContinuation:
|
||||
if fh.Flags.Has(FlagHeadersEndHeaders) {
|
||||
fr.lastHeaderStream = 0
|
||||
} else {
|
||||
fr.lastHeaderStream = fh.StreamID
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// A DataFrame conveys arbitrary, variable-length sequences of octets
|
||||
// associated with a stream.
|
||||
// See http://http2.github.io/http2-spec/#rfc.section.6.1
|
||||
|
@ -417,7 +566,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
|||
// field is 0x0, the recipient MUST respond with a
|
||||
// connection error (Section 5.4.1) of type
|
||||
// PROTOCOL_ERROR.
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
|
||||
}
|
||||
f := &DataFrame{
|
||||
FrameHeader: fh,
|
||||
|
@ -435,7 +584,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
|||
// length of the frame payload, the recipient MUST
|
||||
// treat this as a connection error.
|
||||
// Filed: https://github.com/http2/http2-spec/issues/610
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
|
||||
}
|
||||
f.data = payload[:len(payload)-int(padSize)]
|
||||
return f, nil
|
||||
|
@ -575,6 +724,8 @@ type PingFrame struct {
|
|||
Data [8]byte
|
||||
}
|
||||
|
||||
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
|
||||
|
||||
func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
||||
if len(payload) != 8 {
|
||||
return nil, ConnectionError(ErrCodeFrameSize)
|
||||
|
@ -663,7 +814,7 @@ func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
|
|||
// See http://http2.github.io/http2-spec/#rfc.section.6.9
|
||||
type WindowUpdateFrame struct {
|
||||
FrameHeader
|
||||
Increment uint32
|
||||
Increment uint32 // never read with high bit set
|
||||
}
|
||||
|
||||
func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||
|
@ -740,7 +891,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
|
|||
// is received whose stream identifier field is 0x0, the recipient MUST
|
||||
// respond with a connection error (Section 5.4.1) of type
|
||||
// PROTOCOL_ERROR.
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
|
||||
}
|
||||
var padLength uint8
|
||||
if fh.Flags.Has(FlagHeadersPadded) {
|
||||
|
@ -870,10 +1021,10 @@ func (p PriorityParam) IsZero() bool {
|
|||
|
||||
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
||||
if fh.StreamID == 0 {
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
|
||||
}
|
||||
if len(payload) != 5 {
|
||||
return nil, ConnectionError(ErrCodeFrameSize)
|
||||
return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
|
||||
}
|
||||
v := binary.BigEndian.Uint32(payload[:4])
|
||||
streamID := v & 0x7fffffff // mask off high bit
|
||||
|
@ -943,13 +1094,12 @@ type ContinuationFrame struct {
|
|||
}
|
||||
|
||||
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||
if fh.StreamID == 0 {
|
||||
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
|
||||
}
|
||||
return &ContinuationFrame{fh, p}, nil
|
||||
}
|
||||
|
||||
func (f *ContinuationFrame) StreamEnded() bool {
|
||||
return f.FrameHeader.Flags.Has(FlagDataEndStream)
|
||||
}
|
||||
|
||||
func (f *ContinuationFrame) HeaderBlockFragment() []byte {
|
||||
f.checkValid()
|
||||
return f.headerFragBuf
|
||||
|
@ -1111,3 +1261,236 @@ type streamEnder interface {
|
|||
type headersEnder interface {
|
||||
HeadersEnded() bool
|
||||
}
|
||||
|
||||
type headersOrContinuation interface {
|
||||
headersEnder
|
||||
HeaderBlockFragment() []byte
|
||||
}
|
||||
|
||||
// A MetaHeadersFrame is the representation of one HEADERS frame and
|
||||
// zero or more contiguous CONTINUATION frames and the decoding of
|
||||
// their HPACK-encoded contents.
|
||||
//
|
||||
// This type of frame does not appear on the wire and is only returned
|
||||
// by the Framer when Framer.ReadMetaHeaders is set.
|
||||
type MetaHeadersFrame struct {
|
||||
*HeadersFrame
|
||||
|
||||
// Fields are the fields contained in the HEADERS and
|
||||
// CONTINUATION frames. The underlying slice is owned by the
|
||||
// Framer and must not be retained after the next call to
|
||||
// ReadFrame.
|
||||
//
|
||||
// Fields are guaranteed to be in the correct http2 order and
|
||||
// not have unknown pseudo header fields or invalid header
|
||||
// field names or values. Required pseudo header fields may be
|
||||
// missing, however. Use the MetaHeadersFrame.Pseudo accessor
|
||||
// method to access pseudo headers.
|
||||
Fields []hpack.HeaderField
|
||||
|
||||
// Truncated is whether the max header list size limit was hit
|
||||
// and Fields is incomplete. The hpack decoder state is still
|
||||
// valid, however.
|
||||
Truncated bool
|
||||
}
|
||||
|
||||
// PseudoValue returns the given pseudo header field's value.
|
||||
// The provided pseudo field should not contain the leading colon.
|
||||
func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
|
||||
for _, hf := range mh.Fields {
|
||||
if !hf.IsPseudo() {
|
||||
return ""
|
||||
}
|
||||
if hf.Name[1:] == pseudo {
|
||||
return hf.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// RegularFields returns the regular (non-pseudo) header fields of mh.
|
||||
// The caller does not own the returned slice.
|
||||
func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
|
||||
for i, hf := range mh.Fields {
|
||||
if !hf.IsPseudo() {
|
||||
return mh.Fields[i:]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PseudoFields returns the pseudo header fields of mh.
|
||||
// The caller does not own the returned slice.
|
||||
func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
|
||||
for i, hf := range mh.Fields {
|
||||
if !hf.IsPseudo() {
|
||||
return mh.Fields[:i]
|
||||
}
|
||||
}
|
||||
return mh.Fields
|
||||
}
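The accessors above split pseudo-headers from regular fields for readers of a MetaHeadersFrame. A small sketch of the typical conversion, assuming mh came from a Framer with ReadMetaHeaders set; convertMetaHeaders is a hypothetical helper and assumes imports "net/http" and "golang.org/x/net/http2".

```go
// convertMetaHeaders pulls the request pseudo-headers and copies the
// regular fields into an http.Header.
func convertMetaHeaders(mh *http2.MetaHeadersFrame) (method, path string, hdr http.Header) {
	method = mh.PseudoValue("method") // pseudo names are given without the leading colon
	path = mh.PseudoValue("path")
	hdr = make(http.Header)
	for _, hf := range mh.RegularFields() {
		hdr.Add(hf.Name, hf.Value)
	}
	return method, path, hdr
}
```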
|
||||
|
||||
func (mh *MetaHeadersFrame) checkPseudos() error {
|
||||
var isRequest, isResponse bool
|
||||
pf := mh.PseudoFields()
|
||||
for i, hf := range pf {
|
||||
switch hf.Name {
|
||||
case ":method", ":path", ":scheme", ":authority":
|
||||
isRequest = true
|
||||
case ":status":
|
||||
isResponse = true
|
||||
default:
|
||||
return pseudoHeaderError(hf.Name)
|
||||
}
|
||||
// Check for duplicates.
|
||||
// This would be a bad algorithm, but N is 4.
|
||||
// And this doesn't allocate.
|
||||
for _, hf2 := range pf[:i] {
|
||||
if hf.Name == hf2.Name {
|
||||
return duplicatePseudoHeaderError(hf.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if isRequest && isResponse {
|
||||
return errMixPseudoHeaderTypes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fr *Framer) maxHeaderStringLen() int {
|
||||
v := fr.maxHeaderListSize()
|
||||
if uint32(int(v)) == v {
|
||||
return int(v)
|
||||
}
|
||||
// They had a crazy big number for MaxHeaderBytes anyway,
|
||||
// so give them unlimited header lengths:
|
||||
return 0
|
||||
}
|
||||
|
||||
// readMetaFrame reads 0 or more CONTINUATION frames from fr and
// merges them into the provided hf and returns a MetaHeadersFrame
|
||||
// with the decoded hpack values.
|
||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
if fr.AllowIllegalReads {
|
||||
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
|
||||
}
|
||||
mh := &MetaHeadersFrame{
|
||||
HeadersFrame: hf,
|
||||
}
|
||||
var remainSize = fr.maxHeaderListSize()
|
||||
var sawRegular bool
|
||||
|
||||
var invalid error // pseudo header field errors
|
||||
hdec := fr.ReadMetaHeaders
|
||||
hdec.SetEmitEnabled(true)
|
||||
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
|
||||
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
|
||||
if !validHeaderFieldValue(hf.Value) {
|
||||
invalid = headerFieldValueError(hf.Value)
|
||||
}
|
||||
isPseudo := strings.HasPrefix(hf.Name, ":")
|
||||
if isPseudo {
|
||||
if sawRegular {
|
||||
invalid = errPseudoAfterRegular
|
||||
}
|
||||
} else {
|
||||
sawRegular = true
|
||||
if !validHeaderFieldName(hf.Name) {
|
||||
invalid = headerFieldNameError(hf.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if invalid != nil {
|
||||
hdec.SetEmitEnabled(false)
|
||||
return
|
||||
}
|
||||
|
||||
size := hf.Size()
|
||||
if size > remainSize {
|
||||
hdec.SetEmitEnabled(false)
|
||||
mh.Truncated = true
|
||||
return
|
||||
}
|
||||
remainSize -= size
|
||||
|
||||
mh.Fields = append(mh.Fields, hf)
|
||||
})
|
||||
// Lose reference to MetaHeadersFrame:
|
||||
defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
|
||||
|
||||
var hc headersOrContinuation = hf
|
||||
for {
|
||||
frag := hc.HeaderBlockFragment()
|
||||
if _, err := hdec.Write(frag); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
|
||||
if hc.HeadersEnded() {
|
||||
break
|
||||
}
|
||||
if f, err := fr.ReadFrame(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
|
||||
}
|
||||
}
|
||||
|
||||
mh.HeadersFrame.headerFragBuf = nil
|
||||
mh.HeadersFrame.invalidate()
|
||||
|
||||
if err := hdec.Close(); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
if invalid != nil {
|
||||
fr.errDetail = invalid
|
||||
return nil, StreamError{mh.StreamID, ErrCodeProtocol}
|
||||
}
|
||||
if err := mh.checkPseudos(); err != nil {
|
||||
fr.errDetail = err
|
||||
return nil, StreamError{mh.StreamID, ErrCodeProtocol}
|
||||
}
|
||||
return mh, nil
|
||||
}
|
||||
|
||||
func summarizeFrame(f Frame) string {
|
||||
var buf bytes.Buffer
|
||||
f.Header().writeDebug(&buf)
|
||||
switch f := f.(type) {
|
||||
case *SettingsFrame:
|
||||
n := 0
|
||||
f.ForeachSetting(func(s Setting) error {
|
||||
n++
|
||||
if n == 1 {
|
||||
buf.WriteString(", settings:")
|
||||
}
|
||||
fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
|
||||
return nil
|
||||
})
|
||||
if n > 0 {
|
||||
buf.Truncate(buf.Len() - 1) // remove trailing comma
|
||||
}
|
||||
case *DataFrame:
|
||||
data := f.Data()
|
||||
const max = 256
|
||||
if len(data) > max {
|
||||
data = data[:max]
|
||||
}
|
||||
fmt.Fprintf(&buf, " data=%q", data)
|
||||
if len(f.Data()) > max {
|
||||
fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
|
||||
}
|
||||
case *WindowUpdateFrame:
|
||||
if f.StreamID == 0 {
|
||||
buf.WriteString(" (conn)")
|
||||
}
|
||||
fmt.Fprintf(&buf, " incr=%v", f.Increment)
|
||||
case *PingFrame:
|
||||
fmt.Fprintf(&buf, " ping=%q", f.Data[:])
|
||||
case *GoAwayFrame:
|
||||
fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
|
||||
f.LastStreamID, f.ErrCode, f.debugData)
|
||||
case *RSTStreamFrame:
|
||||
fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
11  vendor/src/golang.org/x/net/http2/go15.go (vendored, new file)

@ -0,0 +1,11 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.5

package http2

import "net/http"

func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel }
3  vendor/src/golang.org/x/net/http2/gotrack.go (vendored)

@ -1,9 +1,6 @@
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Defensive debug-only utility to track that functions run on the
|
||||
// goroutine that they're supposed to.
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
|
@ -60,6 +57,7 @@ func init() {
|
|||
"server",
|
||||
"set-cookie",
|
||||
"strict-transport-security",
|
||||
"trailer",
|
||||
"transfer-encoding",
|
||||
"user-agent",
|
||||
"vary",
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
|
@ -145,7 +144,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
|||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.size() <= e.dynTab.maxSize
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
155  vendor/src/golang.org/x/net/http2/hpack/hpack.go (vendored)

@ -1,7 +1,6 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hpack implements HPACK, a compression format for
|
||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||
|
@ -42,7 +41,24 @@ type HeaderField struct {
|
|||
Sensitive bool
|
||||
}
|
||||
|
||||
func (hf *HeaderField) size() uint32 {
|
||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
||||
// That is, it reports whether it starts with a colon.
|
||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
||||
// though.
|
||||
func (hf HeaderField) IsPseudo() bool {
|
||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
||||
}
|
||||
|
||||
func (hf HeaderField) String() string {
|
||||
var suffix string
|
||||
if hf.Sensitive {
|
||||
suffix = " (sensitive)"
|
||||
}
|
||||
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
|
||||
}
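The new IsPseudo accessor and the String form are easy to see with a literal field; a tiny sketch, assuming imports "fmt" and "golang.org/x/net/http2/hpack" (Size is documented just below).

```go
func demoHeaderField() {
	hf := hpack.HeaderField{Name: ":path", Value: "/index.html"}
	fmt.Println(hf.IsPseudo()) // true
	fmt.Println(hf)            // header field ":path" = "/index.html"
	fmt.Println(hf.Size())     // 48 = len(":path") + len("/index.html") + 32
}
```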
|
||||
|
||||
// Size returns the size of an entry per RFC 7540 section 5.2.
|
||||
func (hf HeaderField) Size() uint32 {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
|
||||
// "The size of the dynamic table is the sum of the size of
|
||||
// its entries. The size of an entry is the sum of its name's
|
||||
|
@ -64,23 +80,65 @@ type Decoder struct {
|
|||
dynTab dynamicTable
|
||||
emit func(f HeaderField)
|
||||
|
||||
emitEnabled bool // whether calls to emit are enabled
|
||||
maxStrLen int // 0 means unlimited
|
||||
|
||||
// buf is the unparsed buffer. It's only written to
|
||||
// saveBuf if it was truncated in the middle of a header
|
||||
// block. Because it's usually not owned, we can only
|
||||
// process it under Write.
|
||||
buf []byte // usually not owned
|
||||
buf []byte // not owned; only valid during Write
|
||||
|
||||
// saveBuf is previous data passed to Write which we weren't able
|
||||
// to fully parse before. Unlike buf, we own this data.
|
||||
saveBuf bytes.Buffer
|
||||
}
|
||||
|
||||
func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
// NewDecoder returns a new decoder with the provided maximum dynamic
|
||||
// table size. The emitFunc will be called for each valid field
|
||||
// parsed, in the same goroutine as calls to Write, before Write returns.
|
||||
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
d := &Decoder{
|
||||
emit: emitFunc,
|
||||
emit: emitFunc,
|
||||
emitEnabled: true,
|
||||
}
|
||||
d.dynTab.allowedMaxSize = maxSize
|
||||
d.dynTab.setMaxSize(maxSize)
|
||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
||||
return d
|
||||
}
|
||||
|
||||
// ErrStringLength is returned by Decoder.Write when the max string length
|
||||
// (as configured by Decoder.SetMaxStringLength) would be violated.
|
||||
var ErrStringLength = errors.New("hpack: string too long")
|
||||
|
||||
// SetMaxStringLength sets the maximum size of a HeaderField name or
|
||||
// value string. If a string exceeds this length (even after any
|
||||
// decompression), Write will return ErrStringLength.
|
||||
// A value of 0 means unlimited and is the default from NewDecoder.
|
||||
func (d *Decoder) SetMaxStringLength(n int) {
|
||||
d.maxStrLen = n
|
||||
}
|
||||
|
||||
// SetEmitFunc changes the callback used when new header fields
|
||||
// are decoded.
|
||||
// It must be non-nil. It does not affect EmitEnabled.
|
||||
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
|
||||
d.emit = emitFunc
|
||||
}
|
||||
|
||||
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
|
||||
// should be called. The default is true.
|
||||
//
|
||||
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
|
||||
// while still decoding and keeping in-sync with decoder state, but
|
||||
// without doing unnecessary decompression or generating unnecessary
|
||||
// garbage for header fields past the limit.
|
||||
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
|
||||
|
||||
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
|
||||
// are currently enabled. The default is true.
|
||||
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
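SetEmitFunc and SetEmitEnabled are the hooks the Framer uses to enforce MAX_HEADER_LIST_SIZE; a standalone decoder can apply the same pattern. A hedged sketch, where block is assumed to hold HPACK-encoded bytes and decodeWithBudget is a hypothetical helper (assumes import "golang.org/x/net/http2/hpack").

```go
// decodeWithBudget decodes block, keeping fields until budget is exhausted,
// while letting the HPACK dynamic-table state stay in sync.
func decodeWithBudget(block []byte, budget uint32) ([]hpack.HeaderField, bool, error) {
	var (
		fields    []hpack.HeaderField
		truncated bool
	)
	dec := hpack.NewDecoder(4096, nil) // 4096: initial dynamic table size
	dec.SetEmitFunc(func(hf hpack.HeaderField) {
		if hf.Size() > budget {
			truncated = true
			dec.SetEmitEnabled(false) // keep decoding for state, drop the output
			return
		}
		budget -= hf.Size()
		fields = append(fields, hf)
	})
	if _, err := dec.Write(block); err != nil {
		return nil, truncated, err
	}
	if err := dec.Close(); err != nil {
		return nil, truncated, err
	}
	return fields, truncated, nil
}
```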
|
||||
|
||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||
// underlying buffers for garbage reasons.
|
||||
|
||||
|
@ -122,7 +180,7 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
|
|||
|
||||
func (dt *dynamicTable) add(f HeaderField) {
|
||||
dt.ents = append(dt.ents, f)
|
||||
dt.size += f.size()
|
||||
dt.size += f.Size()
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
|
@ -130,7 +188,7 @@ func (dt *dynamicTable) add(f HeaderField) {
|
|||
func (dt *dynamicTable) evict() {
|
||||
base := dt.ents // keep base pointer of slice
|
||||
for dt.size > dt.maxSize {
|
||||
dt.size -= dt.ents[0].size()
|
||||
dt.size -= dt.ents[0].Size()
|
||||
dt.ents = dt.ents[1:]
|
||||
}
|
||||
|
||||
|
@ -247,15 +305,23 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
|
|||
|
||||
for len(d.buf) > 0 {
|
||||
err = d.parseHeaderFieldRepr()
|
||||
if err != nil {
|
||||
if err == errNeedMore {
|
||||
err = nil
|
||||
d.saveBuf.Write(d.buf)
|
||||
if err == errNeedMore {
|
||||
// Extra paranoia, making sure saveBuf won't
|
||||
// get too large. All the varint and string
|
||||
// reading code earlier should already catch
|
||||
// overlong things and return ErrStringLength,
|
||||
// but keep this as a last resort.
|
||||
const varIntOverhead = 8 // conservative
|
||||
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
|
||||
return 0, ErrStringLength
|
||||
}
|
||||
d.saveBuf.Write(d.buf)
|
||||
return len(p), nil
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
|
@ -323,9 +389,8 @@ func (d *Decoder) parseFieldIndexed() error {
|
|||
if !ok {
|
||||
return DecodingError{InvalidIndexError(idx)}
|
||||
}
|
||||
d.emit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
d.buf = buf
|
||||
return nil
|
||||
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
|
@ -337,6 +402,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
|||
}
|
||||
|
||||
var hf HeaderField
|
||||
wantStr := d.emitEnabled || it.indexed()
|
||||
if nameIdx > 0 {
|
||||
ihf, ok := d.at(nameIdx)
|
||||
if !ok {
|
||||
|
@ -344,12 +410,12 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
|||
}
|
||||
hf.Name = ihf.Name
|
||||
} else {
|
||||
hf.Name, buf, err = readString(buf)
|
||||
hf.Name, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
hf.Value, buf, err = readString(buf)
|
||||
hf.Value, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -358,7 +424,18 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
|||
d.dynTab.add(hf)
|
||||
}
|
||||
hf.Sensitive = it.sensitive()
|
||||
d.emit(hf)
|
||||
return d.callEmit(hf)
|
||||
}
|
||||
|
||||
func (d *Decoder) callEmit(hf HeaderField) error {
|
||||
if d.maxStrLen != 0 {
|
||||
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
}
|
||||
if d.emitEnabled {
|
||||
d.emit(hf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -420,7 +497,15 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
|||
return 0, origP, errNeedMore
|
||||
}
|
||||
|
||||
func readString(p []byte) (s string, remain []byte, err error) {
|
||||
// readString decodes an hpack string from p.
|
||||
//
|
||||
// wantStr is whether s will be used. If false, decompression and
|
||||
// []byte->string garbage are skipped if s will be ignored
|
||||
// anyway. This does mean that huffman decoding errors for non-indexed
|
||||
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
|
||||
// is returning an error anyway, and because they're not indexed, the error
|
||||
// won't affect the decoding state.
|
||||
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
|
||||
if len(p) == 0 {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
|
@ -429,17 +514,29 @@ func readString(p []byte) (s string, remain []byte, err error) {
|
|||
if err != nil {
|
||||
return "", p, err
|
||||
}
|
||||
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
|
||||
return "", nil, ErrStringLength
|
||||
}
|
||||
if uint64(len(p)) < strLen {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
if !isHuff {
|
||||
return string(p[:strLen]), p[strLen:], nil
|
||||
if wantStr {
|
||||
s = string(p[:strLen])
|
||||
}
|
||||
return s, p[strLen:], nil
|
||||
}
|
||||
|
||||
// TODO: optimize this garbage:
|
||||
var buf bytes.Buffer
|
||||
if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {
|
||||
return "", nil, err
|
||||
if wantStr {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset() // don't trust others
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
|
||||
buf.Reset()
|
||||
return "", nil, err
|
||||
}
|
||||
s = buf.String()
|
||||
buf.Reset() // be nice to GC
|
||||
}
|
||||
return buf.String(), p[strLen:], nil
|
||||
return s, p[strLen:], nil
|
||||
}
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
@ -22,15 +22,46 @@ func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
|||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
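HuffmanDecodeToString is new in this change; the canonical test vector from RFC 7541 appendix C.4.1 shows what it does. The byte literal below is that RFC example, not project data, and the snippet assumes imports "fmt", "log" and "golang.org/x/net/http2/hpack".

```go
func demoHuffman() {
	enc := []byte{0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff}
	s, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // www.example.com
}
```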
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
n := rootHuffmanNode
|
||||
cur, nbits := uint(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
nbits += 8
|
||||
for nbits >= 8 {
|
||||
n = n.children[byte(cur>>(nbits-8))]
|
||||
idx := byte(cur >> (nbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
|
@ -48,7 +79,7 @@ func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
|||
nbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
return nil
|
||||
}
|
||||
|
||||
type node struct {
|
||||
|
@ -67,10 +98,10 @@ func newInternalNode() *node {
|
|||
var rootHuffmanNode = newInternalNode()
|
||||
|
||||
func init() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
for i, code := range huffmanCodes {
|
||||
if i > 255 {
|
||||
panic("too many huffman codes")
|
||||
}
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
|
@ -10,7 +9,7 @@ func pair(name, value string) HeaderField {
|
|||
}
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = []HeaderField{
|
||||
var staticTable = [...]HeaderField{
|
||||
pair(":authority", ""), // index 1 (1-based)
|
||||
pair(":method", "GET"),
|
||||
pair(":method", "POST"),
|
||||
|
@ -74,7 +73,7 @@ var staticTable = []HeaderField{
|
|||
pair("www-authenticate", ""),
|
||||
}
|
||||
|
||||
var huffmanCodes = []uint32{
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
|
@ -333,7 +332,7 @@ var huffmanCodes = []uint32{
|
|||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = []uint8{
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
|
249  vendor/src/golang.org/x/net/http2/http2.go (vendored)
@ -1,31 +1,51 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This is a work in progress. This package is low-level and intended
|
||||
// to be used directly by very few people. Most users will use it
|
||||
// indirectly through integration with the net/http package. See
|
||||
// ConfigureServer. That ConfigureServer call will likely be automatic
|
||||
// or available via an empty import in the future.
|
||||
// This package is low-level and intended to be used directly by very
|
||||
// few people. Most users will use it indirectly through the automatic
|
||||
// use by the net/http package (from Go 1.6 and later).
|
||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||
// requires Go 1.6 or later)
|
||||
//
|
||||
// See http://http2.github.io/
|
||||
// See https://http2.github.io/ for more information on HTTP/2.
|
||||
//
|
||||
// See https://http2.golang.org/ for a test server running this code.
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var VerboseLogs = false
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") {
|
||||
VerboseLogs = true
|
||||
}
|
||||
if strings.Contains(e, "http2debug=2") {
|
||||
VerboseLogs = true
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
|
@ -141,17 +161,63 @@ func (s SettingID) String() string {
|
|||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
func validHeader(v string) bool {
|
||||
var (
|
||||
errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
|
||||
errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
|
||||
)
|
||||
|
||||
// validHeaderFieldName reports whether v is a valid header field name (key).
|
||||
// RFC 7230 says:
|
||||
// header-field = field-name ":" OWS field-value OWS
|
||||
// field-name = token
|
||||
// token = 1*tchar
|
||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||
// Further, http2 says:
|
||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||
// characters that are compared in a case-insensitive
|
||||
// fashion. However, header field names MUST be converted to
|
||||
// lowercase prior to their encoding in HTTP/2. "
|
||||
func validHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
// "Just as in HTTP/1.x, header field names are
|
||||
// strings of ASCII characters that are compared in a
|
||||
// case-insensitive fashion. However, header field
|
||||
// names MUST be converted to lowercase prior to their
|
||||
// encoding in HTTP/2. "
|
||||
if r >= 127 || ('A' <= r && r <= 'Z') {
|
||||
if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
|
||||
return false
|
||||
}
|
||||
if !isTokenTable[byte(r)] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// validHeaderFieldValue reports whether v is a valid header field value.
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// field-value = *( field-content / obs-fold )
|
||||
// obs-fold = N/A to http2, and deprecated
|
||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||
// field-vchar = VCHAR / obs-text
|
||||
// obs-text = %x80-FF
|
||||
// VCHAR = "any visible [USASCII] character"
|
||||
//
|
||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||
// that are not valid. While most of the values that can be encoded
|
||||
// will not alter header field parsing, carriage return (CR, ASCII
|
||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||
// 0x0) might be exploited by an attacker if they are translated
|
||||
// verbatim. Any request or response that contains a character not
|
||||
// permitted in a header field value MUST be treated as malformed
|
||||
// (Section 8.1.2.6). Valid characters are defined by the
|
||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||
//
|
||||
// This function does not (yet?) properly handle the rejection of
|
||||
// strings that begin or end with SP or HTAB.
|
||||
func validHeaderFieldValue(v string) bool {
|
||||
for i := 0; i < len(v); i++ {
|
||||
if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -247,3 +313,152 @@ func (w *bufferedWriter) Flush() error {
|
|||
w.bw = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
}
|
||||
return uint32(v)
|
||||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC2616, section 4.4.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *httpError) Error() string { return e.msg }
|
||||
func (e *httpError) Timeout() bool { return e.timeout }
|
||||
func (e *httpError) Temporary() bool { return true }
|
||||
|
||||
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||
|
||||
var isTokenTable = [127]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
type connectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
|
||||
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
|
||||
|
||||
type sorter struct {
|
||||
v []string // owned by sorter
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int { return len(s.v) }
|
||||
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
|
||||
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
|
||||
|
||||
// Keys returns the sorted keys of h.
|
||||
//
|
||||
// The returned slice is only valid until s is used again or returned to
|
||||
// its pool.
|
||||
func (s *sorter) Keys(h http.Header) []string {
|
||||
keys := s.v[:0]
|
||||
for k := range h {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.v = keys
|
||||
sort.Sort(s)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *sorter) SortStrings(ss []string) {
|
||||
// Our sorter works on s.v, which the sorter owns, so
|
||||
// stash it away while we sort the user's buffer.
|
||||
save := s.v
|
||||
s.v = ss
|
||||
sort.Sort(s)
|
||||
s.v = save
|
||||
}
|
||||
|
11  vendor/src/golang.org/x/net/http2/not_go15.go (vendored, new file)

@ -0,0 +1,11 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.5

package http2

import "net/http"

func requestCancel(req *http.Request) <-chan struct{} { return nil }
13  vendor/src/golang.org/x/net/http2/not_go16.go (vendored, new file)

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.6

package http2

import "net/http"

func configureTransport(t1 *http.Transport) (*Transport, error) {
	return nil, errTransportVersion
}
150  vendor/src/golang.org/x/net/http2/pipe.go (vendored)
@ -1,43 +1,147 @@
|
|||
// Copyright 2014 The Go Authors.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
|
||||
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
|
||||
// underlying buffer is an interface. (io.Pipe is always unbuffered)
|
||||
type pipe struct {
|
||||
b buffer
|
||||
c sync.Cond
|
||||
m sync.Mutex
|
||||
mu sync.Mutex
|
||||
c sync.Cond // c.L lazily initialized to &p.mu
|
||||
b pipeBuffer
|
||||
err error // read error once empty. non-nil means closed.
|
||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||
donec chan struct{} // closed on error
|
||||
readFn func() // optional code to run in Read before error
|
||||
}
|
||||
|
||||
type pipeBuffer interface {
|
||||
Len() int
|
||||
io.Writer
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into p.
|
||||
func (r *pipe) Read(p []byte) (n int, err error) {
|
||||
r.c.L.Lock()
|
||||
defer r.c.L.Unlock()
|
||||
for r.b.Len() == 0 && !r.b.closed {
|
||||
r.c.Wait()
|
||||
func (p *pipe) Read(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
for {
|
||||
if p.breakErr != nil {
|
||||
return 0, p.breakErr
|
||||
}
|
||||
if p.b.Len() > 0 {
|
||||
return p.b.Read(d)
|
||||
}
|
||||
if p.err != nil {
|
||||
if p.readFn != nil {
|
||||
p.readFn() // e.g. copy trailers
|
||||
p.readFn = nil // not sticky like p.err
|
||||
}
|
||||
return 0, p.err
|
||||
}
|
||||
p.c.Wait()
|
||||
}
|
||||
return r.b.Read(p)
|
||||
}
|
||||
|
||||
var errClosedPipeWrite = errors.New("write on closed buffer")
|
||||
|
||||
// Write copies bytes from p into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (w *pipe) Write(p []byte) (n int, err error) {
|
||||
w.c.L.Lock()
|
||||
defer w.c.L.Unlock()
|
||||
defer w.c.Signal()
|
||||
return w.b.Write(p)
|
||||
func (p *pipe) Write(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if p.err != nil {
|
||||
return 0, errClosedPipeWrite
|
||||
}
|
||||
return p.b.Write(d)
|
||||
}
|
||||
|
||||
func (c *pipe) Close(err error) {
|
||||
c.c.L.Lock()
|
||||
defer c.c.L.Unlock()
|
||||
defer c.c.Signal()
|
||||
c.b.Close(err)
|
||||
// CloseWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err after all data has been
|
||||
// read.
|
||||
//
|
||||
// The error must be non-nil.
|
||||
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
|
||||
|
||||
// BreakWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err immediately, without
|
||||
// waiting for unread data.
|
||||
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
|
||||
|
||||
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
|
||||
// in the caller's goroutine before returning the error.
|
||||
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||
|
||||
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||
if err == nil {
|
||||
panic("err must be non-nil")
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if *dst != nil {
|
||||
// Already been done.
|
||||
return
|
||||
}
|
||||
p.readFn = fn
|
||||
*dst = err
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
|
||||
// requires p.mu be held.
|
||||
func (p *pipe) closeDoneLocked() {
|
||||
if p.donec == nil {
|
||||
return
|
||||
}
|
||||
// Close if unclosed. This isn't racy since we always
|
||||
// hold p.mu while closing.
|
||||
select {
|
||||
case <-p.donec:
|
||||
default:
|
||||
close(p.donec)
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||
func (p *pipe) Err() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.breakErr != nil {
|
||||
return p.breakErr
|
||||
}
|
||||
return p.err
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed if and when this pipe is closed
|
||||
// with CloseWithError.
|
||||
func (p *pipe) Done() <-chan struct{} {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.donec == nil {
|
||||
p.donec = make(chan struct{})
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
// Already hit an error.
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
}
|
||||
return p.donec
|
||||
}
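Because pipe is unexported, the pattern above is only usable inside the http2 package (for example in a test). A minimal, hedged sketch of the producer/consumer usage the type supports, assuming imports "bytes", "fmt", "io" and "io/ioutil".

```go
func demoPipe() {
	p := &pipe{b: new(bytes.Buffer)} // any pipeBuffer (Len/Read/Write) works here
	go func() {
		p.Write([]byte("hello"))
		p.CloseWithError(io.EOF) // readers see EOF once the buffer is drained
	}()
	got, err := ioutil.ReadAll(p)   // blocks until data or an error is available
	fmt.Printf("%q %v\n", got, err) // "hello" <nil>
}
```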
|
||||
|
|
1116  vendor/src/golang.org/x/net/http2/server.go (vendored): diff suppressed because it is too large
1689  vendor/src/golang.org/x/net/http2/transport.go (vendored): diff suppressed because it is too large
98  vendor/src/golang.org/x/net/http2/write.go (vendored)
|
@ -1,15 +1,13 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
|
@ -26,7 +24,11 @@ type writeFramer interface {
|
|||
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||
//
|
||||
// This interface is implemented by *serverConn.
|
||||
// TODO: use it from the client code too, once it exists.
|
||||
//
|
||||
// TODO: decide whether to a) use this in the client code (which didn't
|
||||
// end up using this yet, because it has a simpler design, not
|
||||
// currently implementing priorities), or b) delete this and
|
||||
// make the server code a bit more concrete.
|
||||
type writeContext interface {
|
||||
Framer() *Framer
|
||||
Flush() error
|
||||
|
@ -44,6 +46,11 @@ func endsStream(w writeFramer) bool {
|
|||
return v.endStream
|
||||
case *writeResHeaders:
|
||||
return v.endStream
|
||||
case nil:
|
||||
// This can only happen if the caller reuses w after it's
|
||||
// been intentionally nil'ed out to prevent use. Keep this
|
||||
// here to catch future refactoring breaking it.
|
||||
panic("endsStream called on nil writeFramer")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -89,6 +96,16 @@ func (w *writeData) writeFrame(ctx writeContext) error {
|
|||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||
}
|
||||
|
||||
// handlerPanicRST is the message sent from handler goroutines when
|
||||
// the handler panics.
|
||||
type handlerPanicRST struct {
|
||||
StreamID uint32
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
||||
}
|
||||
|
||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||
}
|
||||
|
@ -106,40 +123,48 @@ func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
|||
}
|
||||
|
||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||
// for HTTP response headers from a server handler.
|
||||
// for HTTP response headers or trailers from a server handler.
|
||||
type writeResHeaders struct {
|
||||
streamID uint32
|
||||
httpResCode int
|
||||
httpResCode int // 0 means no ":status" line
|
||||
h http.Header // may be nil
|
||||
trailers []string // if non-nil, which keys of h to write. nil means all.
|
||||
endStream bool
|
||||
|
||||
date string
|
||||
contentType string
|
||||
contentLength string
|
||||
}
|
||||
|
||||
func encKV(enc *hpack.Encoder, k, v string) {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: server encoding header %q = %q", k, v)
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)})
|
||||
for k, vv := range w.h {
|
||||
k = lowerHeader(k)
|
||||
for _, v := range vv {
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if k == "transfer-encoding" && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
|
||||
if w.httpResCode != 0 {
|
||||
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
||||
}
|
||||
|
||||
encodeHeaders(enc, w.h, w.trailers)
|
||||
|
||||
if w.contentType != "" {
|
||||
enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType})
|
||||
encKV(enc, "content-type", w.contentType)
|
||||
}
|
||||
if w.contentLength != "" {
|
||||
enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength})
|
||||
encKV(enc, "content-length", w.contentLength)
|
||||
}
|
||||
if w.date != "" {
|
||||
encKV(enc, "date", w.date)
|
||||
}
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 {
|
||||
if len(headerBlock) == 0 && w.trailers == nil {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
|
@ -185,7 +210,7 @@ type write100ContinueHeadersFrame struct {
|
|||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
|
||||
encKV(enc, ":status", "100")
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: buf.Bytes(),
|
||||
|
@ -202,3 +227,36 @@ type writeWindowUpdate struct {
|
|||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||
}
|
||||
|
||||
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||
if keys == nil {
|
||||
sorter := sorterPool.Get().(*sorter)
|
||||
// Using defer here, since the returned keys from the
|
||||
// sorter.Keys method is only valid until the sorter
|
||||
// is returned:
|
||||
defer sorterPool.Put(sorter)
|
||||
keys = sorter.Keys(h)
|
||||
}
|
||||
for _, k := range keys {
|
||||
vv := h[k]
|
||||
k = lowerHeader(k)
|
||||
if !validHeaderFieldName(k) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
isTE := k == "transfer-encoding"
|
||||
for _, v := range vv {
|
||||
if !validHeaderFieldValue(v) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if isTE && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
encKV(enc, k, v)
|
||||
}
|
||||
}
|
||||
}
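encodeHeaders above is unexported, but the same pattern (lowercase names, stable key order, skip connection-specific fields) applies to any code feeding an hpack.Encoder. A rough sketch under those assumptions, not the package's own API; assumes imports "bytes", "net/http", "sort", "strings" and "golang.org/x/net/http2/hpack".

```go
// encodeHeaderBlock produces a header block fragment suitable for
// Framer.WriteHeaders from an http.Header.
func encodeHeaderBlock(h http.Header) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	keys := make([]string, 0, len(h))
	for k := range h {
		keys = append(keys, k)
	}
	sort.Strings(keys) // stable output, like the pooled sorter above
	for _, k := range keys {
		lk := strings.ToLower(k) // HTTP/2 header names must be lowercase
		for _, v := range h[k] {
			if lk == "transfer-encoding" && v != "trailers" {
				continue // connection-specific; not allowed in HTTP/2
			}
			enc.WriteField(hpack.HeaderField{Name: lk, Value: v})
		}
	}
	return buf.Bytes()
}
```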
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
|
||||
// Licensed under the same terms as Go itself:
|
||||
// https://code.google.com/p/go/source/browse/LICENSE
|
||||
|
||||
package http2
|
||||
|
||||
|
|
15  vendor/src/golang.org/x/net/trace/trace.go (vendored)
|
@ -95,11 +95,14 @@ var DebugUseAfterFinish = false
|
|||
//
|
||||
// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
|
||||
var AuthRequest = func(req *http.Request) (any, sensitive bool) {
|
||||
// RemoteAddr is commonly in the form "IP" or "IP:port".
|
||||
// If it is in the form "IP:port", split off the port.
|
||||
host, _, err := net.SplitHostPort(req.RemoteAddr)
|
||||
switch {
|
||||
case err != nil: // Badly formed address; fail closed.
|
||||
return false, false
|
||||
case host == "localhost" || host == "127.0.0.1" || host == "::1":
|
||||
if err != nil {
|
||||
host = req.RemoteAddr
|
||||
}
|
||||
switch host {
|
||||
case "localhost", "127.0.0.1", "::1":
|
||||
return true, true
|
||||
default:
|
||||
return false, false
|
||||
|
@ -113,6 +116,7 @@ func init() {
|
|||
http.Error(w, "not allowed", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
Render(w, req, sensitive)
|
||||
})
|
||||
http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
|
||||
|
@ -121,6 +125,7 @@ func init() {
|
|||
http.Error(w, "not allowed", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
RenderEvents(w, req, sensitive)
|
||||
})
|
||||
}
|
||||
|
@ -172,7 +177,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) {
|
|||
|
||||
completedMu.RLock()
|
||||
data.Families = make([]string, 0, len(completedTraces))
|
||||
for fam, _ := range completedTraces {
|
||||
for fam := range completedTraces {
|
||||
data.Families = append(data.Families, fam)
|
||||
}
|
||||
completedMu.RUnlock()
|
||||
|
|
|
@ -144,6 +144,8 @@ type frameHandler interface {
|
|||
}
|
||||
|
||||
// Conn represents a WebSocket connection.
|
||||
//
|
||||
// Multiple goroutines may invoke methods on a Conn simultaneously.
|
||||
type Conn struct {
|
||||
config *Config
|
||||
request *http.Request
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.5.3
|
||||
- 1.6
|
||||
|
||||
before_install:
|
||||
- go get github.com/axw/gocov/gocov
|
||||
- go get github.com/mattn/goveralls
|
||||
|
@ -11,4 +15,3 @@ install:
|
|||
|
||||
script:
|
||||
- make test testrace
|
||||
- make coverage
|
||||
|
|
|
@ -1,16 +1,39 @@
|
|||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contribution to grpc! Here is some guideline
|
||||
We definitely welcome patches and contribution to grpc! Here are some guidelines
|
||||
and information about how to do so.
|
||||
|
||||
## Getting started
|
||||
## Sending patches
|
||||
|
||||
### Legal requirements
|
||||
### Getting started
|
||||
|
||||
1. Check out the code:
|
||||
|
||||
$ go get google.golang.org/grpc
|
||||
$ cd $GOPATH/src/google.golang.org/grpc
|
||||
|
||||
1. Create a fork of the grpc-go repository.
|
||||
1. Add your fork as a remote:
|
||||
|
||||
$ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
|
||||
|
||||
1. Make changes, commit them.
|
||||
1. Run the test suite:
|
||||
|
||||
$ make test
|
||||
|
||||
1. Push your changes to your fork:
|
||||
|
||||
$ git push fork ...
|
||||
|
||||
1. Open a pull request.
|
||||
|
||||
## Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
|
||||
### Filing Issues
|
||||
## Filing Issues
|
||||
When filing an issue, make sure to answer these five questions:
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
|
|
29 vendor/src/google.golang.org/grpc/Makefile (vendored)
@ -1,15 +1,3 @@
|
|||
.PHONY: \
|
||||
all \
|
||||
deps \
|
||||
updatedeps \
|
||||
testdeps \
|
||||
updatetestdeps \
|
||||
build \
|
||||
proto \
|
||||
test \
|
||||
testrace \
|
||||
clean \
|
||||
|
||||
all: test testrace
|
||||
|
||||
deps:
|
||||
|
@ -32,7 +20,7 @@ proto:
|
|||
echo "error: protoc not installed" >&2; \
|
||||
exit 1; \
|
||||
fi
|
||||
go get -v github.com/golang/protobuf/protoc-gen-go
|
||||
go get -u -v github.com/golang/protobuf/protoc-gen-go
|
||||
for file in $$(git ls-files '*.proto'); do \
|
||||
protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
|
||||
done
|
||||
|
@ -44,7 +32,20 @@ testrace: testdeps
|
|||
go test -v -race -cpu 1,4 google.golang.org/grpc/...
|
||||
|
||||
clean:
|
||||
go clean google.golang.org/grpc/...
|
||||
go clean -i google.golang.org/grpc/...
|
||||
|
||||
coverage: testdeps
|
||||
./coverage.sh --coveralls
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
deps \
|
||||
updatedeps \
|
||||
testdeps \
|
||||
updatetestdeps \
|
||||
build \
|
||||
proto \
|
||||
test \
|
||||
testrace \
|
||||
clean \
|
||||
coverage
|
||||
|
|
4 vendor/src/google.golang.org/grpc/README.md (vendored)
@ -7,7 +7,7 @@ The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open s
|
|||
Installation
|
||||
------------
|
||||
|
||||
To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run:
|
||||
To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
|
||||
|
||||
```
|
||||
$ go get google.golang.org/grpc
|
||||
|
@ -16,7 +16,7 @@ $ go get google.golang.org/grpc
|
|||
Prerequisites
|
||||
-------------
|
||||
|
||||
This requires Go 1.4 or above.
|
||||
This requires Go 1.5 or later.
|
||||
|
||||
Constraints
|
||||
-----------
|
||||
|
|
80 vendor/src/google.golang.org/grpc/backoff.go (vendored, new file)
@ -0,0 +1,80 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var (
|
||||
DefaultBackoffConfig = BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
baseDelay: 1.0 * time.Second,
|
||||
factor: 1.6,
|
||||
jitter: 0.2,
|
||||
}
|
||||
)
|
||||
|
||||
// backoffStrategy defines the methodology for backing off after a grpc
|
||||
// connection failure.
|
||||
//
|
||||
// This is unexported until the GRPC project decides whether or not to allow
|
||||
// alternative backoff strategies. Once a decision is made, this type and its
|
||||
// method may be exported.
|
||||
type backoffStrategy interface {
|
||||
// backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default GRPC backoff strategy.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
|
||||
// TODO(stevvooe): The following fields are not exported, as allowing
|
||||
// changes would violate the current GRPC specification for backoff. If
|
||||
// GRPC decides to allow more interesting backoff strategies, these fields
|
||||
// may be opened up in the future.
|
||||
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay time.Duration
|
||||
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor float64
|
||||
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter float64
|
||||
}
|
||||
|
||||
func setDefaults(bc *BackoffConfig) {
|
||||
md := bc.MaxDelay
|
||||
*bc = DefaultBackoffConfig
|
||||
|
||||
if md > 0 {
|
||||
bc.MaxDelay = md
|
||||
}
|
||||
}
|
||||
|
||||
func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return bc.baseDelay
|
||||
}
|
||||
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= bc.factor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
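backoff.go above implements the connection-backoff algorithm from the gRPC spec: exponential growth from a 1s base by a factor of 1.6, capped at MaxDelay, with ±20% jitter. The following standalone sketch repeats the same arithmetic with those constants purely to illustrate the delay schedule; it is not part of this change.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Same parameters as DefaultBackoffConfig above.
const (
	baseDelay = 1 * time.Second
	maxDelay  = 120 * time.Second
	factor    = 1.6
	jitter    = 0.2
)

func delay(retries int) time.Duration {
	if retries == 0 {
		return baseDelay
	}
	backoff, max := float64(baseDelay), float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	// Spread delays by ±20% so a burst of clients does not retry in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	// Nominal (pre-jitter) sequence: 1s, 1.6s, 2.56s, 4.1s, 6.6s, ... capped at 120s.
	for r := 0; r <= 10; r++ {
		fmt.Printf("retry %2d: ~%v\n", r, delay(r))
	}
}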
39 vendor/src/google.golang.org/grpc/call.go (vendored)
@ -34,13 +34,13 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
|
@ -48,16 +48,16 @@ import (
|
|||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
var err error
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p := &parser{s: stream}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
if err = recv(p, codec, reply); err != nil {
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream
|
|||
}
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
stream, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -81,8 +81,11 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t
|
|||
}
|
||||
}
|
||||
}()
|
||||
// TODO(zhaoq): Support compression.
|
||||
outBuf, err := encode(codec, args, compressionNone)
|
||||
var cbuf *bytes.Buffer
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
outBuf, err := encode(codec, args, compressor, cbuf)
|
||||
if err != nil {
|
||||
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
|
@ -94,16 +97,9 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t
|
|||
return stream, nil
|
||||
}
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
// Invoke is called by the generated code. It sends the RPC request on the
|
||||
// wire and returns after response is received.
|
||||
// Invoke sends the RPC request on the wire and returns after response is received.
|
||||
// Invoke is called by generated code. Also users can call Invoke directly when it
|
||||
// is really needed in their use cases.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
|
||||
var c callInfo
|
||||
for _, o := range opts {
|
||||
|
@ -153,6 +149,9 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
t, err = cc.dopts.picker.Pick(ctx)
|
||||
if err != nil {
|
||||
if lastErr != nil {
|
||||
|
@ -164,7 +163,7 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
lastErr = err
|
||||
|
@ -176,7 +175,7 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
return toRPCErr(err)
|
||||
}
|
||||
// Receive the response
|
||||
lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
|
||||
lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
|
||||
if _, ok := lastErr.(transport.ConnectionError); ok {
|
||||
continue
|
||||
}
|
||||
|
@ -187,6 +186,6 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
if lastErr != nil {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return Errorf(stream.StatusCode(), stream.StatusDesc())
|
||||
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
|
||||
}
|
||||
}
|
||||
|
|
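The reworded Invoke comment above notes that users can call Invoke directly when their use case requires it. A sketch of such a direct call; the target address, method path, and Echo message types are placeholders (real code would use protoc-generated types from its own package):

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// EchoRequest and EchoReply stand in for protoc-generated messages. They
// implement proto.Message so the default protoCodec can marshal them
// (no fields, so the encoded payload is empty).
type EchoRequest struct{}

func (*EchoRequest) Reset()         {}
func (*EchoRequest) String() string { return "EchoRequest{}" }
func (*EchoRequest) ProtoMessage()  {}

type EchoReply struct{}

func (*EchoReply) Reset()         {}
func (*EchoReply) String() string { return "EchoReply{}" }
func (*EchoReply) ProtoMessage()  {}

var _ proto.Message = (*EchoRequest)(nil)

func main() {
	conn, err := grpc.Dial("localhost:7777", grpc.WithInsecure()) // placeholder target
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// The method name is the full /<package>.<service>/<method> path that
	// protoc-gen-go bakes into its generated client stubs.
	if err := grpc.Invoke(context.Background(), "/echo.Echo/Echo", &EchoRequest{}, &EchoReply{}, conn); err != nil {
		log.Fatalf("invoke: %v", err)
	}
}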
69 vendor/src/google.golang.org/grpc/clientconn.go (vendored)
@ -52,10 +52,10 @@ var (
|
|||
// ErrUnspecTarget indicates that the target address is unspecified.
|
||||
ErrUnspecTarget = errors.New("grpc: target is unspecified")
|
||||
// ErrNoTransportSecurity indicates that there is no transport security
|
||||
// being set for ClientConn. Users should either set one or explicityly
|
||||
// being set for ClientConn. Users should either set one or explicitly
|
||||
// call WithInsecure DialOption to disable security.
|
||||
ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
|
||||
// ErrCredentialsMisuse indicates that users want to transmit security infomation
|
||||
// ErrCredentialsMisuse indicates that users want to transmit security information
|
||||
// (e.g., oauth2 token) which requires secure connection on an insecure
|
||||
// connection.
|
||||
ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
|
||||
|
@ -73,6 +73,9 @@ var (
|
|||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs backoffStrategy
|
||||
picker Picker
|
||||
block bool
|
||||
insecure bool
|
||||
|
@ -89,12 +92,57 @@ func WithCodec(c Codec) DialOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
|
||||
// compressor.
|
||||
func WithCompressor(cp Compressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
|
||||
// message decompressor.
|
||||
func WithDecompressor(dc Decompressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// WithPicker returns a DialOption which sets a picker for connection selection.
|
||||
func WithPicker(p Picker) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.picker = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||
// when backing off after failed connection attempts.
|
||||
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||
}
|
||||
|
||||
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||
// parameters after connection failures.
|
||||
//
|
||||
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
|
||||
// for use.
|
||||
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||
// Set defaults to ensure that provided BackoffConfig is valid and
|
||||
// unexported fields get default values.
|
||||
setDefaults(&b)
|
||||
return withBackoff(b)
|
||||
}
|
||||
|
||||
// withBackoff sets the backoff strategy used for retries after a
|
||||
// failed connection attempt.
|
||||
//
|
||||
// This can be exported if arbitrary backoff strategies are allowed by GRPC.
|
||||
func withBackoff(bs backoffStrategy) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.bs = bs
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
|
||||
// connection is up. Without this, Dial returns immediately and connecting the server
|
||||
// happens in background.
|
||||
|
@ -104,6 +152,8 @@ func WithBlock() DialOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithInsecure returns a DialOption which disables transport security for this ClientConn.
|
||||
// Note that transport security is required unless WithInsecure is set.
|
||||
func WithInsecure() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.insecure = true
|
||||
|
@ -159,6 +209,11 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
|||
// Set the default codec.
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = DefaultBackoffConfig
|
||||
}
|
||||
|
||||
if cc.dopts.picker == nil {
|
||||
cc.dopts.picker = &unicastPicker{
|
||||
target: target,
|
||||
|
@ -267,10 +322,9 @@ func NewConn(cc *ClientConn) (*Conn, error) {
|
|||
if !c.dopts.insecure {
|
||||
var ok bool
|
||||
for _, cd := range c.dopts.copts.AuthOptions {
|
||||
if _, ok := cd.(credentials.TransportAuthenticator); !ok {
|
||||
continue
|
||||
if _, ok = cd.(credentials.TransportAuthenticator); ok {
|
||||
break
|
||||
}
|
||||
ok = true
|
||||
}
|
||||
if !ok {
|
||||
return nil, ErrNoTransportSecurity
|
||||
|
@ -395,7 +449,7 @@ func (cc *Conn) resetTransport(closeTransport bool) error {
|
|||
return ErrClientConnTimeout
|
||||
}
|
||||
}
|
||||
sleepTime := backoff(retries)
|
||||
sleepTime := cc.dopts.bs.backoff(retries)
|
||||
timeout := sleepTime
|
||||
if timeout < minConnectTimeout {
|
||||
timeout = minConnectTimeout
|
||||
|
@ -518,8 +572,9 @@ func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
|
|||
cc.mu.Unlock()
|
||||
return nil, ErrClientConnClosing
|
||||
case cc.state == Ready:
|
||||
ct := cc.transport
|
||||
cc.mu.Unlock()
|
||||
return cc.transport, nil
|
||||
return ct, nil
|
||||
default:
|
||||
ready := cc.ready
|
||||
if ready == nil {
|
||||
|
|
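The new client-side options in this file compose like any other DialOption. A minimal sketch using only functions added or already present in this change; the target address is a placeholder:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:7777", // placeholder target
		grpc.WithInsecure(),
		// gzip-compress request payloads and accept gzip-compressed replies,
		// using the Compressor/Decompressor implementations from rpc_util.go.
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		// Cap the reconnect backoff (see backoff.go above) at 10s instead of 120s.
		grpc.WithBackoffMaxDelay(10*time.Second),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}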
13 vendor/src/google.golang.org/grpc/coverage.sh (vendored)
@ -4,15 +4,20 @@ set -e
|
|||
|
||||
workdir=.cover
|
||||
profile="$workdir/cover.out"
|
||||
mode=count
|
||||
mode=set
|
||||
end2endtest="google.golang.org/grpc/test"
|
||||
|
||||
generate_cover_data() {
|
||||
rm -rf "$workdir"
|
||||
mkdir "$workdir"
|
||||
|
||||
for pkg in "$@"; do
|
||||
f="$workdir/$(echo $pkg | tr / -).cover"
|
||||
go test -covermode="$mode" -coverprofile="$f" "$pkg"
|
||||
if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
|
||||
then
|
||||
f="$workdir/$(echo $pkg | tr / -)"
|
||||
go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
|
||||
go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "mode: $mode" >"$profile"
|
||||
|
@ -32,6 +37,8 @@ show_cover_report func
|
|||
case "$1" in
|
||||
"")
|
||||
;;
|
||||
--html)
|
||||
show_cover_report html ;;
|
||||
--coveralls)
|
||||
push_to_coveralls ;;
|
||||
*)
|
||||
|
|
|
@ -87,19 +87,6 @@ type AuthInfo interface {
|
|||
AuthType() string
|
||||
}
|
||||
|
||||
type authInfoKey struct{}
|
||||
|
||||
// NewContext creates a new context with authInfo attached.
|
||||
func NewContext(ctx context.Context, authInfo AuthInfo) context.Context {
|
||||
return context.WithValue(ctx, authInfoKey{}, authInfo)
|
||||
}
|
||||
|
||||
// FromContext returns the authInfo in ctx if it exists.
|
||||
func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) {
|
||||
authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo)
|
||||
return
|
||||
}
|
||||
|
||||
// TransportAuthenticator defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
type TransportAuthenticator interface {
|
||||
|
|
2 vendor/src/google.golang.org/grpc/doc.go (vendored)
@ -1,6 +1,6 @@
|
|||
/*
|
||||
Package grpc implements an RPC system called gRPC.
|
||||
|
||||
See https://github.com/grpc/grpc for more information about gRPC.
|
||||
See www.grpc.io for more information about gRPC.
|
||||
*/
|
||||
package grpc // import "google.golang.org/grpc"
|
||||
|
|
|
@ -42,6 +42,8 @@ import (
|
|||
)
|
||||
|
||||
// Use golang's standard logger by default.
|
||||
// Access is not mutex-protected: do not modify except in init()
|
||||
// functions.
|
||||
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
|
||||
// Logger mimics golang's standard Logger as an interface.
|
||||
|
@ -54,7 +56,8 @@ type Logger interface {
|
|||
Println(args ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger sets the logger that is used in grpc.
|
||||
// SetLogger sets the logger that is used in grpc. Call only from
|
||||
// init() functions.
|
||||
func SetLogger(l Logger) {
|
||||
logger = l
|
||||
}
|
||||
|
|
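Assuming this hunk is grpclog's logger.go, the comment added above asks that SetLogger be called only from an init() function, before any other goroutine can read the package-level logger; a minimal sketch:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route gRPC's internal logging through a prefixed standard-library
	// logger; *log.Logger satisfies the Logger interface shown above.
	grpclog.SetLogger(log.New(os.Stderr, "[grpc] ", log.LstdFlags))
}

func main() {
	grpclog.Println("logger installed")
}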
74 vendor/src/google.golang.org/grpc/interceptor.go (vendored, new file)
@ -0,0 +1,74 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// UnaryServerInfo consists of various information about a unary RPC on
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
type UnaryServerInfo struct {
|
||||
// Server is the service implementation the user provides. This is read-only.
|
||||
Server interface{}
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
}
|
||||
|
||||
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
|
||||
// execution of a unary RPC.
|
||||
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
|
||||
|
||||
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
|
||||
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
|
||||
// to complete the RPC.
|
||||
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
|
||||
|
||||
// StreamServerInfo consists of various information about a streaming RPC on
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
type StreamServerInfo struct {
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
// IsClientStream indicates whether the RPC is a client streaming RPC.
|
||||
IsClientStream bool
|
||||
// IsServerStream indicates whether the RPC is a server streaming RPC.
|
||||
IsServerStream bool
|
||||
}
|
||||
|
||||
// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
|
||||
// info contains all the information of this RPC the interceptor can operate on. And handler is the
|
||||
// service method implementation. It is the responsibility of the interceptor to invoke handler to
|
||||
// complete the RPC.
|
||||
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
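interceptor.go defines the server-side interception hooks. A sketch of a logging unary interceptor, installed through the UnaryInterceptor ServerOption added later in server.go; the names are illustrative:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingUnary satisfies grpc.UnaryServerInterceptor: it times each unary RPC
// and must itself call handler to let the RPC proceed.
func loggingUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("method=%s duration=%v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Only one unary interceptor may be installed (UnaryInterceptor panics
	// on a second call); chaining has to be built by the caller.
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingUnary))
	_ = s // register services and call s.Serve(lis) here
}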
49 vendor/src/google.golang.org/grpc/internal/internal.go (vendored, new file)
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains gRPC-internal code for testing, to avoid polluting
|
||||
// the godoc of the top-level grpc package.
|
||||
package internal
|
||||
|
||||
// TestingCloseConns closes all existing transports but keeps
|
||||
// grpcServer.lis accepting new connections.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingCloseConns func(grpcServer interface{})
|
||||
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingUseHandlerImpl func(grpcServer interface{})
|
65 vendor/src/google.golang.org/grpc/peer/peer.go (vendored, new file)
@ -0,0 +1,65 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package peer defines various peer information associated with RPCs and
|
||||
// corresponding utils.
|
||||
package peer
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// Peer contains the information of the peer for an RPC.
|
||||
type Peer struct {
|
||||
// Addr is the peer address.
|
||||
Addr net.Addr
|
||||
// AuthInfo is the authentication information of the transport.
|
||||
// It is nil if there is no transport security being used.
|
||||
AuthInfo credentials.AuthInfo
|
||||
}
|
||||
|
||||
type peerKey struct{}
|
||||
|
||||
// NewContext creates a new context with peer information attached.
|
||||
func NewContext(ctx context.Context, p *Peer) context.Context {
|
||||
return context.WithValue(ctx, peerKey{}, p)
|
||||
}
|
||||
|
||||
// FromContext returns the peer information in ctx if it exists.
|
||||
func FromContext(ctx context.Context) (p *Peer, ok bool) {
|
||||
p, ok = ctx.Value(peerKey{}).(*Peer)
|
||||
return
|
||||
}
|
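The new peer package lets handlers recover the remote address and transport AuthInfo from the per-RPC context. A minimal sketch of a helper a handler might call; the helper name is illustrative:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/peer"
)

// logPeer could be called from any server handler; the context it receives is
// the per-RPC context created by the transport.
func logPeer(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		log.Println("no peer info in context")
		return
	}
	log.Printf("peer addr: %v", p.Addr)
	if p.AuthInfo != nil {
		// AuthType comes from the credentials.AuthInfo interface shown earlier.
		log.Printf("transport security: %s", p.AuthInfo.AuthType())
	}
}

func main() {}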
2 vendor/src/google.golang.org/grpc/picker.go (vendored)
@ -172,7 +172,7 @@ func (p *unicastNamingPicker) processUpdates() error {
|
|||
}
|
||||
p.mu.Unlock()
|
||||
default:
|
||||
grpclog.Println("Unknown update.Op %d", update.Op)
|
||||
grpclog.Println("Unknown update.Op ", update.Op)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
203 vendor/src/google.golang.org/grpc/rpc_util.go (vendored)
@ -34,13 +34,14 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -75,6 +76,71 @@ func (protoCodec) String() string {
|
|||
return "proto"
|
||||
}
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
type Compressor interface {
|
||||
// Do compresses p into w.
|
||||
Do(w io.Writer, p []byte) error
|
||||
// Type returns the compression algorithm the Compressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||
func NewGZIPCompressor() Compressor {
|
||||
return &gzipCompressor{}
|
||||
}
|
||||
|
||||
type gzipCompressor struct {
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||
z := gzip.NewWriter(w)
|
||||
if _, err := z.Write(p); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.Close()
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Type() string {
|
||||
return "gzip"
|
||||
}
|
||||
|
||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||
type Decompressor interface {
|
||||
// Do reads the data from r and uncompresses it.
|
||||
Do(r io.Reader) ([]byte, error)
|
||||
// Type returns the compression algorithm the Decompressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
type gzipDecompressor struct {
|
||||
}
|
||||
|
||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||
func NewGZIPDecompressor() Decompressor {
|
||||
return &gzipDecompressor{}
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||
z, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer z.Close()
|
||||
return ioutil.ReadAll(z)
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Type() string {
|
||||
return "gzip"
|
||||
}
|
||||
|
||||
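Compressor and Decompressor above are small interfaces, so codecs other than gzip can be plugged in. A sketch of a flate-based pair mirroring the gzip implementation; the type names and the "deflate" encoding token are assumptions, not part of this change:

package main

import (
	"compress/flate"
	"io"
	"io/ioutil"
)

// flateCompressor implements the Compressor interface shown above.
type flateCompressor struct{}

func (flateCompressor) Do(w io.Writer, p []byte) error {
	z, err := flate.NewWriter(w, flate.DefaultCompression)
	if err != nil {
		return err
	}
	if _, err := z.Write(p); err != nil {
		return err
	}
	return z.Close()
}

func (flateCompressor) Type() string { return "deflate" }

// flateDecompressor implements the Decompressor interface shown above.
type flateDecompressor struct{}

func (flateDecompressor) Do(r io.Reader) ([]byte, error) {
	z := flate.NewReader(r)
	defer z.Close()
	return ioutil.ReadAll(z)
}

func (flateDecompressor) Type() string { return "deflate" }

func main() {}

They would be installed with WithCompressor/WithDecompressor on the client and RPCCompressor/RPCDecompressor on the server, and both sides must agree on the Type() string carried in grpc-encoding.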
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
// CallOption configures a Call before it starts or extracts information from
|
||||
// a Call after it completes.
|
||||
type CallOption interface {
|
||||
|
@ -118,36 +184,49 @@ type payloadFormat uint8
|
|||
|
||||
const (
|
||||
compressionNone payloadFormat = iota // no compression
|
||||
compressionFlate
|
||||
// More formats
|
||||
compressionMade
|
||||
)
|
||||
|
||||
// parser reads complete gRPC messages from the underlying reader.
|
||||
type parser struct {
|
||||
s io.Reader
|
||||
}
|
||||
// r is the underlying reader.
|
||||
// See the comment on recvMsg for the permissible
|
||||
// error types.
|
||||
r io.Reader
|
||||
|
||||
// recvMsg is to read a complete gRPC message from the stream. It is blocking if
|
||||
// the message has not been complete yet. It returns the message and its type,
|
||||
// EOF is returned with nil msg and 0 pf if the entire stream is done. Other
|
||||
// non-nil error is returned if something is wrong on reading.
|
||||
func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
|
||||
// The header of a gRPC message. Find more detail
|
||||
// at http://www.grpc.io/docs/guides/wire.html.
|
||||
var buf [5]byte
|
||||
header [5]byte
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(p.s, buf[:]); err != nil {
|
||||
// recvMsg reads a complete gRPC message from the stream.
|
||||
//
|
||||
// It returns the message and its payload (compression/encoding)
|
||||
// format. The caller owns the returned msg memory.
|
||||
//
|
||||
// If there is an error, possible values are:
|
||||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
|
||||
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
pf = payloadFormat(buf[0])
|
||||
length := binary.BigEndian.Uint32(buf[1:])
|
||||
pf = payloadFormat(p.header[0])
|
||||
length := binary.BigEndian.Uint32(p.header[1:])
|
||||
|
||||
if length == 0 {
|
||||
return pf, nil, nil
|
||||
}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
msg = make([]byte, int(length))
|
||||
if _, err := io.ReadFull(p.s, msg); err != nil {
|
||||
if _, err := io.ReadFull(p.r, msg); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
|
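recvMsg above reads the gRPC length-prefixed framing: a 5-byte header (1 byte payload format, 4 bytes big-endian length) followed by the payload. A standalone sketch that writes and re-reads one such frame, mirroring that logic purely for illustration:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame prepends the 5-byte message header described above: header[0] is
// the payload format (0 = uncompressed, 1 = compressed) and header[1:5] is the
// big-endian payload length.
func writeFrame(w io.Writer, compressed bool, payload []byte) error {
	var header [5]byte
	if compressed {
		header[0] = 1
	}
	binary.BigEndian.PutUint32(header[1:], uint32(len(payload)))
	if _, err := w.Write(header[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame is the inverse, mirroring parser.recvMsg.
func readFrame(r io.Reader) (compressed bool, payload []byte, err error) {
	var header [5]byte
	if _, err = io.ReadFull(r, header[:]); err != nil {
		return false, nil, err
	}
	payload = make([]byte, binary.BigEndian.Uint32(header[1:]))
	if _, err = io.ReadFull(r, payload); err != nil {
		return false, nil, err
	}
	return header[0] == 1, payload, nil
}

func main() {
	var buf bytes.Buffer
	_ = writeFrame(&buf, false, []byte("hello"))
	c, p, _ := readFrame(&buf)
	fmt.Println(c, string(p)) // false hello
}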
@ -158,7 +237,7 @@ func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
|
|||
|
||||
// encode serializes msg and prepends the message header. If msg is nil, it
|
||||
// generates the message header of 0 message length.
|
||||
func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
||||
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
|
||||
var b []byte
|
||||
var length uint
|
||||
if msg != nil {
|
||||
|
@ -168,6 +247,12 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cp != nil {
|
||||
if err := cp.Do(cbuf, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = cbuf.Bytes()
|
||||
}
|
||||
length = uint(len(b))
|
||||
}
|
||||
if length > math.MaxUint32 {
|
||||
|
@ -182,7 +267,11 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
|||
var buf = make([]byte, payloadLen+sizeLen+len(b))
|
||||
|
||||
// Write payload format
|
||||
buf[0] = byte(pf)
|
||||
if cp == nil {
|
||||
buf[0] = byte(compressionNone)
|
||||
} else {
|
||||
buf[0] = byte(compressionMade)
|
||||
}
|
||||
// Write length of b into buf
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(length))
|
||||
// Copy encoded msg to buf
|
||||
|
@ -191,22 +280,38 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
|||
return buf, nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, m interface{}) error {
|
||||
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
case compressionMade:
|
||||
if recvCompress == "" {
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress)
|
||||
}
|
||||
if dc == nil || recvCompress != dc.Type() {
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||
}
|
||||
default:
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error {
|
||||
pf, d, err := p.recvMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
if rErr, ok := err.(rpcError); ok {
|
||||
return rErr
|
||||
} else {
|
||||
return Errorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
|
||||
return err
|
||||
}
|
||||
if pf == compressionMade {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
default:
|
||||
return Errorf(codes.Internal, "gprc: compression is not supported yet.")
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -218,7 +323,7 @@ type rpcError struct {
|
|||
}
|
||||
|
||||
func (e rpcError) Error() string {
|
||||
return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc)
|
||||
return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
|
||||
}
|
||||
|
||||
// Code returns the error code for err if it was produced by the rpc system.
|
||||
|
@ -304,34 +409,10 @@ func convertCode(err error) codes.Code {
|
|||
return codes.Unknown
|
||||
}
|
||||
|
||||
const (
|
||||
// how long to wait after the first failure before retrying
|
||||
baseDelay = 1.0 * time.Second
|
||||
// upper bound of backoff delay
|
||||
maxDelay = 120 * time.Second
|
||||
// backoff increases by this factor on each retry
|
||||
backoffFactor = 1.6
|
||||
// backoff is randomized downwards by this factor
|
||||
backoffJitter = 0.2
|
||||
)
|
||||
|
||||
func backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(maxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= backoffFactor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + backoffJitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
||||
// SupportPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the grpc package.
|
||||
//
|
||||
// This constant may be renamed in the future if a change in the generated code
|
||||
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
|
||||
// should not be referenced from any other code.
|
||||
const SupportPackageIsVersion2 = true
|
||||
|
|
455 vendor/src/google.golang.org/grpc/server.go (vendored)
@ -34,10 +34,12 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
@ -45,15 +47,17 @@ import (
|
|||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error)
|
||||
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
|
||||
|
||||
// MethodDesc represents an RPC service's method specification.
|
||||
type MethodDesc struct {
|
||||
|
@ -81,10 +85,11 @@ type service struct {
|
|||
|
||||
// Server is a gRPC server to serve RPC requests.
|
||||
type Server struct {
|
||||
opts options
|
||||
mu sync.Mutex
|
||||
opts options
|
||||
|
||||
mu sync.Mutex // guards following
|
||||
lis map[net.Listener]bool
|
||||
conns map[transport.ServerTransport]bool
|
||||
conns map[io.Closer]bool
|
||||
m map[string]*service // service name -> service info
|
||||
events trace.EventLog
|
||||
}
|
||||
|
@ -92,7 +97,12 @@ type Server struct {
|
|||
type options struct {
|
||||
creds credentials.Credentials
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
unaryInt UnaryServerInterceptor
|
||||
streamInt StreamServerInterceptor
|
||||
maxConcurrentStreams uint32
|
||||
useHandlerImpl bool // use http.Handler-based server
|
||||
}
|
||||
|
||||
// A ServerOption sets options.
|
||||
|
@ -105,6 +115,18 @@ func CustomCodec(codec Codec) ServerOption {
|
|||
}
|
||||
}
|
||||
|
||||
func RPCCompressor(cp Compressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
func RPCDecompressor(dc Decompressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
|
||||
// of concurrent streams to each ServerTransport.
|
||||
func MaxConcurrentStreams(n uint32) ServerOption {
|
||||
|
@ -120,6 +142,29 @@ func Creds(c credentials.Credentials) ServerOption {
|
|||
}
|
||||
}
|
||||
|
||||
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
|
||||
// server. Only one unary interceptor can be installed. The construction of multiple
|
||||
// interceptors (e.g., chaining) can be implemented at the caller.
|
||||
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
|
||||
return func(o *options) {
|
||||
if o.unaryInt != nil {
|
||||
panic("The unary server interceptor has been set.")
|
||||
}
|
||||
o.unaryInt = i
|
||||
}
|
||||
}
|
||||
|
||||
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
|
||||
// server. Only one stream interceptor can be installed.
|
||||
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
|
||||
return func(o *options) {
|
||||
if o.streamInt != nil {
|
||||
panic("The stream server interceptor has been set.")
|
||||
}
|
||||
o.streamInt = i
|
||||
}
|
||||
}
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
|
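The new ServerOptions compose on the server the same way DialOptions do on the client. A minimal sketch using only options present in this change; the listen address is a placeholder:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":7777") // placeholder address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(
		// Compress responses and decompress gzip'd requests
		// (RPCCompressor / RPCDecompressor, added above).
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		grpc.MaxConcurrentStreams(128),
	)
	// Register generated services here, then:
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}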
@ -134,7 +179,7 @@ func NewServer(opt ...ServerOption) *Server {
|
|||
s := &Server{
|
||||
lis: make(map[net.Listener]bool),
|
||||
opts: opts,
|
||||
conns: make(map[transport.ServerTransport]bool),
|
||||
conns: make(map[io.Closer]bool),
|
||||
m: make(map[string]*service),
|
||||
}
|
||||
if EnableTracing {
|
||||
|
@ -201,9 +246,17 @@ var (
|
|||
ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
)
|
||||
|
||||
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
creds, ok := s.opts.creds.(credentials.TransportAuthenticator)
|
||||
if !ok {
|
||||
return rawConn, nil, nil
|
||||
}
|
||||
return creds.ServerHandshake(rawConn)
|
||||
}
|
||||
|
||||
// Serve accepts incoming connections on the listener lis, creating a new
|
||||
// ServerTransport and service goroutine for each. The service goroutines
|
||||
// read gRPC request and then call the registered handlers to reply to them.
|
||||
// read gRPC requests and then call the registered handlers to reply to them.
|
||||
// Service returns when lis.Accept fails.
|
||||
func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Lock()
|
||||
|
@ -221,74 +274,167 @@ func (s *Server) Serve(lis net.Listener) error {
|
|||
s.mu.Unlock()
|
||||
}()
|
||||
for {
|
||||
c, err := lis.Accept()
|
||||
rawConn, err := lis.Accept()
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.printf("done serving; Accept = %v", err)
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
var authInfo credentials.AuthInfo
|
||||
if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
|
||||
var conn net.Conn
|
||||
conn, authInfo, err = creds.ServerHandshake(c)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
|
||||
continue
|
||||
}
|
||||
c = conn
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
return nil
|
||||
}
|
||||
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
|
||||
if err != nil {
|
||||
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
continue
|
||||
}
|
||||
s.conns[st] = true
|
||||
s.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
var trInfo *traceInfo
|
||||
if EnableTracing {
|
||||
trInfo = &traceInfo{
|
||||
tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
|
||||
}
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.firstLine.remoteAddr = st.RemoteAddr()
|
||||
stream.TraceContext(trInfo.tr)
|
||||
if dl, ok := stream.Context().Deadline(); ok {
|
||||
trInfo.firstLine.deadline = dl.Sub(time.Now())
|
||||
}
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.handleStream(st, stream, trInfo)
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
s.mu.Lock()
|
||||
delete(s.conns, st)
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
// Start a new goroutine to deal with rawConn
|
||||
// so we don't stall this Accept loop goroutine.
|
||||
go s.handleRawConn(rawConn)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error {
|
||||
p, err := encode(s.opts.codec, msg, pf)
|
||||
// handleRawConn is run in its own goroutine and handles a just-accepted
|
||||
// connection that has not had any I/O performed on it yet.
|
||||
func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||
rawConn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if s.opts.useHandlerImpl {
|
||||
s.serveUsingHandler(conn)
|
||||
} else {
|
||||
s.serveNewHTTP2Transport(conn, authInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// serveNewHTTP2Transport sets up a new http/2 transport (using the
|
||||
// gRPC http2 server transport in transport/http2_server.go) and
|
||||
// serves streams on it.
|
||||
// This is run in its own goroutine (it does network I/O in
|
||||
// transport.NewServerTransport).
|
||||
func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
|
||||
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
defer s.removeConn(st)
|
||||
defer st.Close()
|
||||
var wg sync.WaitGroup
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s.handleStream(st, stream, s.traceInfo(st, stream))
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var _ http.Handler = (*Server)(nil)
|
||||
|
||||
// serveUsingHandler is called from handleRawConn when s is configured
|
||||
// to handle requests via the http.Handler interface. It sets up a
|
||||
// net/http.Server to handle the just-accepted conn. The http.Server
|
||||
// is configured to route all incoming requests (all HTTP/2 streams)
|
||||
// to ServeHTTP, which creates a new ServerTransport for each stream.
|
||||
// serveUsingHandler blocks until conn closes.
|
||||
//
|
||||
// This codepath is only used when Server.TestingUseHandlerImpl has
|
||||
// been configured. This lets the end2end tests exercise the ServeHTTP
|
||||
// method as one of the environment types.
|
||||
//
|
||||
// conn is the *tls.Conn that's already been authenticated.
|
||||
func (s *Server) serveUsingHandler(conn net.Conn) {
|
||||
if !s.addConn(conn) {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(conn)
|
||||
h2s := &http2.Server{
|
||||
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
|
||||
}
|
||||
h2s.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Handler: s,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
st, err := transport.NewServerHandlerTransport(w, r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(st)
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
|
||||
// If tracing is not enabled, it returns nil.
|
||||
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
|
||||
if !EnableTracing {
|
||||
return nil
|
||||
}
|
||||
trInfo = &traceInfo{
|
||||
tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
|
||||
}
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.firstLine.remoteAddr = st.RemoteAddr()
|
||||
stream.TraceContext(trInfo.tr)
|
||||
if dl, ok := stream.Context().Deadline(); ok {
|
||||
trInfo.firstLine.deadline = dl.Sub(time.Now())
|
||||
}
|
||||
return trInfo
|
||||
}
|
||||
|
||||
func (s *Server) addConn(c io.Closer) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns == nil {
|
||||
return false
|
||||
}
|
||||
s.conns[c] = true
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Server) removeConn(c io.Closer) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns != nil {
|
||||
delete(s.conns, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
|
||||
var cbuf *bytes.Buffer
|
||||
if cp != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
p, err := encode(s.opts.codec, msg, cp, cbuf)
|
||||
if err != nil {
|
||||
// This typically indicates a fatal issue (e.g., memory
|
||||
// corruption or hardware faults) the application program
|
||||
|
@ -314,97 +460,130 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
}
|
||||
}()
|
||||
}
|
||||
p := &parser{s: stream}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
pf, req, err := p.recvMsg()
|
||||
if err == io.EOF {
|
||||
// The entire stream is done (for unary RPC only).
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"}
|
||||
}
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
statusCode := codes.OK
|
||||
statusDesc := ""
|
||||
df := func(v interface{}) error {
|
||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
||||
return err
|
||||
|
||||
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), df)
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
statusCode = err.code
|
||||
statusDesc = err.desc
|
||||
} else {
|
||||
statusCode = convertCode(appErr)
|
||||
statusDesc = appErr.Error()
|
||||
}
|
||||
if trInfo != nil && statusCode != codes.OK {
|
||||
trInfo.tr.LazyLog(stringer(statusDesc), true)
|
||||
trInfo.tr.SetError()
|
||||
default:
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
|
||||
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
statusCode := codes.OK
|
||||
statusDesc := ""
|
||||
df := func(v interface{}) error {
|
||||
if pf == compressionMade {
|
||||
var err error
|
||||
req, err = s.opts.dc.Do(bytes.NewReader(req))
|
||||
if err != nil {
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
opts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
statusCode = err.Code
|
||||
statusDesc = err.Desc
|
||||
default:
|
||||
statusCode = codes.Unknown
|
||||
statusDesc = err.Error()
|
||||
}
|
||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
|
||||
}
|
||||
return t.WriteStatus(stream, statusCode, statusDesc)
|
||||
default:
|
||||
panic(fmt.Sprintf("payload format to be supported: %d", pf))
|
||||
return nil
|
||||
}
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
statusCode = err.code
|
||||
statusDesc = err.desc
|
||||
} else {
|
||||
statusCode = convertCode(appErr)
|
||||
statusDesc = appErr.Error()
|
||||
}
|
||||
if trInfo != nil && statusCode != codes.OK {
|
||||
trInfo.tr.LazyLog(stringer(statusDesc), true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
opts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
statusCode = err.Code
|
||||
statusDesc = err.Desc
|
||||
default:
|
||||
statusCode = codes.Unknown
|
||||
statusDesc = err.Error()
|
||||
}
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||
}
|
||||
return t.WriteStatus(stream, statusCode, statusDesc)
|
||||
}
|
||||
}
|
||||
|
||||
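The hunk above threads s.opts.unaryInt into the generated method handler, which is how a server-side unary interceptor now gets to run around every unary RPC. Below is a minimal sketch of such an interceptor, assuming the grpc.UnaryInterceptor server option and the UnaryServerInterceptor/UnaryServerInfo/UnaryHandler names this grpc generation exposes (check the vendored server.go for the exact signatures; this is not the project's own code):

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// logUnary times every unary RPC and logs its full method name and error.
// It relies on the interceptor signature assumed above.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // call the real service method
	log.Printf("unary %s took %v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Register the interceptor when constructing the server.
	s := grpc.NewServer(grpc.UnaryInterceptor(logUnary))
	_ = s
}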
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
ss := &serverStream{
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{s: stream},
|
||||
p: &parser{r: stream},
|
||||
codec: s.opts.codec,
|
||||
cp: s.opts.cp,
|
||||
dc: s.opts.dc,
|
||||
trInfo: trInfo,
|
||||
}
|
||||
if ss.cp != nil {
|
||||
ss.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
|
@ -418,10 +597,24 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
ss.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
if appErr := sd.Handler(srv.server, ss); appErr != nil {
|
||||
var appErr error
|
||||
if s.opts.streamInt == nil {
|
||||
appErr = sd.Handler(srv.server, ss)
|
||||
} else {
|
||||
info := &StreamServerInfo{
|
||||
FullMethod: stream.Method(),
|
||||
IsClientStream: sd.ClientStreams,
|
||||
IsServerStream: sd.ServerStreams,
|
||||
}
|
||||
appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
|
||||
}
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
ss.statusCode = err.code
|
||||
ss.statusDesc = err.desc
|
||||
} else if err, ok := appErr.(transport.StreamError); ok {
|
||||
ss.statusCode = err.Code
|
||||
ss.statusDesc = err.Desc
|
||||
} else {
|
||||
ss.statusCode = convertCode(appErr)
|
||||
ss.statusDesc = appErr.Error()
|
||||
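processStreamingRPC above now dispatches through s.opts.streamInt when one is registered, passing a StreamServerInfo describing the method. A hedged sketch of a matching stream interceptor, assuming the grpc.StreamInterceptor option and the StreamServerInterceptor signature implied by this diff:

package main

import (
	"log"

	"google.golang.org/grpc"
)

// logStream wraps every streaming handler, mirroring how the code above
// forwards to s.opts.streamInt with a *StreamServerInfo.
func logStream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	log.Printf("stream %s client=%v server=%v", info.FullMethod, info.IsClientStream, info.IsServerStream)
	return handler(srv, ss) // run the actual streaming handler
}

func main() {
	s := grpc.NewServer(grpc.StreamInterceptor(logStream))
	_ = s
}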
|
@ -509,8 +702,11 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
|||
}
|
||||
}
|
||||
|
||||
// Stop stops the gRPC server. Once Stop returns, the server stops accepting
|
||||
// connection requests and closes all the connected connections.
|
||||
// Stop stops the gRPC server. It immediately closes all open
|
||||
// connections and listeners.
|
||||
// It cancels all active RPCs on the server side and the corresponding
|
||||
// pending RPCs on the client side will get notified by connection
|
||||
// errors.
|
||||
func (s *Server) Stop() {
|
||||
s.mu.Lock()
|
||||
listeners := s.lis
|
||||
|
@ -518,12 +714,14 @@ func (s *Server) Stop() {
|
|||
cs := s.conns
|
||||
s.conns = nil
|
||||
s.mu.Unlock()
|
||||
|
||||
for lis := range listeners {
|
||||
lis.Close()
|
||||
}
|
||||
for c := range cs {
|
||||
c.Close()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
|
@ -532,14 +730,23 @@ func (s *Server) Stop() {
|
|||
s.mu.Unlock()
|
||||
}
|
||||
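The reworded Stop documentation above describes hard-shutdown semantics: listeners and open connections close immediately and in-flight server RPCs are cancelled. A small usage sketch (service registration elided; it would normally happen before Serve):

package main

import (
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	// ... register services on s here ...

	// Serve blocks, so run it in its own goroutine.
	go s.Serve(lis)
	time.Sleep(time.Second)

	// Stop closes the listener and every connection; active RPCs on the
	// server are cancelled and clients observe connection errors.
	s.Stop()
}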
|
||||
// TestingCloseConns closes all exiting transports but keeps s.lis accepting new
|
||||
// connections. This is for test only now.
|
||||
func (s *Server) TestingCloseConns() {
|
||||
func init() {
|
||||
internal.TestingCloseConns = func(arg interface{}) {
|
||||
arg.(*Server).testingCloseConns()
|
||||
}
|
||||
internal.TestingUseHandlerImpl = func(arg interface{}) {
|
||||
arg.(*Server).opts.useHandlerImpl = true
|
||||
}
|
||||
}
|
||||
|
||||
// testingCloseConns closes all existing transports but keeps s.lis
|
||||
// accepting new connections.
|
||||
func (s *Server) testingCloseConns() {
|
||||
s.mu.Lock()
|
||||
for c := range s.conns {
|
||||
c.Close()
|
||||
delete(s.conns, c)
|
||||
}
|
||||
s.conns = make(map[transport.ServerTransport]bool)
|
||||
s.mu.Unlock()
|
||||
}
|
||||
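The init block above replaces the exported TestingCloseConns method with hooks assigned into grpc's shared internal package, so test-only entry points stay unexported. A standalone sketch of that idiom, collapsed into one file for readability (the real layout splits the hook variable into a separate internal package; all names here are illustrative):

package main

import "fmt"

// testingCloseConns stands in for internal.TestingCloseConns: a function
// variable that the implementing package fills in from init() so tests can
// trigger unexported behaviour without a public API.
var testingCloseConns func(srv interface{})

type server struct{ conns map[string]bool }

func (s *server) closeAllConns() {
	for c := range s.conns {
		delete(s.conns, c)
	}
}

func init() {
	// Equivalent of the internal.TestingCloseConns assignment in the diff.
	testingCloseConns = func(arg interface{}) { arg.(*server).closeAllConns() }
}

func main() {
	s := &server{conns: map[string]bool{"c1": true}}
	testingCloseConns(s)
	fmt.Println(len(s.conns)) // 0
}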
|
||||
|
|
76
vendor/src/google.golang.org/grpc/stream.go
vendored
|
@ -34,6 +34,7 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
|
@ -46,12 +47,14 @@ import (
|
|||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
type streamHandler func(srv interface{}, stream ServerStream) error
|
||||
// StreamHandler defines the handler called by gRPC server to complete the
|
||||
// execution of a streaming RPC.
|
||||
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||
|
||||
// StreamDesc represents a streaming RPC service's method specification.
|
||||
type StreamDesc struct {
|
||||
StreamName string
|
||||
Handler streamHandler
|
||||
Handler StreamHandler
|
||||
|
||||
// At least one of these is true.
|
||||
ServerStreams bool
|
||||
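Exporting StreamHandler matters because generated code builds a StreamDesc per streaming method and registers it on the server through a ServiceDesc. A hand-written sketch of what that registration roughly looks like in this grpc generation (service name, handler body, and the empty handler type are made up for illustration):

package main

import "google.golang.org/grpc"

// _Echo_Stream_Handler has the exported grpc.StreamHandler signature from
// the hunk above. Generated code would RecvMsg/SendMsg concrete request and
// response types in a loop; this stand-in just ends the stream cleanly.
func _Echo_Stream_Handler(srv interface{}, stream grpc.ServerStream) error {
	return nil
}

// echoServiceDesc is a hand-written stand-in for a generated ServiceDesc.
var echoServiceDesc = grpc.ServiceDesc{
	ServiceName: "example.Echo",
	HandlerType: (*interface{})(nil), // generated code uses (*EchoServer)(nil)
	Streams: []grpc.StreamDesc{{
		StreamName:    "Stream",
		Handler:       _Echo_Stream_Handler,
		ServerStreams: true,
		ClientStreams: true,
	}},
}

func main() {
	s := grpc.NewServer()
	s.RegisterService(&echoServiceDesc, struct{}{})
	_ = s
}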
|
@ -66,18 +69,19 @@ type Stream interface {
|
|||
// breaks.
|
||||
// On error, it aborts the stream and returns an RPC status on client
|
||||
// side. On server side, it simply returns the error to the caller.
|
||||
// SendMsg is called by generated code.
|
||||
// SendMsg is called by generated code. Also Users can call SendMsg
|
||||
// directly when it is really needed in their use cases.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message or the stream is
|
||||
// done. On client side, it returns io.EOF when the stream is done. On
|
||||
// any other error, it aborts the streama nd returns an RPC status. On
|
||||
// any other error, it aborts the stream and returns an RPC status. On
|
||||
// server side, it simply returns the error to the caller.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// ClientStream defines the interface a client stream has to satify.
|
||||
type ClientStream interface {
|
||||
// Header returns the header metedata received from the server if there
|
||||
// Header returns the header metadata received from the server if there
|
||||
// is any. It blocks if the metadata is not ready to read.
|
||||
Header() (metadata.MD, error)
|
||||
// Trailer returns the trailer metadata from the server. It must be called
|
||||
|
@ -108,12 +112,22 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
Flush: desc.ServerStreams && desc.ClientStreams,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
cs := &clientStream{
|
||||
desc: desc,
|
||||
codec: cc.dopts.codec,
|
||||
cp: cc.dopts.cp,
|
||||
dc: cc.dopts.dc,
|
||||
tracing: EnableTracing,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
cs.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if cs.tracing {
|
||||
cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
cs.trInfo.firstLine.client = true
|
||||
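The NewClientStream changes above only set the grpc-encoding header and the scratch compression buffer when a Compressor was configured on the dial options. A sketch of wiring compression on both ends, assuming the gzip helpers and option constructors this grpc generation ships (WithCompressor/WithDecompressor on the client, RPCCompressor/RPCDecompressor on the server); the option names are an assumption, check the vendored option definitions:

package main

import "google.golang.org/grpc"

func main() {
	// Client side: compress request payloads and accept gzip responses.
	conn, err := grpc.Dial("127.0.0.1:50051",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Server side: mirror the same compressor/decompressor pair.
	s := grpc.NewServer(
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
	)
	_ = s
}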
|
@ -125,16 +139,23 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||
}
|
||||
s, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
cs.t = t
|
||||
cs.s = s
|
||||
cs.p = &parser{s: s}
|
||||
cs.p = &parser{r: s}
|
||||
// Listen on ctx.Done() to detect cancellation when there is no pending
|
||||
// I/O operations on this stream.
|
||||
go func() {
|
||||
<-s.Context().Done()
|
||||
cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
|
||||
select {
|
||||
case <-t.Error():
|
||||
// Incur transport error, simply exit.
|
||||
case <-s.Context().Done():
|
||||
err := s.Context().Err()
|
||||
cs.finish(err)
|
||||
cs.closeTransportStream(transport.ContextErr(err))
|
||||
}
|
||||
}()
|
||||
return cs, nil
|
||||
}
|
||||
|
@ -146,6 +167,9 @@ type clientStream struct {
|
|||
p *parser
|
||||
desc *StreamDesc
|
||||
codec Codec
|
||||
cp Compressor
|
||||
cbuf *bytes.Buffer
|
||||
dc Decompressor
|
||||
|
||||
tracing bool // set to EnableTracing when the clientStream is created.
|
||||
|
||||
|
@ -183,6 +207,9 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||
cs.mu.Unlock()
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
if err == nil || err == io.EOF {
|
||||
return
|
||||
}
|
||||
|
@ -191,7 +218,12 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||
}
|
||||
err = toRPCErr(err)
|
||||
}()
|
||||
out, err := encode(cs.codec, m, compressionNone)
|
||||
out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
|
||||
defer func() {
|
||||
if cs.cbuf != nil {
|
||||
cs.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
|
@ -199,7 +231,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||
}
|
||||
|
||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||
err = recv(cs.p, cs.codec, m)
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
|
||||
defer func() {
|
||||
// err != nil indicates the termination of the stream.
|
||||
if err != nil {
|
||||
|
@ -218,16 +250,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||
return
|
||||
}
|
||||
// Special handling for client streaming rpc.
|
||||
err = recv(cs.p, cs.codec, m)
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
|
||||
cs.closeTransportStream(err)
|
||||
if err == nil {
|
||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||
}
|
||||
if err == io.EOF {
|
||||
if cs.s.StatusCode() == codes.OK {
|
||||
cs.finish(err)
|
||||
return nil
|
||||
}
|
||||
return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
|
||||
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
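The two Errorf changes above are defensive: the status description comes from the peer, so it must be passed as an argument rather than as the format string, otherwise any '%' in the description is reinterpreted as a formatting verb. A tiny demonstration of the difference with plain fmt (the value of desc is hypothetical):

package main

import "fmt"

func main() {
	desc := "disk 90% full" // peer-supplied text containing a stray '%'
	fmt.Println(fmt.Sprintf(desc))       // verbs are interpreted: "disk 90%!f(MISSING)ull"
	fmt.Println(fmt.Sprintf("%s", desc)) // printed verbatim: "disk 90% full"
}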
|
@ -239,13 +272,18 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||
// Returns io.EOF to indicate the end of the stream.
|
||||
return
|
||||
}
|
||||
return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
|
||||
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
|
||||
func (cs *clientStream) CloseSend() (err error) {
|
||||
err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil || err == io.EOF {
|
||||
return
|
||||
}
|
||||
|
@ -303,6 +341,9 @@ type serverStream struct {
|
|||
s *transport.Stream
|
||||
p *parser
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
cbuf *bytes.Buffer
|
||||
statusCode codes.Code
|
||||
statusDesc string
|
||||
trInfo *traceInfo
|
||||
|
@ -341,7 +382,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
|||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
out, err := encode(ss.codec, m, compressionNone)
|
||||
out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
|
||||
defer func() {
|
||||
if ss.cbuf != nil {
|
||||
ss.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
return err
|
||||
|
@ -364,5 +410,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
|||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
return recv(ss.p, ss.codec, m)
|
||||
return recv(ss.p, ss.codec, ss.s, ss.dc, m)
|
||||
}
|
||||
|
|
|
@ -56,43 +56,33 @@ type windowUpdate struct {
|
|||
increment uint32
|
||||
}
|
||||
|
||||
func (windowUpdate) isItem() bool {
|
||||
return true
|
||||
}
|
||||
func (*windowUpdate) item() {}
|
||||
|
||||
type settings struct {
|
||||
ack bool
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (settings) isItem() bool {
|
||||
return true
|
||||
}
|
||||
func (*settings) item() {}
|
||||
|
||||
type resetStream struct {
|
||||
streamID uint32
|
||||
code http2.ErrCode
|
||||
}
|
||||
|
||||
func (resetStream) isItem() bool {
|
||||
return true
|
||||
}
|
||||
func (*resetStream) item() {}
|
||||
|
||||
type flushIO struct {
|
||||
}
|
||||
|
||||
func (flushIO) isItem() bool {
|
||||
return true
|
||||
}
|
||||
func (*flushIO) item() {}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (ping) isItem() bool {
|
||||
return true
|
||||
}
|
||||
func (*ping) item() {}
|
||||
|
||||
// quotaPool is a pool which accumulates the quota and sends it to acquire()
|
||||
// when it is available.
|
||||
|
@ -172,10 +162,6 @@ func (qb *quotaPool) acquire() <-chan int {
|
|||
type inFlow struct {
|
||||
// The inbound flow control limit for pending data.
|
||||
limit uint32
|
||||
// conn points to the shared connection-level inFlow that is shared
|
||||
// by all streams on that conn. It is nil for the inFlow on the conn
|
||||
// directly.
|
||||
conn *inFlow
|
||||
|
||||
mu sync.Mutex
|
||||
// pendingData is the overall data which have been received but not been
|
||||
|
@ -186,75 +172,39 @@ type inFlow struct {
|
|||
pendingUpdate uint32
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It increments not only its
|
||||
// own pendingData but also that of the associated connection-level flow.
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData+f.pendingUpdate+n > f.limit {
|
||||
return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
|
||||
}
|
||||
if f.conn != nil {
|
||||
if err := f.conn.onData(n); err != nil {
|
||||
return ConnectionErrorf("%v", err)
|
||||
}
|
||||
}
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit {
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// connOnRead updates the connection level states when the application consumes data.
|
||||
func (f *inFlow) connOnRead(n uint32) uint32 {
|
||||
if n == 0 || f.conn != nil {
|
||||
return 0
|
||||
}
|
||||
// onRead is invoked when the application reads the data. It returns the window size
|
||||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData == 0 {
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
ret := f.pendingUpdate
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
return ret
|
||||
return wu
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// onRead is invoked when the application reads the data. It returns the window updates
|
||||
// for both stream and connection level.
|
||||
func (f *inFlow) onRead(n uint32) (swu, cwu uint32) {
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData == 0 {
|
||||
// pendingData has been adjusted by restoreConn.
|
||||
return
|
||||
}
|
||||
f.pendingData -= n
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
swu = f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
}
|
||||
cwu = f.conn.connOnRead(n)
|
||||
return
|
||||
}
|
||||
|
||||
// restoreConn is invoked when a stream is terminated. It removes its stake in
|
||||
// the connection-level flow and resets its own state.
|
||||
func (f *inFlow) restoreConn() uint32 {
|
||||
if f.conn == nil {
|
||||
return 0
|
||||
}
|
||||
func (f *inFlow) resetPendingData() uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
n := f.pendingData
|
||||
f.pendingData = 0
|
||||
f.pendingUpdate = 0
|
||||
return f.conn.connOnRead(n)
|
||||
return n
|
||||
}
|
||||
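The control.go rewrite above drops the nested conn pointer from inFlow: each stream and the connection now keep independent accounting, onData only checks the local limit, onRead returns a single window update once a quarter of the window has been consumed, and resetPendingData returns whatever was pending so the connection window can be refunded when a stream dies. A standalone sketch of that accounting follows (not the actual type, just the same arithmetic on one level):

package main

import "fmt"

// flow mirrors the reworked inFlow bookkeeping on a single level.
type flow struct {
	limit         uint32
	pendingData   uint32 // received but not yet read by the application
	pendingUpdate uint32 // read by the application but not yet announced to the peer
}

func (f *flow) onData(n uint32) error {
	if f.pendingData+f.pendingUpdate+n > f.limit {
		return fmt.Errorf("received %d bytes, exceeding the window of %d", f.pendingData+f.pendingUpdate+n, f.limit)
	}
	f.pendingData += n
	return nil
}

// onRead returns the WINDOW_UPDATE increment to send, or 0 if less than a
// quarter of the window has been consumed so far.
func (f *flow) onRead(n uint32) uint32 {
	if f.pendingData == 0 {
		return 0
	}
	f.pendingData -= n
	f.pendingUpdate += n
	if f.pendingUpdate >= f.limit/4 {
		w := f.pendingUpdate
		f.pendingUpdate = 0
		return w
	}
	return 0
}

func main() {
	streamFC := &flow{limit: 64}
	connFC := &flow{limit: 1024}
	// A 32-byte DATA frame arrives: both levels account for it separately,
	// which is what updateWindow in the transports now does explicitly.
	_ = connFC.onData(32)
	_ = streamFC.onData(32)
	fmt.Println(streamFC.onRead(32), connFC.onRead(32)) // 32 0
}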
|
|
383
vendor/src/google.golang.org/grpc/transport/handler_server.go
vendored
Normal file
|
@ -0,0 +1,383 @@
|
|||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||
// uses the standard Go http2 Server implementation (via the
|
||||
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
}
|
||||
if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
}
|
||||
if _, ok := w.(http.CloseNotifier); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := timeoutDecode(v)
|
||||
if err != nil {
|
||||
return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
var metakv []string
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
if k == "user-agent" {
|
||||
// user-agent is special. Copying logic of http_util.go.
|
||||
if i := strings.LastIndex(v, " "); i == -1 {
|
||||
// There is no application user agent string being set
|
||||
continue
|
||||
} else {
|
||||
v = v[:i]
|
||||
}
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
|
||||
}
|
||||
}
|
||||
st.headerMD = metadata.Pairs(metakv...)
|
||||
|
||||
return st, nil
|
||||
}
|
||||
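The new handler_server.go lets a *grpc.Server answer gRPC requests from inside a standard net/http handler, provided the Go HTTP server is already speaking HTTP/2 (in practice, over TLS). A minimal usage sketch, with the certificate paths as placeholders and service registration elided:

package main

import (
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// ... register services on gs ...

	mux := http.NewServeMux()
	// *grpc.Server implements http.Handler via the serverHandlerTransport
	// added above; requests must arrive over HTTP/2.
	mux.Handle("/", gs)

	// TLS is what makes net/http negotiate HTTP/2 automatically.
	// "cert.pem" and "key.pem" are placeholder paths.
	if err := http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux); err != nil {
		panic(err)
	}
}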
|
||||
// serverHandlerTransport is an implementation of ServerTransport
|
||||
// which replies to exactly one gRPC request (exactly one HTTP request),
|
||||
// using the net/http.Handler interface. This http.Handler is guaranteed
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
// writes is a channel of code to run serialized in the
|
||||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
type strAddr string
|
||||
|
||||
func (a strAddr) Network() string {
|
||||
if a != "" {
|
||||
// Per the documentation on net/http.Request.RemoteAddr, if this is
|
||||
// set, it's set to the IP:port of the peer (hence, TCP):
|
||||
// https://golang.org/pkg/net/http/#Request
|
||||
//
|
||||
// If we want to support Unix sockets later, we can
|
||||
// add our own grpc-specific convention within the
|
||||
// grpc codebase to set RemoteAddr to a different
|
||||
// format, or probably better: we can attach it to the
|
||||
// context and use that from serverHandlerTransport.RemoteAddr.
|
||||
return "tcp"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a strAddr) String() string { return string(a) }
|
||||
|
||||
// do runs fn in the ServeHTTP goroutine.
|
||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||
select {
|
||||
case ht.writes <- fn:
|
||||
return nil
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
// first call (for example, in end2end tests's TestNoService).
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
|
||||
h := ht.rw.Header()
|
||||
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
|
||||
if statusDesc != "" {
|
||||
h.Set("Grpc-Message", statusDesc)
|
||||
}
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
for _, v := range vv {
|
||||
// http2 ResponseWriter mechanism to
|
||||
// send undeclared Trailers after the
|
||||
// headers have possibly been written.
|
||||
h.Add(http2.TrailerPrefix+k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
close(ht.writes)
|
||||
return err
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", "application/grpc")
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
// Trailers per the net/http.ResponseWriter contract.
|
||||
// See https://golang.org/pkg/net/http/#ResponseWriter
|
||||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.rw.Write(data)
|
||||
if !opts.Delay {
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
for _, v := range vv {
|
||||
h.Add(k, v)
|
||||
}
|
||||
}
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// requestOver is closed when either the request's context is done
|
||||
// or the status has been written via WriteStatus.
|
||||
requestOver := make(chan struct{})
|
||||
|
||||
// clientGone receives a single value if peer is gone, either
|
||||
// because the underlying connection is dead or because the
|
||||
// peer sends an http2 RST_STREAM.
|
||||
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
|
||||
go func() {
|
||||
select {
|
||||
case <-requestOver:
|
||||
return
|
||||
case <-ht.closedCh:
|
||||
case <-clientGone:
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
windowHandler: func(int) {}, // nothing
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{*req.TLS}
|
||||
}
|
||||
ctx = metadata.NewContext(ctx, ht.headerMD)
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
s.ctx = newContextWithStream(ctx, s)
|
||||
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
|
||||
|
||||
// readerDone is closed when the Body.Read-ing goroutine exits.
|
||||
readerDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readerDone)
|
||||
|
||||
// TODO: minimize garbage, optimize recvBuffer code/ownership
|
||||
const readSize = 8196
|
||||
for buf := make([]byte, readSize); ; {
|
||||
n, err := req.Body.Read(buf)
|
||||
if n > 0 {
|
||||
s.buf.put(&recvMsg{data: buf[:n:n]})
|
||||
buf = buf[n:]
|
||||
}
|
||||
if err != nil {
|
||||
s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
|
||||
return
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
buf = make([]byte, readSize)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// startStream is provided by the *grpc.Server's serveStreams.
|
||||
// It starts a goroutine serving s and exits immediately.
|
||||
// The goroutine that is started is the one that then calls
|
||||
// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
|
||||
startStream(s)
|
||||
|
||||
ht.runStream()
|
||||
close(requestOver)
|
||||
|
||||
// Wait for reading goroutine to finish.
|
||||
req.Body.Close()
|
||||
<-readerDone
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) runStream() {
|
||||
for {
|
||||
select {
|
||||
case fn, ok := <-ht.writes:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
fn()
|
||||
case <-ht.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mapRecvMsgError returns the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, in can only be:
|
||||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return StreamError{
|
||||
Code: code,
|
||||
Desc: se.Error(),
|
||||
}
|
||||
}
|
||||
}
|
||||
return ConnectionError{Desc: err.Error()}
|
||||
}
|
|
@ -50,6 +50,7 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
|
@ -139,29 +140,6 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
conn.Close()
|
||||
}
|
||||
}()
|
||||
// Send connection preface to server.
|
||||
n, err := conn.Write(clientPreface)
|
||||
if err != nil {
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
if n != len(clientPreface) {
|
||||
return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
}
|
||||
framer := newFramer(conn)
|
||||
if initialWindowSize != defaultWindowSize {
|
||||
err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
|
||||
} else {
|
||||
err = framer.writeSettings(true)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
|
||||
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
}
|
||||
ua := primaryUA
|
||||
if opts.UserAgent != "" {
|
||||
ua = opts.UserAgent + " " + ua
|
||||
|
@ -177,7 +155,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
writableChan: make(chan int, 1),
|
||||
shutdownChan: make(chan struct{}),
|
||||
errorChan: make(chan struct{}),
|
||||
framer: framer,
|
||||
framer: newFramer(conn),
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
controlBuf: newRecvBuffer(),
|
||||
|
@ -190,27 +168,49 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
maxStreams: math.MaxInt32,
|
||||
streamSendQuota: defaultWindowSize,
|
||||
}
|
||||
// Start the reader goroutine for incoming message. Each transport has
|
||||
// a dedicated goroutine which reads HTTP2 frame from network. Then it
|
||||
// dispatches the frame to the corresponding stream entity.
|
||||
go t.reader()
|
||||
// Send connection preface to server.
|
||||
n, err := t.conn.Write(clientPreface)
|
||||
if err != nil {
|
||||
t.Close()
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
if n != len(clientPreface) {
|
||||
t.Close()
|
||||
return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
}
|
||||
if initialWindowSize != defaultWindowSize {
|
||||
err = t.framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
|
||||
} else {
|
||||
err = t.framer.writeSettings(true)
|
||||
}
|
||||
if err != nil {
|
||||
t.Close()
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
|
||||
if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
|
||||
t.Close()
|
||||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
}
|
||||
go t.controller()
|
||||
t.writableChan <- 0
|
||||
// Start the reader goroutine for incoming message. The threading model
|
||||
// on receiving is that each transport has a dedicated goroutine which
|
||||
// reads HTTP2 frame from network. Then it dispatches the frame to the
|
||||
// corresponding stream entity.
|
||||
go t.reader()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
fc := &inFlow{
|
||||
limit: initialWindowSize,
|
||||
conn: t.fc,
|
||||
}
|
||||
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
|
||||
s := &Stream{
|
||||
id: t.nextID,
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
buf: newRecvBuffer(),
|
||||
fc: fc,
|
||||
fc: &inFlow{limit: initialWindowSize},
|
||||
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
|
||||
headerChan: make(chan struct{}),
|
||||
}
|
||||
|
@ -234,14 +234,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
var timeout time.Duration
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
timeout = dl.Sub(time.Now())
|
||||
if timeout <= 0 {
|
||||
return nil, ContextErr(context.DeadlineExceeded)
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ContextErr(ctx.Err())
|
||||
default:
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.conn.RemoteAddr(),
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
ctx = credentials.NewContext(ctx, t.authInfo)
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
authData := make(map[string]string)
|
||||
for _, c := range t.authCreds {
|
||||
// Construct URI required to get auth request metadata.
|
||||
|
@ -317,10 +323,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
|
||||
|
||||
if callHdr.SendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if timeout > 0 {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
|
||||
}
|
||||
for k, v := range authData {
|
||||
// Capital header names are illegal in HTTP/2.
|
||||
k = strings.ToLower(k)
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
var (
|
||||
|
@ -344,6 +355,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
var flush bool
|
||||
if endHeaders && (hasMD || callHdr.Flush) {
|
||||
flush = true
|
||||
}
|
||||
if first {
|
||||
// Sends a HeadersFrame to server to start a new stream.
|
||||
p := http2.HeadersFrameParam{
|
||||
|
@ -355,11 +370,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
// Do a force flush for the buffered frames iff it is the last headers frame
|
||||
// and there is header metadata to be sent. Otherwise, there is flushing until
|
||||
// the corresponding data frame is written.
|
||||
err = t.framer.writeHeaders(hasMD && endHeaders, p)
|
||||
err = t.framer.writeHeaders(flush, p)
|
||||
first = false
|
||||
} else {
|
||||
// Sends Continuation frames for the leftover headers.
|
||||
err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))
|
||||
err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
|
||||
}
|
||||
if err != nil {
|
||||
t.notifyError(err)
|
||||
|
@ -389,8 +404,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
|
|||
// other goroutines.
|
||||
s.cancel()
|
||||
s.mu.Lock()
|
||||
if q := s.fc.restoreConn(); q > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, q})
|
||||
if q := s.fc.resetPendingData(); q > 0 {
|
||||
if n := t.fc.onRead(q); n > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, n})
|
||||
}
|
||||
}
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
|
@ -412,6 +429,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
|
|||
// accessed any more.
|
||||
func (t *http2Client) Close() (err error) {
|
||||
t.mu.Lock()
|
||||
if t.state == reachable {
|
||||
close(t.errorChan)
|
||||
}
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return errors.New("transport: Close() was already called")
|
||||
|
@ -490,6 +510,10 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
|
|||
t.framer.adjustNumWriters(1)
|
||||
// Got some quota. Try to acquire writing privilege on the transport.
|
||||
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
|
||||
if _, ok := err.(StreamError); ok {
|
||||
// Return the connection quota back.
|
||||
t.sendQuotaPool.add(len(p))
|
||||
}
|
||||
if t.framer.adjustNumWriters(-1) == 0 {
|
||||
// This writer is the last one in this batch and has the
|
||||
// responsibility to flush the buffered frames. It queues
|
||||
|
@ -499,6 +523,16 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
|
|||
}
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
t.sendQuotaPool.add(len(p))
|
||||
if t.framer.adjustNumWriters(-1) == 0 {
|
||||
t.controlBuf.put(&flushIO{})
|
||||
}
|
||||
t.writableChan <- 0
|
||||
return ContextErr(s.ctx.Err())
|
||||
default:
|
||||
}
|
||||
if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
|
||||
// Do a force flush iff this is last frame for the entire gRPC message
|
||||
// and the caller is the only writer at this moment.
|
||||
|
@ -537,47 +571,52 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
|
|||
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.activeStreams == nil {
|
||||
// The transport is closing.
|
||||
return nil, false
|
||||
}
|
||||
if s, ok := t.activeStreams[f.Header().StreamID]; ok {
|
||||
return s, true
|
||||
}
|
||||
return nil, false
|
||||
s, ok := t.activeStreams[f.Header().StreamID]
|
||||
return s, ok
|
||||
}
|
||||
|
||||
// updateWindow adjusts the inbound quota for the stream and the transport.
|
||||
// Window updates will deliver to the controller for sending when
|
||||
// the cumulative quota exceeds the corresponding threshold.
|
||||
func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
||||
swu, cwu := s.fc.onRead(n)
|
||||
if swu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{s.id, swu})
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.state == streamDone {
|
||||
return
|
||||
}
|
||||
if cwu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, cwu})
|
||||
if w := t.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
if w := s.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{s.id, w})
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
size := len(f.Data())
|
||||
if err := t.fc.onData(uint32(size)); err != nil {
|
||||
t.notifyError(ConnectionErrorf("%v", err))
|
||||
return
|
||||
}
|
||||
// Select the right stream to dispatch.
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
size := len(f.Data())
|
||||
if size > 0 {
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
// The stream has been closed. Release the corresponding quota.
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.state = streamDone
|
||||
s.statusCode = codes.Internal
|
||||
s.statusDesc = err.Error()
|
||||
|
@ -586,6 +625,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
|||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
// TODO(bradfitz, zhaoq): A copy is required here because there is no
|
||||
// guarantee f.Data() is consumed before the arrival of next frame.
|
||||
// Can this copy be eliminated?
|
||||
|
@ -624,9 +664,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
|||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)]
|
||||
s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
|
||||
if !ok {
|
||||
grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
|
||||
s.statusCode = codes.Unknown
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
|
@ -667,52 +708,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
|||
}
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers. It returns the current
|
||||
// stream if there are remaining headers on the wire (in the following
|
||||
// Continuation frame).
|
||||
func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) {
|
||||
defer func() {
|
||||
if pendingStream == nil {
|
||||
hDec.state = decodeState{}
|
||||
}
|
||||
}()
|
||||
endHeaders, err := hDec.decodeClientHTTP2Headers(frame)
|
||||
if s == nil {
|
||||
// s has been closed.
|
||||
return nil
|
||||
// operateHeaders takes action on the decoded headers.
|
||||
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
s, ok := t.getStream(frame)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.write(recvMsg{err: err})
|
||||
var state decodeState
|
||||
for _, hf := range frame.Fields {
|
||||
state.processHeaderField(hf)
|
||||
}
|
||||
if state.err != nil {
|
||||
s.write(recvMsg{err: state.err})
|
||||
// Something wrong. Stops reading even when there is remaining.
|
||||
return nil
|
||||
}
|
||||
if !endHeaders {
|
||||
return s
|
||||
return
|
||||
}
|
||||
|
||||
endStream := frame.StreamEnded()
|
||||
|
||||
s.mu.Lock()
|
||||
if !endStream {
|
||||
s.recvCompress = state.encoding
|
||||
}
|
||||
if !s.headerDone {
|
||||
if !endStream && len(hDec.state.mdata) > 0 {
|
||||
s.header = hDec.state.mdata
|
||||
if !endStream && len(state.mdata) > 0 {
|
||||
s.header = state.mdata
|
||||
}
|
||||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
if !endStream || s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
if len(hDec.state.mdata) > 0 {
|
||||
s.trailer = hDec.state.mdata
|
||||
if len(state.mdata) > 0 {
|
||||
s.trailer = state.mdata
|
||||
}
|
||||
s.state = streamDone
|
||||
s.statusCode = hDec.state.statusCode
|
||||
s.statusDesc = hDec.state.statusDesc
|
||||
s.statusCode = state.statusCode
|
||||
s.statusDesc = state.statusDesc
|
||||
s.mu.Unlock()
|
||||
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleMalformedHTTP2(s *Stream, err error) {
|
||||
s.mu.Lock()
|
||||
if !s.headerDone {
|
||||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: err})
|
||||
}
|
||||
|
||||
// reader runs as a separate goroutine in charge of reading data from network
|
||||
|
@ -735,25 +783,31 @@ func (t *http2Client) reader() {
|
|||
}
|
||||
t.handleSettings(sf)
|
||||
|
||||
hDec := newHPACKDecoder()
|
||||
var curStream *Stream
|
||||
// loop to keep reading incoming messages on this transport.
|
||||
for {
|
||||
frame, err := t.framer.readFrame()
|
||||
if err != nil {
|
||||
t.notifyError(err)
|
||||
return
|
||||
// Abort an active stream if the http2.Framer returns a
|
||||
// http2.StreamError. This can happen only if the server's response
|
||||
// is malformed http2.
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
t.mu.Unlock()
|
||||
if s != nil {
|
||||
// use error detail to provide better err message
|
||||
handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
// Transport error.
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.HeadersFrame:
|
||||
// operateHeaders has to be invoked regardless the value of curStream
|
||||
// because the HPACK decoder needs to be updated using the received
|
||||
// headers.
|
||||
curStream, _ = t.getStream(frame)
|
||||
endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, endStream)
|
||||
case *http2.ContinuationFrame:
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded())
|
||||
case *http2.MetaHeadersFrame:
|
||||
t.operateHeaders(frame)
|
||||
case *http2.DataFrame:
|
||||
t.handleData(frame)
|
||||
case *http2.RSTStreamFrame:
|
||||
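The reader loop above no longer tracks a pending stream across HeadersFrame/ContinuationFrame pairs; it relies on x/net/http2 coalescing header blocks into a single MetaHeadersFrame, which the Framer emits once an hpack decoder is attached. A sketch of that framer setup (the vendored newFramer presumably does something equivalent; the sizes here are illustrative):

package main

import (
	"net"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// newMetaFramer returns a framer whose ReadFrame yields *MetaHeadersFrame
// values with all HEADERS+CONTINUATION fields already hpack-decoded.
func newMetaFramer(conn net.Conn) *http2.Framer {
	fr := http2.NewFramer(conn, conn)
	// Attaching a decoder switches ReadFrame from raw HeadersFrame /
	// ContinuationFrame pairs to coalesced MetaHeadersFrames.
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	fr.MaxHeaderListSize = 16 << 20 // illustrative cap
	return fr
}

func main() { _ = newMetaFramer }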
|
|
|
@ -49,6 +49,7 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
||||
|
@ -61,8 +62,8 @@ type http2Server struct {
|
|||
maxStreamID uint32 // max stream ID ever seen
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
// writableChan synchronizes write access to the transport.
|
||||
// A writer acquires the write lock by sending a value on writableChan
|
||||
// and releases it by receiving from writableChan.
|
||||
// A writer acquires the write lock by receiving a value on writableChan
|
||||
// and releases it by sending on writableChan.
|
||||
writableChan chan int
|
||||
// shutdownChan is closed when Close is called.
|
||||
// Blocking operations should select on shutdownChan to avoid
|
||||
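The corrected comment above is worth illustrating: writableChan is a one-slot channel used as a mutex, and the fix is purely about direction, a writer acquires the lock by receiving the token and releases it by sending the token back. A tiny sketch of that idiom:

package main

import "fmt"

func main() {
	writable := make(chan int, 1)
	writable <- 0 // the token starts out available

	// Acquire the write lock by receiving the token...
	<-writable
	fmt.Println("holding the transport write lock")
	// ...and release it by sending the token back.
	writable <- 0
}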
|
@ -135,66 +136,69 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI
|
|||
return t, nil
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers. It returns the current
|
||||
// stream if there are remaining headers on the wire (in the following
|
||||
// Continuation frame).
|
||||
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) {
|
||||
defer func() {
|
||||
if pendingStream == nil {
|
||||
hDec.state = decodeState{}
|
||||
}
|
||||
}()
|
||||
endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
|
||||
if s == nil {
|
||||
// s has been closed.
|
||||
return nil
|
||||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) {
|
||||
buf := newRecvBuffer()
|
||||
s := &Stream{
|
||||
id: frame.Header().StreamID,
|
||||
st: t,
|
||||
buf: buf,
|
||||
fc: &inFlow{limit: initialWindowSize},
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Printf("transport: http2Server.operateHeader found %v", err)
|
||||
|
||||
var state decodeState
|
||||
for _, hf := range frame.Fields {
|
||||
state.processHeaderField(hf)
|
||||
}
|
||||
if err := state.err; err != nil {
|
||||
if se, ok := err.(StreamError); ok {
|
||||
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
|
||||
}
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if endStream {
|
||||
|
||||
if frame.StreamEnded() {
|
||||
// s is just created by the caller. No lock needed.
|
||||
s.state = streamReadDone
|
||||
}
|
||||
if !endHeaders {
|
||||
return s
|
||||
}
|
||||
if hDec.state.timeoutSet {
|
||||
s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
|
||||
s.recvCompress = state.encoding
|
||||
if state.timeoutSet {
|
||||
s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
|
||||
} else {
|
||||
s.ctx, s.cancel = context.WithCancel(context.TODO())
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.conn.RemoteAddr(),
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
s.ctx = credentials.NewContext(s.ctx, t.authInfo)
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
s.ctx = peer.NewContext(s.ctx, pr)
|
||||
// Cache the current stream to the context so that the server application
|
||||
// can find out. Required when the server wants to send some metadata
|
||||
// back to the client (unary call only).
|
||||
s.ctx = newContextWithStream(s.ctx, s)
|
||||
// Attach the received metadata to the context.
|
||||
if len(hDec.state.mdata) > 0 {
|
||||
s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
|
||||
if len(state.mdata) > 0 {
|
||||
s.ctx = metadata.NewContext(s.ctx, state.mdata)
|
||||
}
|
||||
|
||||
s.dec = &recvBufferReader{
|
||||
ctx: s.ctx,
|
||||
recv: s.buf,
|
||||
}
|
||||
s.method = hDec.state.method
|
||||
s.recvCompress = state.encoding
|
||||
s.method = state.method
|
||||
t.mu.Lock()
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
|
||||
return nil
|
||||
return
|
||||
}
|
||||
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
|
||||
t.activeStreams[s.id] = s
|
||||
|
@ -203,7 +207,6 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header
|
|||
t.updateWindow(s, uint32(n))
|
||||
}
|
||||
handle(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleStreams receives incoming streams using the given handler. This is
|
||||
|
@ -236,16 +239,24 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
|
|||
}
|
||||
t.handleSettings(sf)
|
||||
|
||||
hDec := newHPACKDecoder()
|
||||
var curStream *Stream
|
||||
for {
|
||||
frame, err := t.framer.readFrame()
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
t.mu.Unlock()
|
||||
if s != nil {
|
||||
t.closeStream(s)
|
||||
}
|
||||
t.controlBuf.put(&resetStream{se.StreamID, se.Code})
|
||||
continue
|
||||
}
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.HeadersFrame:
|
||||
case *http2.MetaHeadersFrame:
|
||||
id := frame.Header().StreamID
|
||||
if id%2 != 1 || id <= t.maxStreamID {
|
||||
// illegal gRPC stream id.
|
||||
|
@ -254,21 +265,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
|
|||
break
|
||||
}
|
||||
t.maxStreamID = id
|
||||
buf := newRecvBuffer()
|
||||
fc := &inFlow{
|
||||
limit: initialWindowSize,
|
||||
conn: t.fc,
|
||||
}
|
||||
curStream = &Stream{
|
||||
id: frame.Header().StreamID,
|
||||
st: t,
|
||||
buf: buf,
|
||||
fc: fc,
|
||||
}
|
||||
endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle)
|
||||
case *http2.ContinuationFrame:
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded(), handle)
|
||||
t.operateHeaders(frame, handle)
|
||||
case *http2.DataFrame:
|
||||
t.handleData(frame)
|
||||
case *http2.RSTStreamFrame:
|
||||
|
@ -306,33 +303,51 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
|
|||
// Window updates will deliver to the controller for sending when
|
||||
// the cumulative quota exceeds the corresponding threshold.
|
||||
func (t *http2Server) updateWindow(s *Stream, n uint32) {
|
||||
swu, cwu := s.fc.onRead(n)
|
||||
if swu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{s.id, swu})
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.state == streamDone {
|
||||
return
|
||||
}
|
||||
if cwu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, cwu})
|
||||
if w := t.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
if w := s.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{s.id, w})
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||
size := len(f.Data())
|
||||
if err := t.fc.onData(uint32(size)); err != nil {
|
||||
grpclog.Printf("transport: http2Server %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
// Select the right stream to dispatch.
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
size := len(f.Data())
|
||||
if size > 0 {
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
grpclog.Printf("transport: http2Server %v", err)
|
||||
t.Close()
|
||||
return
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
// The stream has been closed. Release the corresponding quota.
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
s.mu.Unlock()
|
||||
t.closeStream(s)
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
// TODO(bradfitz, zhaoq): A copy is required here because there is no
|
||||
// guarantee f.Data() is consumed before the arrival of next frame.
|
||||
// Can this copy be eliminated?
|
||||
|
@ -441,6 +456,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
|||
t.hBuf.Reset()
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
if s.sendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
for k, v := range md {
|
||||
for _, entry := range v {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
|
||||
|
@ -503,6 +521,10 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
|
|||
// TODO(zhaoq): Support multi-writers for a single stream.
|
||||
var writeHeaderFrame bool
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return StreamErrorf(codes.Unknown, "the stream has been done")
|
||||
}
|
||||
if !s.headerOk {
|
||||
writeHeaderFrame = true
|
||||
s.headerOk = true
|
||||
|
@ -515,6 +537,9 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
|
|||
t.hBuf.Reset()
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
if s.sendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
p := http2.HeadersFrameParam{
|
||||
StreamID: s.id,
|
||||
BlockFragment: t.hBuf.Bytes(),
|
||||
|
@@ -567,6 +592,10 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
        // Got some quota. Try to acquire writing privilege on the
        // transport.
        if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
            if _, ok := err.(StreamError); ok {
                // Return the connection quota back.
                t.sendQuotaPool.add(ps)
            }
            if t.framer.adjustNumWriters(-1) == 0 {
                // This writer is the last one in this batch and has the
                // responsibility to flush the buffered frames. It queues
@@ -576,6 +605,16 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
            }
            return err
        }
        select {
        case <-s.ctx.Done():
            t.sendQuotaPool.add(ps)
            if t.framer.adjustNumWriters(-1) == 0 {
                t.controlBuf.put(&flushIO{})
            }
            t.writableChan <- 0
            return ContextErr(s.ctx.Err())
        default:
        }
        var forceFlush bool
        if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
            forceFlush = true
@@ -673,20 +712,22 @@ func (t *http2Server) closeStream(s *Stream) {
    t.mu.Lock()
    delete(t.activeStreams, s.id)
    t.mu.Unlock()
    if q := s.fc.restoreConn(); q > 0 {
        t.controlBuf.put(&windowUpdate{0, q})
    }
    // In case stream sending and receiving are invoked in separate
    // goroutines (e.g., bi-directional streaming), cancel needs to be
    // called to interrupt the potential blocking on other goroutines.
    s.cancel()
    s.mu.Lock()
    if q := s.fc.resetPendingData(); q > 0 {
        if w := t.fc.onRead(q); w > 0 {
            t.controlBuf.put(&windowUpdate{0, w})
        }
    }
    if s.state == streamDone {
        s.mu.Unlock()
        return
    }
    s.state = streamDone
    s.mu.Unlock()
    // In case stream sending and receiving are invoked in separate
    // goroutines (e.g., bi-directional streaming), cancel needs to be
    // called to interrupt the potential blocking on other goroutines.
    s.cancel()
}

func (t *http2Server) RemoteAddr() net.Addr {

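For readers following the flow-control changes in handleData, updateWindow, and closeStream above: incoming DATA is charged against both a connection-level and a stream-level quota (t.fc and s.fc), and consumed quota is returned to the peer as windowUpdate control frames. The sketch below is a minimal illustration of that accounting only; simpleInFlow, its quarter-window threshold, and the example sizes are assumptions for illustration, not the grpc-go inFlow implementation.

// Illustrative sketch only: a simplified inbound flow-control accountant in the
// spirit of the inFlow type used above. Names and the 1/4-window threshold are
// assumed for illustration.
package main

import (
    "errors"
    "fmt"
)

type simpleInFlow struct {
    limit         uint32 // window size advertised to the peer
    pendingData   uint32 // bytes received but not yet consumed by the application
    pendingUpdate uint32 // bytes consumed but not yet returned as WINDOW_UPDATE
}

// onData records size bytes arriving from the wire; it fails when the peer
// overruns the advertised window.
func (f *simpleInFlow) onData(size uint32) error {
    if f.pendingData+f.pendingUpdate+size > f.limit {
        return errors.New("flow control violation")
    }
    f.pendingData += size
    return nil
}

// onRead records size bytes handed to the application and returns a window
// increment to send once enough quota has accumulated (here: a quarter of the
// window, an assumed threshold).
func (f *simpleInFlow) onRead(size uint32) uint32 {
    f.pendingData -= size
    f.pendingUpdate += size
    if f.pendingUpdate >= f.limit/4 {
        w := f.pendingUpdate
        f.pendingUpdate = 0
        return w
    }
    return 0
}

func main() {
    f := &simpleInFlow{limit: 64 * 1024}
    _ = f.onData(20 * 1024)
    fmt.Println(f.onRead(20 * 1024)) // 20480: past the threshold, so a WINDOW_UPDATE is due
}
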
@@ -62,13 +62,14 @@ const (
)

var (
    clientPreface = []byte(http2.ClientPreface)
    http2RSTErrConvTab = map[http2.ErrCode]codes.Code{
    clientPreface = []byte(http2.ClientPreface)
    http2ErrConvTab = map[http2.ErrCode]codes.Code{
        http2.ErrCodeNo: codes.Internal,
        http2.ErrCodeProtocol: codes.Internal,
        http2.ErrCodeInternal: codes.Internal,
        http2.ErrCodeFlowControl: codes.ResourceExhausted,
        http2.ErrCodeSettingsTimeout: codes.Internal,
        http2.ErrCodeStreamClosed: codes.Internal,
        http2.ErrCodeFrameSize: codes.Internal,
        http2.ErrCodeRefusedStream: codes.Unavailable,
        http2.ErrCodeCancel: codes.Canceled,
@@ -76,6 +77,7 @@ var (
        http2.ErrCodeConnect: codes.Internal,
        http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
        http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
        http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
    }
    statusCodeConvTab = map[codes.Code]http2.ErrCode{
        codes.Internal: http2.ErrCodeInternal,
@@ -89,6 +91,9 @@ var (
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers are finished.
type decodeState struct {
    err error // first error encountered decoding

    encoding string
    // statusCode caches the stream status received from the trailer
    // the server sent. Client side only.
    statusCode codes.Code
@@ -101,25 +106,11 @@ type decodeState struct {
    mdata map[string][]string
}

// An hpackDecoder decodes HTTP2 headers which may span multiple frames.
type hpackDecoder struct {
    h     *hpack.Decoder
    state decodeState
    err   error // The err when decoding
}

// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame.
type headerFrame interface {
    Header() http2.FrameHeader
    HeaderBlockFragment() []byte
    HeadersEnded() bool
}

// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
func isReservedHeader(hdr string) bool {
    if hdr[0] == ':' {
    if hdr != "" && hdr[0] == ':' {
        return true
    }
    switch hdr {
@@ -136,98 +127,62 @@ func isReservedHeader(hdr string) bool {
    }
}

func newHPACKDecoder() *hpackDecoder {
    d := &hpackDecoder{}
    d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) {
        switch f.Name {
        case "content-type":
            if !strings.Contains(f.Value, "application/grpc") {
                d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header")
                return
            }
        case "grpc-status":
            code, err := strconv.Atoi(f.Value)
            if err != nil {
                d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
                return
            }
            d.state.statusCode = codes.Code(code)
        case "grpc-message":
            d.state.statusDesc = f.Value
        case "grpc-timeout":
            d.state.timeoutSet = true
            var err error
            d.state.timeout, err = timeoutDecode(f.Value)
            if err != nil {
                d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
                return
            }
        case ":path":
            d.state.method = f.Value
        default:
            if !isReservedHeader(f.Name) {
                if f.Name == "user-agent" {
                    i := strings.LastIndex(f.Value, " ")
                    if i == -1 {
                        // There is no application user agent string being set.
                        return
                    }
                    // Extract the application user agent string.
                    f.Value = f.Value[:i]
                }
                if d.state.mdata == nil {
                    d.state.mdata = make(map[string][]string)
                }
                k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
                if err != nil {
                    grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
func (d *decodeState) setErr(err error) {
    if d.err == nil {
        d.err = err
    }
}

func (d *decodeState) processHeaderField(f hpack.HeaderField) {
    switch f.Name {
    case "content-type":
        if !strings.Contains(f.Value, "application/grpc") {
            d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
            return
        }
    case "grpc-encoding":
        d.encoding = f.Value
    case "grpc-status":
        code, err := strconv.Atoi(f.Value)
        if err != nil {
            d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
            return
        }
        d.statusCode = codes.Code(code)
    case "grpc-message":
        d.statusDesc = f.Value
    case "grpc-timeout":
        d.timeoutSet = true
        var err error
        d.timeout, err = timeoutDecode(f.Value)
        if err != nil {
            d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
            return
        }
    case ":path":
        d.method = f.Value
    default:
        if !isReservedHeader(f.Name) {
            if f.Name == "user-agent" {
                i := strings.LastIndex(f.Value, " ")
                if i == -1 {
                    // There is no application user agent string being set.
                    return
                }
                    d.state.mdata[k] = append(d.state.mdata[k], v)
                // Extract the application user agent string.
                f.Value = f.Value[:i]
            }
            if d.mdata == nil {
                d.mdata = make(map[string][]string)
            }
            k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
            if err != nil {
                grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
                return
            }
            d.mdata[k] = append(d.mdata[k], v)
        }
    })
    return d
}

func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
    d.err = nil
    _, err = d.h.Write(frame.HeaderBlockFragment())
    if err != nil {
        err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
    }

    if frame.HeadersEnded() {
        if closeErr := d.h.Close(); closeErr != nil && err == nil {
            err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
        }
        endHeaders = true
    }

    if err == nil && d.err != nil {
        err = d.err
    }
    return
}

func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
    d.err = nil
    _, err = d.h.Write(frame.HeaderBlockFragment())
    if err != nil {
        err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
    }

    if frame.HeadersEnded() {
        if closeErr := d.h.Close(); closeErr != nil && err == nil {
            err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
        }
        endHeaders = true
    }

    if err == nil && d.err != nil {
        err = d.err
    }
    return
}

type timeoutUnit uint8
@@ -318,10 +273,11 @@ type framer struct {

func newFramer(conn net.Conn) *framer {
    f := &framer{
        reader: conn,
        reader: bufio.NewReaderSize(conn, http2IOBufSize),
        writer: bufio.NewWriterSize(conn, http2IOBufSize),
    }
    f.fr = http2.NewFramer(f.writer, f.reader)
    f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
    return f
}

@@ -449,3 +405,7 @@ func (f *framer) flushWrite() error {
func (f *framer) readFrame() (http2.Frame, error) {
    return f.fr.ReadFrame()
}

func (f *framer) errorDetail() error {
    return f.fr.ErrorDetail()
}

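The newFramer change above sets ReadMetaHeaders on the framer, which is why the per-frame hpackDecoder and its decodeClientHTTP2Headers/decodeServerHTTP2Headers helpers could be dropped: with a decoder attached, ReadFrame returns HEADERS plus any CONTINUATION frames as a single, already-decoded MetaHeadersFrame. A minimal sketch of that usage, assuming the golang.org/x/net/http2 packages and an illustrative readHeaders helper that is not part of this diff:

// Sketch: how a framer configured with ReadMetaHeaders yields fully decoded
// header blocks. readHeaders and the 4096-byte table size are illustrative.
package transportsketch

import (
    "io"

    "golang.org/x/net/http2"
    "golang.org/x/net/http2/hpack"
)

// readHeaders reads frames until it sees a header block and returns the
// decoded fields. HEADERS and CONTINUATION frames are coalesced into a single
// MetaHeadersFrame because ReadMetaHeaders is set.
func readHeaders(rw io.ReadWriter) ([]hpack.HeaderField, error) {
    fr := http2.NewFramer(rw, rw)
    fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
    for {
        frame, err := fr.ReadFrame()
        if err != nil {
            return nil, err
        }
        if mh, ok := frame.(*http2.MetaHeadersFrame); ok {
            // Each field can now be fed to something like processHeaderField.
            return mh.Fields, nil
        }
    }
}
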
@@ -63,13 +63,11 @@ type recvMsg struct {
    err error
}

func (recvMsg) isItem() bool {
    return true
}
func (*recvMsg) item() {}

// All items in an out of a recvBuffer should be the same type.
type item interface {
    isItem() bool
    item()
}

// recvBuffer is an unbounded channel of item.
@@ -89,12 +87,14 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r item) {
    b.mu.Lock()
    defer b.mu.Unlock()
    b.backlog = append(b.backlog, r)
    select {
    case b.c <- b.backlog[0]:
        b.backlog = b.backlog[1:]
    default:
    if len(b.backlog) == 0 {
        select {
        case b.c <- r:
            return
        default:
        }
    }
    b.backlog = append(b.backlog, r)
}

func (b *recvBuffer) load() {
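The put rewrite above is the core of the transport's unbounded receive buffer: a one-slot channel that is filled directly when nothing is queued, with overflow parked in a slice that load drains after each receive. A self-contained sketch of the same pattern, assuming an illustrative unboundedBuf type holding ints rather than items:

// Sketch of the unbounded-buffer pattern used by recvBuffer above; names are
// illustrative, not the grpc-go types.
package transportsketch

import "sync"

type unboundedBuf struct {
    mu      sync.Mutex
    c       chan int
    backlog []int
}

func newUnboundedBuf() *unboundedBuf { return &unboundedBuf{c: make(chan int, 1)} }

// put delivers straight into the channel when nothing is queued (the fast path
// added by this change); otherwise it appends to the backlog.
func (b *unboundedBuf) put(v int) {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) == 0 {
        select {
        case b.c <- v:
            return
        default:
        }
    }
    b.backlog = append(b.backlog, v)
}

// load moves the next backlog entry into the channel if there is room; the
// consumer calls it after every receive.
func (b *unboundedBuf) load() {
    b.mu.Lock()
    defer b.mu.Unlock()
    if len(b.backlog) > 0 {
        select {
        case b.c <- b.backlog[0]:
            b.backlog = b.backlog[1:]
        default:
        }
    }
}

// get exposes the receive side of the buffer.
func (b *unboundedBuf) get() <-chan int { return b.c }
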
@@ -170,11 +170,13 @@ type Stream struct {
    ctx    context.Context
    cancel context.CancelFunc
    // method records the associated RPC method of the stream.
    method string
    buf    *recvBuffer
    dec    io.Reader
    fc     *inFlow
    recvQuota uint32
    method       string
    recvCompress string
    sendCompress string
    buf          *recvBuffer
    dec          io.Reader
    fc           *inFlow
    recvQuota    uint32
    // The accumulated inbound quota pending for window update.
    updateQuota uint32
    // The handler to control the window update procedure for both this
@@ -201,6 +203,17 @@ type Stream struct {
    statusDesc string
}

// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
    return s.recvCompress
}

// SetSendCompress sets the compression algorithm to the stream.
func (s *Stream) SetSendCompress(str string) {
    s.sendCompress = str
}

// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is cancelled/expired.
@@ -286,20 +299,18 @@ func (s *Stream) Read(p []byte) (n int, err error) {
    return
}

type key int

// The key to save transport.Stream in the context.
const streamKey = key(0)
type streamKey struct{}

// newContextWithStream creates a new context from ctx and attaches stream
// to it.
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
    return context.WithValue(ctx, streamKey, stream)
    return context.WithValue(ctx, streamKey{}, stream)
}

// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
    s, ok = ctx.Value(streamKey).(*Stream)
    s, ok = ctx.Value(streamKey{}).(*Stream)
    return
}

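The streamKey change above swaps an int-typed constant for an unexported empty struct type, so the context key cannot collide with keys defined in other packages and carries no storage overhead. A short sketch of the pattern, using the standard library context package and illustrative names (the vendored code imports golang.org/x/net/context):

// Sketch of the unexported-struct context-key pattern adopted above; the
// example key and value types are illustrative.
package transportsketch

import "context"

type streamKey struct{}

type stream struct{ id uint32 }

func newContextWithStream(ctx context.Context, s *stream) context.Context {
    // A zero-size unexported struct type cannot collide with keys defined in
    // other packages, unlike an int- or string-typed constant.
    return context.WithValue(ctx, streamKey{}, s)
}

func streamFromContext(ctx context.Context) (*stream, bool) {
    s, ok := ctx.Value(streamKey{}).(*stream)
    return s, ok
}
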
@@ -339,20 +350,40 @@ func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, e
// Options provides additional hints and information for message
// transmission.
type Options struct {
    // Indicate whether it is the last piece for this stream.
    // Last indicates whether this write is the last piece for
    // this stream.
    Last bool
    // The hint to transport impl whether the data could be buffered for
    // batching write. Transport impl can feel free to ignore it.

    // Delay is a hint to the transport implementation for whether
    // the data could be buffered for a batching write. The
    // Transport implementation may ignore the hint.
    Delay bool
}

// CallHdr carries the information of a particular RPC.
type CallHdr struct {
    Host   string // peer host
    Method string // the operation to perform on the specified host
    // Host specifies the peer's host.
    Host string

    // Method specifies the operation to perform.
    Method string

    // RecvCompress specifies the compression algorithm applied on
    // inbound messages.
    RecvCompress string

    // SendCompress specifies the compression algorithm applied on
    // outbound message.
    SendCompress string

    // Flush indicates whether a new stream command should be sent
    // to the peer without waiting for the first data. This is
    // only a hint. The transport may modify the flush decision
    // for performance purposes.
    Flush bool
}

// ClientTransport is the common interface for all gRPC client side transport
// ClientTransport is the common interface for all gRPC client-side transport
// implementations.
type ClientTransport interface {
    // Close tears down this transport. Once it returns, the transport
@@ -381,21 +412,33 @@ type ClientTransport interface {
    Error() <-chan struct{}
}

// ServerTransport is the common interface for all gRPC server side transport
// ServerTransport is the common interface for all gRPC server-side transport
// implementations.
//
// Methods may be called concurrently from multiple goroutines, but
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
    // WriteStatus sends the status of a stream to the client.
    WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
    // Write sends the data for the given stream.
    Write(s *Stream, data []byte, opts *Options) error
    // WriteHeader sends the header metedata for the given stream.
    WriteHeader(s *Stream, md metadata.MD) error
    // HandleStreams receives incoming streams using the given handler.
    HandleStreams(func(*Stream))

    // WriteHeader sends the header metadata for the given stream.
    // WriteHeader may not be called on all streams.
    WriteHeader(s *Stream, md metadata.MD) error

    // Write sends the data for the given stream.
    // Write may not be called on all streams.
    Write(s *Stream, data []byte, opts *Options) error

    // WriteStatus sends the status of a stream to the client.
    // WriteStatus is the final call made on a stream and always
    // occurs.
    WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error

    // Close tears down the transport. Once it is called, the transport
    // should not be accessed any more. All the pending streams and their
    // handlers will be terminated asynchronously.
    Close() error

    // RemoteAddr returns the remote network address.
    RemoteAddr() net.Addr
}
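The reordered ServerTransport comments above pin down the call contract: HandleStreams delivers streams, WriteHeader and Write are optional per stream, and WriteStatus is always the final call. A hedged sketch of a handler that follows that ordering; handleEcho is illustrative, not part of this diff, and the import paths assume the upstream google.golang.org/grpc module rather than this vendored copy:

// Sketch: an echo handler that respects the WriteHeader -> Write* -> WriteStatus
// ordering documented above. Assumed to sit alongside the transport package types.
package transport

import (
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

func handleEcho(t ServerTransport, s *Stream) {
    // Optional: header metadata before any data frames.
    if err := t.WriteHeader(s, metadata.MD{"x-demo": []string{"1"}}); err != nil {
        return
    }
    buf := make([]byte, 4096)
    for {
        n, err := s.Read(buf)
        if n > 0 {
            // Optional: zero or more data writes.
            if werr := t.Write(s, buf[:n], &Options{}); werr != nil {
                return
            }
        }
        if err != nil {
            break
        }
    }
    // Always the final call on the stream.
    t.WriteStatus(s, codes.OK, "")
}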